def submit_model_data(self,
                          attachments=None,
                          gzip=False,
                          info_callback=None,
                          info_to_monitor=None):
        """Submits a job to the cloud service.

        Args:
            attachments: A list of attachments. Each attachment is a dict with
                the following keys:
                   - 'name' : the name of the attachment
                   - 'data' : the data for the attachment
            gzip: If ``True``, data is gzipped before being sent over the network
            info_callback: A callback to be called when some info becomes available.
                That callback takes one parameter: a dict containing
                the info as it becomes available.
            info_to_monitor: A set of information to monitor with info_callback.
                Currently, can be ``jobid`` and ``progress``.
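
        Example:
            A minimal sketch, assuming ``connector`` is an instance of this class
            with a configured ``docloud_context``::

                def on_info(info):
                    # keys appear as they become available
                    if 'jobid' in info:
                        print("job id: %s" % info['jobid'])
                    if 'progress' in info:
                        print("progress: %s" % info['progress'])

                connector.submit_model_data(
                    attachments=[{'name': 'model.lp', 'data': b'Minimize obj: x'}],
                    info_callback=on_info,
                    info_to_monitor={'jobid', 'progress'})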
        """
        self.__vars = None
        self.timed_out = False
        self.results.clear()

        # normalize defaults: attachments is iterated below, and
        # info_to_monitor is documented as a set
        if attachments is None:
            attachments = []
        if not info_to_monitor:
            info_to_monitor = set()

        # check that url is valid
        parts = urlparse(self.docloud_context.url)
        if not parts.scheme:
            raise DOcloudConnectorException(
                "Malformed URL: '%s': No scheme supplied." %
                self.docloud_context.url)

        proxies = self.docloud_context.proxies
        try:
            client = JobClient(self.docloud_context.url,
                               self.docloud_context.key,
                               proxies=proxies)
        except TypeError:
            # docloud clients <= 1.0.172 do not support the proxies argument
            warnings.warn(
                "Using a docloud client that does not support proxies in init()",
                UserWarning)
            client = JobClient(self.docloud_context.url,
                               self.docloud_context.key)
        self.log("client created")
        if proxies:
            self.log("proxies = %s" % proxies)

        # prepare client
        if self.docloud_context.log_requests:
            client.rest_callback = \
                lambda m, u, *a, **kw: self._rest_callback(m, u, *a, **kw)
        client.verify = self.docloud_context.verify
        client.timeout = self.docloud_context.get('timeout', None)

        try:
            try:
                # Extract the list of attachment names
                att_names = [a['name'] for a in attachments]

                # create job
                jobid = client.create_job(
                    attachments=att_names,
                    parameters=self.docloud_context.job_parameters)
                self.log("job creation submitted, id is: {0!s}".format(jobid))
                if info_callback and 'jobid' in info_to_monitor:
                    info_callback({'jobid': jobid})
            except ConnectionError as c_e:
                raise DOcloudConnectorException(
                    "Cannot connect to {0}, error: {1}".format(
                        self.docloud_context.url, str(c_e)))

            try:
                # now upload data
                for a in attachments:
                    pos = 0
                    if 'data' in a:
                        att_data = {'data': a['data']}
                    elif 'file' in a:
                        att_data = {'file': a['file']}
                        pos = a['file'].tell()
                    elif 'filename' in a:
                        att_data = {'filename': a['filename']}
                    else:
                        raise ValueError(
                            "attachment must provide 'data', 'file' or 'filename'")

                    client.upload_job_attachment(jobid,
                                                 attid=a['name'],
                                                 **att_data)
                    self.log("Attachment: %s has been uploaded" % a['name'])
                    if self.docloud_context.debug_dump_dir:
                        target_dir = self.docloud_context.debug_dump_dir
                        if not os.path.exists(target_dir):
                            os.makedirs(target_dir)
                        self.log("Dumping input attachment %s to dir %s" %
                                 (a['name'], target_dir))
                        with open(os.path.join(target_dir, a['name']),
                                  "wb") as f:
                            if 'data' in a:
                                if isinstance(a['data'], bytes):
                                    f.write(a['data'])
                                else:
                                    f.write(a['data'].encode('utf-8'))
                            else:
                                a['file'].seek(pos)
                                f.write(a['file'].read())
                # execute job
                client.execute_job(jobid)
                self.log("DOcplexcloud execute submitted has been started")
                # get job execution status until it's processed or failed
                timedout = False
                try:
                    self._executionStatus = self.wait_for_completion(
                        client,
                        jobid,
                        info_callback=info_callback,
                        info_to_monitor=info_to_monitor)
                except DOcloudInterruptedException:
                    timedout = True
                self.log("docloud execution has finished")
                # get job status. Do this before any time out handling
                self.jobInfo = client.get_job(jobid)

                if self.docloud_context.fire_last_progress and info_callback:
                    progress_data = self.map_job_info_to_progress_data(
                        self.jobInfo)
                    info_callback({'progress': progress_data})

                if timedout:
                    self.timed_out = True
                    self.log("Solve timed out after {waittime} sec".format(
                        waittime=self.docloud_context.waittime))
                    return
                # get solution => download all attachments
                try:
                    for a in client.get_job_attachments(jobid):
                        if a['type'] == 'OUTPUT_ATTACHMENT':
                            name = a['name']
                            self.log("Downloading attachment '%s'" % name)
                            attachment_as_string = self._as_string(
                                client.download_job_attachment(jobid,
                                                               attid=name))
                            self.results[name] = attachment_as_string
                            if self.docloud_context.debug_dump_dir:
                                target_dir = self.docloud_context.debug_dump_dir
                                if not os.path.exists(target_dir):
                                    os.makedirs(target_dir)
                                self.log("Dumping attachment %s to dir %s" %
                                         (name, target_dir))
                                with open(os.path.join(target_dir, name),
                                          "wb") as f:
                                    f.write(
                                        attachment_as_string.encode('utf-8'))
                except DOcloudNotFoundError:
                    self.log("no solution in attachment")
                self.log("docloud results have been received")
                # on_solve_finished_cb
                if self.docloud_context.on_solve_finished_cb:
                    self.docloud_context.on_solve_finished_cb(jobid=jobid,
                                                              client=client,
                                                              connector=self)
                return
            finally:
                if self.docloud_context.delete_job:
                    deleted = client.delete_job(jobid)
                    self.log("delete status for job: {0!s} = {1!s}".format(
                        jobid, deleted))

        finally:
            client.close()
class Optimizer(object):
    '''
     Handles the actual optimization task.
     Creates and executes a job builder for an optimization problem instance.
     Encapsulates the DOCloud API.
     This class is designed to facilitate multiple calls to the optimizer, such as would occur
     in a decomposition algorithm, although it transparently supports single use as well.
     In particular, the data can be factored into a constant data set that does not vary from
     run to run (represented by a JSON or .dat file) and a variable piece that does vary
     (represented by a Collector object).
     The optimization model can also be factored into two pieces, a best practice for large
     models and multi-models:
         - A data model that defines the tuples and tuple sets that will contain the input
           and output data.
         - An optimization model that defines the decision variables, decision expressions,
           objective function, constraints, and pre- and post-processing data transformations.
     Factoring either the data or the optimization model in this fashion is optional.

     The problem instance is specified by the OPL model and the input data received from the
     invoking (e.g. ColumnGeneration) instance.
     Input and output data are realized as instances of OPLCollector, which in turn are
     specified by their respective schemas.
     This class is completely independent of the specific optimization problem to be solved.
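
     Example:
         A sketch of typical single-use operation; the URL, key, .mod URLs, result
         schema, and input collector below are placeholders, not part of this class:

             credentials = {"url": "https://api.docloud.example.com/job_manager/rest/v1",
                            "key": "YOUR_API_KEY"}
             optimizer = Optimizer("cuttingStock", credentials=credentials)
             optimizer.setOPLModel("cuttingStock.mod",
                                   dotMods=[DATA_MOD_URL, OPT_MOD_URL])
             optimizer.setResultDataModel(resultModel)
             solution = optimizer.solve(inputData=variableInputs)
             status = optimizer.getSolveStatus()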
    '''

    def __init__(self, problemName, model=None, resultDataModel=None, credentials=None, *attachments):
        '''
         Constructs an Optimizer instance.
         The instance requires an optimization model as a parameter.
         You can also provide one or more data files as attachments, either in OPL .dat or in JSON format. This data does not
         change from solve to solve. If you have input data that does change, you can provide it to the solve method as an OPLCollector object.
         :param problemName: name of this optimization problem instance
         :type problemName: String
         :param model: an optimization model written in OPL
         :type model: ModelSource object or String
         :param resultDataModel: the application data model for the results of the optimization
         :type resultDataModel: dict<String, StructType>
         :param credentials: DOcplexcloud url and api key
         :type credentials: {"url":String, "key":String}
         :param attachments: URLs for files representing the data that does not vary from solve to solve
         :type attachments: list<URL>
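
         Example (a sketch; the URL, key, result schema, and data URL are placeholders):

             credentials = {"url": "https://api.docloud.example.com/job_manager/rest/v1",
                            "key": "YOUR_API_KEY"}
             optimizer = Optimizer("myProblem", None, resultModel, credentials,
                                   "file:constantData.dat")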
        '''
        self.name = problemName
        self.model = model
        self.resultDataModel = resultDataModel
        self.attachData(attachments)
        self.streamsRegistry = []
        self.history = []

        self.credentials = credentials

        self.jobclient = JobClient(credentials["url"], credentials["key"])
        self.solveStatus = JobSolveStatus.UNKNOWN

    def getName(self):
        """
        Returns the name of this problem
        """
        return self.name

    def setOPLModel(self, name, dotMods=None, modelText=None):
        '''
         Sets the OPL model.
         This method can take any number of dotMod arguments, but
         there are two common use cases:
         First, the optimization model can be composed of two pieces:
             - A data model that defines the tuples and tuple sets that will contain the input and output data.
             - An optimization model that defines the decision variables, decision expressions, objective function,
               constraints, and pre- and post-processing data transformations.
             The two are concatenated, so they must be presented in that order.
             If such a composite model is used, you do not need to import the data model into the
             optimization model using an OPL include statement.
         Second, you can forgo a separate data model, in which case a single dotMod that encompasses
         both the data model and the optimization model must be provided.
        @param name: the name assigned to this OPL model (should have the format of a file name with a .mod extension)
        @type name: String
        @param dotMods: URLs pointing to OPL .mod files, which will be concatenated in the order given
        @type dotMods: List<URL>
        @param modelText: the text of the OPL model, which will be concatenated in the order given
        @type modelText: List<String>
        @return: this optimizer
        @raise ValueError: if a model has already been defined or if dotMods or modelText is empty
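
        Example (a sketch; the two URLs point to hypothetical .mod files, given in
        data-model-then-optimization-model order):

            optimizer.setOPLModel("myProblem.mod",
                                  dotMods=["file:myDataModel.mod", "file:myOptModel.mod"])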
        '''
        if self.model is not None:
            raise ValueError("model has already been set")
        self.model = ModelSource(name=name, dotMods=dotMods, modelText=modelText)
        return self

    def setResultDataModel(self, resultDataModel):
        '''
        Sets the application data model for the results of the optimization
        @param resultDataModel: the application data model for the results of the optimization
        @type resultDataModel: dict<String, StructType>
        @return: this optimizer
        @raise ValueError: if a results data model has already been defined
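
        Example (a sketch; assumes Spark-style StructType/StructField/IntegerType,
        consistent with the dict<String, StructType> type above):

            resultModel = {"usages": StructType([StructField("pattern", IntegerType()),
                                                 StructField("count", IntegerType())])}
            optimizer.setResultDataModel(resultModel)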
        '''
        if self.resultDataModel is not None:
            raise ValueError("results data model has already been defined")
        self.resultDataModel = resultDataModel
        return self

    def attachData(self, attachments):
        '''
        Attaches one or more data files, either in OPL .dat or in JSON format. This data does not
        change from solve to solve. If you have input data that does change, you can provide it as a Collector object.
        @param attachments: files representing the data that does not vary from solve to solve
        @type attachments: list<URL>
        @return: this optimizer
        @raise ValueError: if an item of the same name has already been attached
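
        Example (a sketch; the URL is a placeholder):

            optimizer.attachData(["file:constantData.dat"])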
        '''
        # keep previously attached data across repeated calls, so duplicates can be detected
        if not hasattr(self, "attachments"):
            self.attachments = {}
        if attachments is not None:
            for f in attachments:
                fileName = os.path.splitext(os.path.basename(urlparse(f).path))[0]
                if fileName in self.attachments:
                    raise ValueError(fileName + " already attached")
                self.attachments[fileName] = f
        return self

    def solve(self, inputData=None, solutionId=""):
        '''
        Solves an optimization problem instance by calling the DOCloud solve service (OaaS).
        Creates a new job request, incorporating any changes to the variable input data,
        for a problem instance to be processed by the solve service.
        Once the problem is solved, the results are mapped to an instance of an OPL Collector.
        Note: this method will set a new destination for the JSON serialization of the input data.
        @param inputData: the variable, solve-specific input data
        @type inputData: OPLCollector
        @param solutionId: an identifier for the solution, used in iterative algorithms (set to empty string if not needed)
        @type solutionId: String
        @return: a solution collector
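
        Example (a sketch; ``collector`` stands for a hypothetical OPLCollector holding the variable inputs):

            solution = optimizer.solve(inputData=collector, solutionId="iter1")
            status = optimizer.getSolveStatus()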
        '''
        inputs = []
        solution = None  # remains None unless the job is processed successfully
        if self.model is None:
            raise ValueError("A model attachment must be provided to the optimizer")
        if self.model:  # is not empty
            stream = self.model.toStream()
            inputs.append({"name": self.model.getName(), "file": stream})
            self.streamsRegistry.append(stream)
        if self.attachments:  # is not empty
            for f in self.attachments:
                # open the attachment URL as a readable stream
                stream = urllib.urlopen(self.attachments[f])
                inputs.append({"name": f, "file": stream})
                self.streamsRegistry.append(stream)
        if inputData is not None:
            outStream = cStringIO.StringIO()
            inputData.setJsonDestination(outStream).toJSON()
            inStream = cStringIO.StringIO(outStream.getvalue())
            inputs.append({"name": inputData.getName() + ".json", "file": inStream})
            self.streamsRegistry.extend([outStream, inStream])

        response = self.jobclient.execute(
            input=inputs,
            output="results.json",
            load_solution=True,
            log="solver.log",
            gzip=True,
            waittime=300,  # seconds
            delete_on_completion=False)

        self.jobid = response.jobid

        status = self.jobclient.get_execution_status(self.jobid)
        if status == JobExecutionStatus.PROCESSED:
            results = cStringIO.StringIO(response.solution)
            self.streamsRegistry.append(results)
            self.solveStatus = response.job_info.get(
                'solveStatus')  # INFEASIBLE_SOLUTION or UNBOUNDED_SOLUTION or OPTIMAL_SOLUTION or...
            solution = (OPLCollector(self.getName() + "Result" + solutionId, self.resultDataModel)).setJsonSource(
                results).fromJSON()
            self.history.append(solution)
        elif status == JobExecutionStatus.FAILED:
            # report the failure message if one was provided
            message = ""
            failure_info = response.job_info.get('failureInfo')
            if failure_info is not None:
                message = failure_info.get('message', "")
            print("Failed " + message)
        else:
            print("Job Status: " + str(status))

        for s in self.streamsRegistry:
            s.close()
        self.jobclient.delete_job(self.jobid)

        return solution

    def getSolveStatus(self):
        """
        @return: the solve status; possible values are:
            UNKNOWN: The algorithm has no information about the solution.
            FEASIBLE_SOLUTION: The algorithm found a feasible solution.
            OPTIMAL_SOLUTION: The algorithm found an optimal solution.
            INFEASIBLE_SOLUTION: The algorithm proved that the model is infeasible.
            UNBOUNDED_SOLUTION: The algorithm proved the model unbounded.
            INFEASIBLE_OR_UNBOUNDED_SOLUTION: The model is infeasible or unbounded.
        """
        return self.solveStatus