Example no. 1
    def run_model(self, model, scenario, is_dods=True, override_env=None):
        '''Run the scenario from the specified model.

        Returns:
            a dict with the output tables.
        '''
        variables = {}
        variables['inputs'] = self.handler.get_inputs(model, scenario)
        variables['output_lock'] = get_environment().output_lock
        variables['outputs'] = {}

        self.override_env = None
        if isinstance(get_environment(), LocalEnvironment):
            if override_env is None:
                override_env = _WorkerEnvironmentOverride(get_environment())
            self.override_env = override_env

        with OverrideEnvironment(self.override_env):
            saved_environ = os.environ.copy()
            try:
                if is_dods:
                    os.environ['IS_DODS'] = 'True'
                    os.environ['DOCPLEX_CONTEXT'] = 'solver.auto_publish=True'
                with self.handler.get_input_stream(model, scenario,
                                                   'model.py') as m:
                    try:
                        contents = m.read()
                        if not isinstance(contents, str):
                            contents = contents.decode('utf-8')
                        exec(contents, variables)
                    except SyntaxError as err:
                        error_class = err.__class__.__name__
                        detail = err.args[0]
                        line_number = err.lineno
                        imsg = 'File "model.py", line %s\n' % line_number
                        imsg += err.text.rstrip() + '\n'
                        spaces = ' ' * (err.offset -
                                        1) if err.offset > 1 else ''
                        imsg += spaces + "^\n"
                        imsg += '%s: %s\n' % (error_class, detail)
                        raise InterpreterError(imsg)
                    except Exception as err:
                        error_class = err.__class__.__name__
                        detail = err.args[0]
                        _, _, tb = sys.exc_info()
                        ttb = traceback.extract_tb(tb)
                        ttb[1] = ('model.py', ttb[1][1], ttb[1][2],
                                  get_line_of_model(ttb[1][1], self.handler,
                                                    model, scenario))
                        line_number = ttb[1][1]
                        ttb = ttb[1:]
                        s = traceback.format_list(ttb)
                        imsg = (''.join(s))
                        imsg += '%s: %s\n' % (error_class, detail)
                        raise InterpreterError(imsg)
                return variables['outputs']
            finally:
                os.environ = saved_environ
Example no. 2
 def __init__(self, l, D, L, ek, name):
   t0 = time.time()
   print("iniciovalerio")
   x = {}
   y = [] # auxiliary variable used to print the arc construction
   p = [] # auxiliary variable used to build the constraints
   q = [] # auxiliary variable used to build the constraints
   r1 = [] # auxiliary variable used to build the constraints
   r2 = [] # auxiliary variable used to build the constraints
   d = [] # auxiliary variable used to build the demand
   f = []
   vcm = Model(name='valeriodecarvalho')
   lmin = np.amin(l)
   self.criterio2(L, lmin, x, vcm)
   self.criterio1(l, x, vcm, L)
   self.getvar(vcm, y) 
   #print(y)
   z = []
   self.conservF(vcm, p, q, L, l, f, r1, r2, D, d, ek) 
   for k in range(0, len(L)):
     z.append(vcm.get_var_by_name('z_' + str(k)))
   #print(z)
   self.getvar(vcm, y)
   consts = []
   n = vcm.number_of_constraints
   for i in range(n):
     consts.append(vcm.get_constraint_by_index(i))
   vcms = vcm.solve(url=None, key=None, log_output=True)
   tempo = time.time() - t0
   reseau = open(name, 'w')
   reseau.write('Objective function: ' + str(vcm.solution.get_objective_value()))
   reseau.write('\nTotal time: ' + str(tempo))
   reseau.close()
   with get_environment().get_output_stream("solution.json") as fp:
     vcm.solution.export(fp, "json")
Example no. 3
    def run_optimization(self):
        self.area_discr()
        self.dec_var()

        self.general_constr()
        self.charge_constr()
        self.time_constr()
        self.create_obj_funct()
        #self.plp.update()

        # self.cm.write("cplex.lp")  # write the model to an LP file
        # self.plp.setParam('Timelimit', 5)  # time limit in seconds
        # self.plp.setParam('MIPGap', .1)  # stop at a 10 percent relative MIP gap

        self.cm.print_information()
        s = self.cm.solve()
        self.cm.display()
        if s:
            qty_vars = self.cm.find_matching_vars(pattern="q_")
            for fv in qty_vars:
                food_name = fv.name[2:]
                print("Buy {0:<25} = {1:9.6g}".format(food_name,
                                                      fv.solution_value))

            self.cm.report_kpis()
            # Save the CPLEX solution as "solution.json" program output
            with get_environment().get_output_stream("solution.json") as fp:
                self.cm.solution.export(fp, "json")
        else:
            print("* model has no solution")
Example no. 4
def get_environment():
    """ Get the environment descriptor

    Returns:
        Environment descriptor, None if none
    """
    return None if runenv is None else runenv.get_environment()
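Since this wrapper returns None when the environment package is absent, callers should guard against that case. A minimal sketch, assuming a solved docplex model object named mdl (hypothetical here):

# Hedged sketch: fall back to a plain file when no environment
# descriptor is available; otherwise mirror the usual pattern of
# writing through env.get_output_stream.
env = get_environment()
if env is None:
    with open('solution.json', 'wb') as fp:   # plain-file fallback
        mdl.solution.export(fp, 'json')
else:
    with env.get_output_stream('solution.json') as fp:
        mdl.solution.export(fp, 'json')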
Example no. 5
def new_solver_created(solver):
    """ Add environment solver listener if needed
    Args:
        solver:  Solver to update
    """
    # Check if environment package not present
    if runenv is None:
        return

    # Check no environment
    env = runenv.get_environment()
    if env is None:
        return

    # Retrieve auto_publish context
    pctx = solver.context.solver.auto_publish
    if pctx is True:
        # Create default context to retrieve always default values
        pctx = Context()
    elif not isinstance(pctx, Context):
        return

    # Check no local publish
    if isinstance(env, runenv.LocalEnvironment) and not pctx.local_publish:
        return

    # Add solver listener
    solver.add_listener(EnvSolverListener(env, pctx))
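For reference, a short sketch of the three auto_publish shapes this hook distinguishes; it assumes a solver object with the context layout used above, and only shows local_publish because it is the one field the snippet reads:

# auto_publish may be a boolean or a Context:
solver.context.solver.auto_publish = True    # publish with default settings
solver.context.solver.auto_publish = False   # neither True nor a Context: no listener

ctx = Context()
ctx.local_publish = True                     # also publish in a LocalEnvironment
solver.context.solver.auto_publish = ctx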
Example no. 6
    def write_output_table(self, df, context,
                           output_property_name=None,
                           output_name=None):
        '''Publishes the output `df`.

        The `context` is used to control the output name:

            - If context.solver.auto_publish is true, the `df` is written
              using `output_name`.
            - If context.solver.auto_publish is false, this method does
              nothing.
            - If context.solver.auto_publish.output_property_name is true,
              then `df` is written using `output_name`.
            - If context.solver.auto_publish.output_property_name is None or
              False, this method does nothing.
            - If context.solver.auto_publish.output_property_name is a string,
              it is used as the name under which `df` is published.

        Example:

            A solver can be defined as publishing a result as data frame::

                class SomeSolver(PublishResultAsDf):
                    def __init__(self, output_customizer):
                        # output something if context.solver.autopublish.somesolver_output is set
                        self.output_table_property_name = 'somesolver_output'
                        # output filename unless specified by somesolver_output:
                        self.default_output_table_name = 'somesolver.csv'
                        # customizer if the user wants one
                        self.output_table_customizer = output_customizer
                        # uses pandas.DataFrame if possible, otherwise falls back to namedtuples
                        self.output_table_using_df = True

                    def solve(self):
                        # do something here and return a result as a df
                        result = pandas.DataFrame(columns=['A','B','C'])
                        return result

            Example usage::

               solver = SomeSolver()
               results = solver.solve()
               solver.write_output_table(results)

        '''
        prop = value_if_defined(self, 'output_table_property_name')
        prop = output_property_name if output_property_name else prop
        default_name = value_if_defined(self, 'default_output_table_name')
        default_name = output_name if output_name else default_name
        names = get_auto_publish_names(context, prop, default_name)
        use_df = value_if_defined(self, 'output_table_using_df', True)
        if names:
            env = get_environment()
            customizer = value_if_defined(self, 'output_table_customizer', identity_func)
            for name in names:
                r = customizer(df)
                if pandas and use_df:
                    env.write_df(r, name)
                else:
                    # assume r is a namedtuple
                    write_csv(env, r, r[0]._fields, name)
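When pandas is unavailable (or output_table_using_df is false), the fallback branch reads column names from r[0]._fields, so the customizer must return a non-empty sequence of namedtuples. A minimal illustrative customizer; the Row type and the shape of the solver result are assumptions:

from collections import namedtuple

# Hypothetical record type matching the solver's result columns.
Row = namedtuple('Row', ['A', 'B', 'C'])

def rows_customizer(result):
    # Turn each result record into a namedtuple so the write_csv
    # fallback above can read the column names from r[0]._fields.
    return [Row(*rec) for rec in result]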
Example no. 7
def _get_environment(solver, prop):
    """ Get the environment to call, checking if auto-publish is required.
    Args:
        solver: Source CPO solver
        prop:   Auto_publish specific property that should be checked
    Returns:
        Environment to call, None if none
    """
    # Check if environment available
    if not ENVIRONMENT_PRESENT:
        return None

    # Skip if environment is local
    env = runenv.get_environment()
    if isinstance(env, runenv.LocalEnvironment):
        return None

    # Check auto_publish config
    pblsh = solver.context.solver.auto_publish
    if (pblsh is None) or not (
        (pblsh is True) or
        (isinstance(pblsh, Context) and pblsh.get_attribute(prop))):
        return None

    # Return
    return env
Example no. 8
    def _init_listener(self, solver):
        """ Initialize this listener
        Args:
            solver:  Calling solver
        Returns:
            True if listener is OK, False if should be removed from solver.
        """
        # Check if environment package not present
        self.env = None if runenv is None else runenv.get_environment()
        if self.env is None:
            return False

        # Retrieve auto_publish context
        pctx = solver.context.solver.auto_publish
        if pctx is True:
            # Create default context to retrieve always default values
            pctx = Context()
        elif not isinstance(pctx, Context):
            return False
        self.publish_context = pctx

        # Check no local publish
        if isinstance(self.env,
                      runenv.LocalEnvironment) and not pctx.local_publish:
            return False

        # Keep as listener
        return True
Example no. 9
    def notify_solution(self, incumbents):
        super(KpiRecorder, self).notify_solution(incumbents)
        publish_name_fn = self.publish_name_fn
        if self._last_accept:
            self._report_count += 1
            # build a name/value dictionary with builtin values
            k = self._model.kpis_as_dict(self.current_solution, use_names=True)
            name_values = {publish_name_fn(kn): kv for kn, kv in iteritems(k)}

            # the current objective must be published under this exact key
            name_values['PROGRESS_CURRENT_OBJECTIVE'] = self.current_solution.objective_value
            # predefined keys, not KPIs
            # name_values[publish_name_fn('_objective')] = self.current_solution.objective_value
            name_values[publish_name_fn('_time')] = self._last_time

            self._kpis.append(name_values)

            # usually publish kpis in environment...
            if self.publish_hook is not None:
                self.publish_hook(name_values)

            # save kpis.csv table
            context = self._model.context
            if auto_publishing_kpis_table_names(context) is not None:
                write_kpis_table(env=get_environment(),
                                 context=context,
                                 model=self._model,
                                 solution=self.current_solution)
Example no. 10
    def notify_solution(self, sol):
        pdata = self.current_progress_data

        publish_name_fn = self.publish_name_fn
        # 1. build a dict from formatted names to kpi values.
        name_values = {
            publish_name_fn(kp.name): kp.compute(sol)
            for kp in self.model.iter_kpis()
        }
        # 2. add predefined keys for obj, time.
        name_values['PROGRESS_CURRENT_OBJECTIVE'] = sol.objective_value
        name_values[publish_name_fn('_time')] = pdata.time

        # 3. store it (why???)
        self._kpi_dicts.append(name_values)

        # usually publish kpis in environment...
        if self.publish_hook is not None:
            self.publish_hook(name_values)

        # save kpis.csv table
        context = self._context
        if auto_publishing_kpis_table_names(context) is not None:
            write_kpis_table(env=get_environment(),
                             context=context,
                             model=self.model,
                             solution=sol)
Example no. 11
def is_in_docplex_worker():
    try:
        import docplex.util.environment as runenv
        is_in_worker = isinstance(runenv.get_environment(), runenv.WorkerEnvironment)
    except Exception:
        is_in_worker = False
    return is_in_worker
Example no. 12
    def __init__(self, **kwargs):
        super(SolverContext, self).__init__(**kwargs)
        self.log_output = False
        self.max_threads = get_environment().get_available_core_count()
        self.auto_publish = create_default_auto_publish_context()
        self.kpi_reporting = BaseContext()
        from docplex.mp.progress import ProgressClock
        self.kpi_reporting.filter_level = ProgressClock.Gap
Example no. 13
def get_all_inputs():
    result = {}
    env = get_environment()
    for iname in [f for f in os.listdir('.') if splitext(f)[1] == '.csv']:
        df = env.read_df(iname, index_col=None)
        datasetname, _ = splitext(iname)
        result[datasetname] = df
    return result
Example no. 14
def run_LP(num_types, names, prices, procs, gbRAM, freestorage,
           max_cost_in_previous_time_window, account_limits, min_cores,
           min_ram, min_free_storage, max_cost_hour, ram_per_job,
           procs_per_job, aws_instances, **kwargs):
    avoid_instances = set()
    # Pair each instance name with its RAM and core counts; materialize
    # the filters as lists because zip/filter/map are lazy in Python 3.
    rpj_helper = zip(names, gbRAM)    # RAM per instance, checked against ram_per_job
    ppj_helper = zip(names, procs)    # cores per instance, checked against procs_per_job
    a1 = [x for x in rpj_helper if x[1] < ram_per_job]
    a2 = [x for x in ppj_helper if x[1] < procs_per_job]
    avoidp = a1 + a2
    avoid_names = set(x[0] for x in avoidp)
    serversp = zip(names, max_cost_in_previous_time_window, [0] * num_types,
                   account_limits)
    server_characteristicsp = zip(names, procs, gbRAM, freestorage)
    servers = [x for x in serversp if x[0] not in avoid_names]
    server_characteristics = [x for x in server_characteristicsp
                              if x[0] not in avoid_names]
    job_parameters = []
    job_parameters.append(("min_cores", min_cores, min_cores * 5))
    job_parameters.append(("min_ram", min_ram, min_ram * 5))
    job_parameters.append(
        ("min_free_storage", min_free_storage, min_free_storage * 5))
    Server = namedtuple("Instance", ["name", "cost", "qmin", "qmax"])
    Job_param = namedtuple("Param", ["name", "qmin", "qmax"])
    server = [Server(*s) for s in servers]
    assert (len(server) > 0)
    params = [Job_param(*j) for j in job_parameters]
    server_info = {(sc[0], params[j].name): sc[1 + j]
                   for sc in server_characteristics
                   for j in range(len(job_parameters))}
    mdl = Model(name='Instance Bidding')
    qty = {
        s: mdl.integer_var(lb=s.qmin, ub=s.qmax, name=s.name)
        for s in server
    }
    for p in params:
        amount = mdl.sum(qty[s] * server_info[s.name, p.name] for s in server)
        mdl.add_range(p.qmin, amount, p.qmax)
        mdl.add_kpi(amount, publish_name="Total %s" % p.name)
    mdl.minimize(mdl.sum(qty[s] * s.cost for s in server))
    mdl.print_information()
    url = None
    key = None
    if not mdl.solve(url=url, key=key):
        print("*** Problem has no solution")
    else:
        mdl.float_precision = 3
        print("* model solved as function:")
        mdl.report()
        mdl.print_solution()
        mdl.report_kpis()
        mdl.export_as_lp("cplex.lp")
        os.system("cat cplex.lp")
        # Save the CPLEX solution as "solution.json" program output
        with get_environment().get_output_stream("instances.json") as fp:
            mdl.solution.export(fp, "json")
    return
Example no. 15
def get_environment():
    """ Returns the Environment object that represents the actual execution environment.

    Returns:
        Environment descriptor, None if none.
    """
    # Check if environment available
    if not ENVIRONMENT_PRESENT:
        return None
    return runenv.get_environment()
Example no. 16
def write_all_outputs(outputs):
    '''Write all dataframes in ``outputs`` as .csv.

    Args:
        outputs: The map of outputs 'outputname' -> 'output df'
    '''
    for (name, df) in iteritems(outputs):
        csv_file = '%s.csv' % name
        with get_environment().get_output_stream(csv_file) as fp:
            fp.write(df.to_csv(index=False).encode('utf-8'))
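Combined with get_all_inputs (Examples no. 13, 17, and 19), this gives the usual worker round trip. A minimal sketch, where build_model is a hypothetical helper returning a docplex model and its output data frames:

def main():
    inputs = get_all_inputs()            # one data frame per input .csv
    mdl, outputs = build_model(inputs)   # hypothetical model builder
    if mdl.solve():
        write_all_outputs(outputs)       # publish each data frame as <name>.csv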
Example no. 17
def get_all_inputs():
    '''Utility method to read a list of files and return a dict with all
    read data frames.
    Returns:
        a map { datasetname: data frame }
    '''
    result = {}
    env = get_environment()
    for iname in [f for f in os.listdir('.') if splitext(f)[1] == '.csv']:
        with env.get_input_stream(iname) as in_stream:
            df = pandas.read_csv(in_stream)
            datasetname, _ = splitext(iname)
            result[datasetname] = df
    return result
Example no. 18
def write_all_outputs(outputs):
    '''Write all dataframes in ``outputs`` as .csv.

    Args:
        outputs: The map of outputs 'outputname' -> 'output df'
    '''
    global output_lock
    with output_lock:
        for (name, df) in iteritems(outputs):
            csv_file = '%s.csv' % name
            with get_environment().get_output_stream(csv_file) as fp:
                fp.write(df.to_csv(index=False).encode('utf-8'))
    if len(outputs) == 0:
        print("Warning: no outputs written")
Example no. 19
def get_all_inputs():
    '''Utility method to read a list of files and return a dict with all
    read data frames.

    Returns:
        a map { datasetname: data frame }
    '''
    result = {}
    env = get_environment()
    for iname in [f for f in os.listdir('.') if splitext(f)[1] == '.csv']:
        df = env.read_df(iname, index_col=None)
        datasetname, _ = splitext(iname)
        result[datasetname] = df
    return result
Example no. 20
def set_stop_callback(cb):
    env = get_environment()
    try:
        env.set_stop_callback(cb)
    except AttributeError:
        # env.set_stop_callback does not exist -> older version of docplex;
        # fall back to the solve-hook workaround
        try:
            import docplex.worker.solvehook as worker_env
            hook = worker_env.get_solve_hook()
            if hook:
                hook.set_stop_callback(cb)
        except Exception:
            # ignore errors
            pass
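A usage sketch: the callback fires when the environment asks the job to stop, so a typical use is flushing partial results. Here partial_outputs and write_all_outputs are assumed to exist (see Examples no. 16 and 18):

def on_stop():
    # Publish whatever has been computed so far before the job ends.
    write_all_outputs(partial_outputs)

set_stop_callback(on_stop)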
Example no. 21
def write_all_outputs(outputs):
    '''Write all dataframes in ``outputs`` as .csv.

    Args:
        outputs: The map of outputs 'outputname' -> 'output df'
    '''
    for (name, df) in iteritems(outputs):
        csv_file = '%s.csv' % name
        print(csv_file)
        with get_environment().get_output_stream(csv_file) as fp:
            if sys.version_info[0] < 3:
                fp.write(df.to_csv(index=False, encoding='utf8'))
            else:
                fp.write(df.to_csv(index=False).encode(encoding='utf8'))
    if len(outputs) == 0:
        print("Warning: no outputs written")
Example no. 22
    def notify_solution(self, sol):
        env = get_environment()
        pdata = self.current_progress_data
        context = self._context

        # 1. Start with empty table
        name_values = {}
        # 2. add predefined keys for obj, time.
        name_values['PROGRESS_CURRENT_OBJECTIVE'] = sol.objective_value

        # 3. store it (why???)
        self._kpi_dicts.append(name_values)

        # new stats for https://github.ibm.com/IBMDecisionOptimization/dd-planning/issues/2491
        if env.is_dods():
            name_values['PROGRESS_GAP'] = pdata.mip_gap
            name_values['PROGRESS_BEST_OBJECTIVE'] = pdata.best_bound
            name_values['STAT.cplex.solve.explored'] = pdata.current_nb_nodes
            name_values['STAT.cplex.solve.opened'] = pdata.remaining_nb_nodes
            name_values[
                'STAT.cplex.solve.iterationCount'] = pdata.current_nb_iterations
            name_values['STAT.cplex.solve.elapsedTime'] = pdata.time

        # add KPIs
        publish_name_fn = self.publish_name_fn
        name_values.update({
            publish_name_fn(kp.name): kp.compute(sol)
            for kp in self.model.iter_kpis()
        })
        name_values[publish_name_fn('_time')] = pdata.time

        # usually publish kpis in environment...
        if self.publish_hook is not None:
            self.publish_hook(name_values)

        # save kpis.csv table
        if auto_publishing_kpis_table_names(context) is not None:
            write_kpis_table(env=env,
                             context=context,
                             model=self.model,
                             solution=sol)
Example no. 23
def main(instances, verbose=False):
    env = Environment()
    env.print_information()

    dir_path = os.path.dirname(os.path.realpath(__file__))

    solutions = []

    for index, instance in enumerate(instances):

        data, h, w, problem_class = instance

        data = sorted(data, key=itemgetter(0), reverse=True)

        try:
            mdl = build_model(data, w, h)

            if verbose:
                print(mdl.export_to_string())
                mdl.print_information()

            mdl.export_as_lp(dir_path + "/" + mdl.name + "_" + str(index) +
                             '.lp')
            if mdl.solve(log_output=verbose):
                if verbose:
                    mdl.float_precision = 3
                    print("* model solved as function:")
                    mdl.print_solution()
                    mdl.report_kpis()

                # Save the CPLEX solution as "solution.json" program output
                with get_environment().get_output_stream(
                        dir_path + "/" + mdl.name + "_" + str(index) +
                        "_solution.json") as fp:
                    mdl.solution.export(fp, "json")
            else:
                print("* model has no solution")

        except Exception as e:
            # Problem with more than 1000 variables
            print(e)
Example no. 24
    Alternatively, Context.make_default_context() searches the PYTHONPATH for
    the following files:

        * cplex_config.py
        * cplex_config_<hostname>.py
        * docloud_config.py (must only contain context.solver.docloud configuration)

    These files contain the credentials and other properties. For example,
    something similar to::

       context.solver.docloud.url = "https://docloud.service.com/job_manager/rest/v1"
       context.solver.docloud.key = "example api_key"
    """
    url = None
    key = None

    mdl = build_diet_model()

    # Solve the model. If a key has been specified above, the solve
    # will use IBM Decision Optimization on cloud.
    if not mdl.solve(url=url, key=key):
        print("*** Problem has no solution")
    else:
        mdl.float_precision = 3
        print("* model solved as function:")
        mdl.print_solution()
        mdl.report_kpis()
        # Save the CPLEX solution as "solution.json" program output
        with get_environment().get_output_stream("solution.json") as fp:
            mdl.solution.export(fp, "json")
Example no. 25
def set_stop_callback(cb):
    env = get_environment()
    env.abort_callbacks += [cb]
Example no. 26
# 3rd constraint: there is a fixed number of ambulances to deploy.
# Total number of deployed ambulances
mdl.add_constraint(
    mdl.sum(ambulance_vars[c_loc]
            for c_loc in ambulance_locations) == nb_ambulances)

# #### Express the objective
# The objective is to minimize the total weighted distance from locations to
# ambulances, so that every location is served by a nearby ambulance.
# Minimize total weighted distance from points to hubs
total_distance = mdl.sum(
    (1 / locations_prob[b])**2 * link_vars[c_loc, b] * get_distance(c_loc, b)
    for c_loc in ambulance_locations for b in locations)
mdl.minimize(total_distance)

mdl.print_information()

assert mdl.solve(), "!!! Solve of the model fails"

# ### Step 6: Investigate the solution and then run an example analysis
# The solution can be analyzed by displaying the locations of the ambulances on a map.
total_distance = mdl.objective_value
open_ambulances = [
    c_loc for c_loc in ambulance_locations
    if ambulance_vars[c_loc].solution_value == 1
]

from docplex.util.environment import get_environment
env = get_environment()
with env.get_output_stream("ambulances.csv") as f:
    pd.DataFrame(open_ambulances).to_csv(f, index=False, quotechar='"')
Example no. 27
    context.solver.add_log_to_solution = False


Detailed description
--------------------
"""

from docplex.cp.utils import *
from docplex.cp.parameters import CpoParameters, ALL_PARAMETER_NAMES

import sys, socket, os, traceback

# Check if running in a worker environment
try:
    import docplex.util.environment as runenv
    IS_IN_WORKER = isinstance(runenv.get_environment(),
                              runenv.WorkerEnvironment)
except:
    IS_IN_WORKER = False

# CP Optimizer Interactive executable name
CPO_EXEC_INTERACTIVE = "cpoptimizer" + (".exe" if IS_WINDOWS else "")

# CP Optimizer solver library name
CPO_LIBRARY = "lib_cpo_solver_12100" + (".dll" if IS_WINDOWS else ".so")

# Determine path extension to search executables
python_home = os.path.dirname(os.path.abspath(sys.executable))
if IS_WINDOWS:
    PATH_EXTENSION = [os.path.join(python_home, "Scripts")]
    appdata = os.environ.get('APPDATA')
Example no. 28
def get_line_of_model(n):
    env = get_environment()
    with env.get_input_stream('model.py') as m:
        lines = m.readlines()
        return lines[n - 1].decode("utf-8")
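A sketch of how such a helper can annotate an error raised while exec'ing the model source, in the spirit of Example no. 1; model_source is assumed to hold the text of model.py:

import sys
import traceback

try:
    exec(model_source, {})
except Exception:
    _, _, tb = sys.exc_info()
    lineno = traceback.extract_tb(tb)[-1][1]   # line number inside model.py
    print('model.py, line %d: %s' % (lineno, get_line_of_model(lineno)))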
Example no. 29
    df_sol_starts['resources'] = df_sol_starts.index.get_level_values(
        'resources')
    df_sol_starts['periods'] = df_sol_starts.index.get_level_values('periods')
    df_sol_starts.columns = ['value', 'resources', 'periods']
    df_sol_starts = df_sol_starts.reset_index(drop=True)

    df_sol_works = df_decision_vars.work.apply(
        lambda v: v.solution_value).unstack(level='resources')
    df_sol_works = df_sol_works.stack(level='resources').to_frame()
    df_sol_works['resources'] = df_sol_works.index.get_level_values(
        'resources')
    df_sol_works['periods'] = df_sol_works.index.get_level_values('periods')
    df_sol_works.columns = ['value', 'resources', 'periods']
    df_sol_works = df_sol_works.reset_index(drop=True)

    df_sol_nr = df_decision_vars_res.nr.apply(
        lambda v: v.solution_value).to_frame()
    df_sol_nr['resources'] = df_sol_nr.index
    df_sol_nr = df_sol_nr.reset_index(drop=True)

    outputs = {}
    outputs['starts'] = df_sol_starts
    outputs['works'] = df_sol_works
    outputs['nr'] = df_sol_nr
else:
    print "  Infeasible"
    outputs = {}

from docplex.util.environment import get_environment
get_environment().store_solution(outputs)
Example no. 30
def run_NG_RAN_model_fase_3(FO_fase_1, FO_fase_2):
    """
    This method uses the main topology structure to compute the optimal solution of phase 3
    :return: the optimal value of phase 3
    """

    print("Running Fase - 3")
    print("-----------------------------------------------------------------------------------------------------------")
    allocation_time_start = time.time()

    # read the topology data from the JSON file
    read_topology_500()
    # read_topology()

    # dsgs = dsg_structure()
    dsgs = dsg_structure_500()

    rus = ru_location_500()
    # rus = ru_location()

    # # print the RU's location
    # for ru in rus:
    #     print(rus[ru])

    # create the set of O's (functional splits)
    # O's(id, O_cpu, O_ram)
    O1 = Os(1, 2, 2)
    O2 = Os(2, 2, 2)
    O3 = Os(3, 2, 2)
    O4 = Os(4, 2, 2)
    O5 = Os(5, 2, 2)
    O6 = Os(6, 2, 2)
    O7 = Os(7, 2, 2)
    O8 = Os(8, 2, 2)

    # set of O's
    conj_Os = {1: O1, 2: O2, 3: O3, 4: O4, 5: O5, 6: O6}

    # set of DSG priorities
    dsg_p = {1: 4, 2: 1, 4: 6, 5: 5, 6: 10, 7: 9, 8: 25, 9: 7, 10: 8}


    # create the phase 3 model
    mdl = Model(name='NGRAN Problem3', log_output=True)

    # index tuples used by the decision variable
    i = [(p, d, b) for p in paths for d in dsgs for b in rus if paths[p].seq[2] == rus[b].RC]

    # Decision variable X
    mdl.x = mdl.binary_var_dict(i, name='x')

    # Phase 3 objective function
    mdl.minimize(mdl.sum(mdl.sum(mdl.x[it] * dsg_p[it[1]] for it in i if it[1] == dsg) for dsg in dsgs))

    # Constraint carried over from phase 2
    mdl.add_constraint(mdl.sum(mdl.min(1, mdl.sum(mdl.x[it] for it in i if it[1] == dsg)) for dsg in dsgs) == FO_fase_2)

    # Constraint carried over from phase 1
    mdl.add_constraint(mdl.sum(mdl.min(1, mdl.sum(mdl.x[it] for it in i if c in paths[it[0]].seq)) for c in rcs if rcs[c].id != 0) - mdl.sum(mdl.sum(mdl.max(0, (mdl.sum(mdl.x[it] for it in i if ((o in dsgs[it[1]].Os_CU and paths[it[0]].seq[0] == rcs[c].id) or (o in dsgs[it[1]].Os_DU and paths[it[0]].seq[1] == rcs[c].id) or (o in dsgs[it[1]].Os_RU and paths[it[0]].seq[2] == rcs[c].id))) - 1)) for o in conj_Os) for c in rcs) == FO_fase_1)

    # Constraint 1 (4)
    for b in rus:
        mdl.add_constraint(mdl.sum(mdl.x[it] for it in i if it[2] == b) == 1, 'unicity')

    # Constraint 1.1 (N)
    mdl.add_constraint(mdl.sum(mdl.x[it] for it in i if paths[it[0]].target != rus[it[2]].RC) == 0, 'path')

    # constraint 1.2 (N): DSGs with 2 splits may only choose paths with 2 splits
    mdl.add_constraint(mdl.sum(mdl.x[it] for it in i if paths[it[0]].seq[0] != 0 and (
                it[1] == 6 or it[1] == 7 or it[1] == 8 or it[1] == 9 or it[1] == 10)) == 0, 'dsgs_path_pick')

    # constraint 1.3 (N): DSGs with 3 splits may only choose paths with 3 splits
    mdl.add_constraint(mdl.sum(mdl.x[it] for it in i if
                               paths[it[0]].seq[0] == 0 and it[1] != 6 and it[1] != 7 and it[1] != 8 and it[1] != 9 and
                               it[1] != 10) == 0, 'dsgs_path_pick2')

    # constraint 1.4 (N): DSGs with 1 split may only choose paths with 1 split
    mdl.add_constraint(
        mdl.sum(mdl.x[it] for it in i if paths[it[0]].seq[0] == 0 and paths[it[0]].seq[1] == 0 and it[1] != 8) == 0,
        'dsgs_path_pick3')

    # constraint 1.5 (N): paths across 2 RCs cannot use D-RAN
    mdl.add_constraint(
        mdl.sum(mdl.x[it] for it in i if paths[it[0]].seq[0] == 0 and paths[it[0]].seq[1] != 0 and it[1] == 8) == 0,
        'dsgs_path_pick4')

    # constraint 1.6 (N): paths must go to the RC where the RU is located
    for ru in rus:
        mdl.add_constraint(
            mdl.sum(mdl.x[it] for it in i if paths[it[0]].seq[2] != rus[ru].RC and it[2] == rus[ru].id) == 0)

    # Constraint 2 (5)
    for l in links:
        for k in links:
            if l[0] == k[1] and l[1] == k[0]:
                break
        mdl.add_constraint(mdl.sum(mdl.x[it] * dsgs[it[1]].bw_BH for it in i if l in paths[it[0]].p1) + mdl.sum(
            mdl.x[it] * dsgs[it[1]].bw_MH for it in i if l in paths[it[0]].p2) + mdl.sum(
            mdl.x[it] * dsgs[it[1]].bw_FH for it in i if l in paths[it[0]].p3) +
                           mdl.sum(mdl.x[it] * dsgs[it[1]].bw_BH for it in i if k in paths[it[0]].p1) + mdl.sum(
            mdl.x[it] * dsgs[it[1]].bw_MH for it in i if k in paths[it[0]].p2) + mdl.sum(
            mdl.x[it] * dsgs[it[1]].bw_FH for it in i if k in paths[it[0]].p3)
                           <= capacity[l], 'links_bw')

    # Constraint 3 (6)
    for it in i:
        mdl.add_constraint((mdl.x[it] * paths[it[0]].delay_p1) <= dsgs[it[1]].delay_BH, 'delay_req_p1')

    # Constraint 4 (7)
    for it in i:
        mdl.add_constraint((mdl.x[it] * paths[it[0]].delay_p2) <= dsgs[it[1]].delay_MH, 'delay_req_p2')

    # Constraint 5 (8)
    for it in i:
        mdl.add_constraint((mdl.x[it] * paths[it[0]].delay_p3 <= dsgs[it[1]].delay_FH), 'delay_req_p3')

    # Constraint 6 (9)
    for c in rcs:
        mdl.add_constraint(
            mdl.sum(mdl.x[it] * dsgs[it[1]].cpu_CU for it in i if c == paths[it[0]].seq[0]) + mdl.sum(
                mdl.x[it] * dsgs[it[1]].cpu_DU for it in i if c == paths[it[0]].seq[1]) + mdl.sum(
                mdl.x[it] * dsgs[it[1]].cpu_RU for it in i if c == paths[it[0]].seq[2]) <= rcs[c].cpu,
            'rcs_cpu_usage')

    # Constraint 7 (9) RAM
    # for c in rcs:
    #     mdl.add_constraint(
    #         mdl.sum(mdl.x[it] * dsgs[it[1]].ram_CU for it in i if c == paths[it[0]].seq[0]) + mdl.sum(
    #             mdl.x[it] * dsgs[it[1]].ram_DU for it in i if c == paths[it[0]].seq[1]) + mdl.sum(
    #             mdl.x[it] * dsgs[it[1]].ram_RU for it in i if c == paths[it[0]].seq[2]) <= rcs[c].ram,
    #         'rcs_ram_usage')

    allocation_time_end = time.time()
    start_time = time.time()

    warm_start = mdl.new_solution()
    for it in f2_vars:
        warm_start.add_var_value(mdl.x[it], 1)
    #warm_start.set_objective_value(3)
    print(warm_start)

    mdl.add_mip_start(warm_start)

    mdl.solve()
    end_time = time.time()
    print("Stage 3 - Alocation Time: {}".format(alocation_time_end - alocation_time_start))
    print("Stage 3 - Enlapsed Time: {}".format(end_time - start_time))

    for it in i:
        if mdl.x[it].solution_value == 1:
            print("x{} -> {}".format(it, mdl.x[it].solution_value))
            print(paths[it[0]].seq)

    with get_environment().get_output_stream("solution.json") as fp:
        mdl.solution.export(fp, "json")

    disp_Os = {}

    for rc in rcs:
        disp_Os[rc] = {"O1": 0, "O2": 0, "O3": 0, "O4": 0, "O5": 0, "O6": 0, "O7": 0, "O8": 0}

    for it in i:
        for rc in rcs:
            if mdl.x[it].solution_value == 1:
                if rc in paths[it[0]].seq:
                    seq = paths[it[0]].seq
                    if rc == seq[0]:
                        os = dsgs[it[1]].Os_CU
                        for o in os:
                            if o != 0:
                                dct = disp_Os[rc]
                                dct["O{}".format(o)] += 1
                                disp_Os[rc] = dct

                    if rc == seq[1]:
                        os = dsgs[it[1]].Os_DU
                        for o in os:
                            if o != 0:
                                dct = disp_Os[rc]
                                dct["O{}".format(o)] += 1
                                disp_Os[rc] = dct

                    if rc == seq[2]:
                        os = dsgs[it[1]].Os_RU
                        for o in os:
                            if o != 0:
                                dct = disp_Os[rc]
                                dct["O{}".format(o)] += 1
                                disp_Os[rc] = dct

    print("FO: {}".format(mdl.solution.get_objective_value()))

    for rc in disp_Os:
        print(str(rc) + str(disp_Os[rc]))

Example no. 31
if __name__ == '__main__':
    """DOcplexcloud credentials can be specified with url and api_key in the code block below.

    Alternatively, Context.make_default_context() searches the PYTHONPATH for
    the following files:

        * cplex_config.py
        * cplex_config_<hostname>.py
        * docloud_config.py (must only contain context.solver.docloud configuration)

    These files contain the credentials and other properties. For example,
    something similar to::

       context.solver.docloud.url = "https://docloud.service.com/job_manager/rest/v1"
       context.solver.docloud.key = "example api_key"
    """
    url = None
    key = None

    cutstock_model = DefaultCutStockMasterModel()
    
    # Solve the model. If a key has been specified above, the solve
    # will use IBM Decision Optimization on cloud.
    if cutstock_model.run(url=url, key=key):
        cutstock_model.print_solution()
        # Save the solution as "solution.json" program output.
        with get_environment().get_output_stream("solution.json") as fp:
            cutstock_model.save_solution_as_json(fp)