Example #1
def solve(model, name, params=Params()):
    path = model.lp.solver.path

    if params.solveur == "coin":
        # CBC: gap tolerance and time limit come straight from params;
        # msg=1 echoes the solver log when logging is switched on.
        if params.logSolveur == "on":
            model.lp.setSolver(solvers.PULP_CBC_CMD(msg=1,
                                                    fracGap=params.solveurToleranceRelative,
                                                    maxSeconds=params.solveurMaxTime,
                                                    keepFiles=1))
        else:
            model.lp.setSolver(solvers.PULP_CBC_CMD(msg=0,
                                                    fracGap=params.solveurToleranceRelative,
                                                    maxSeconds=params.solveurMaxTime,
                                                    keepFiles=1))
    else:
        # CPLEX: the same settings are passed as interactive-optimizer commands.
        path = params.pathCplex
        options = [" set mip tol mipgap  " + str(params.solveurToleranceRelative)]
        options += [" set timelimit " + str(params.solveurMaxTime) + "\n"]
        if params.logSolveur == "on":
            model.lp.setSolver(solvers.CPLEX_CMD(path, msg=1, keepFiles=1, options=options))
        else:
            model.lp.setSolver(solvers.CPLEX_CMD(path, msg=0, keepFiles=1, options=options))

    # Wall-clock timing: start from the negative timestamp, add the end timestamp.
    solutionTime = -time.time()
    model.lp.solve()
    solutionTime += time.time()

    model.lp.writeLP(name + ".lp")
    model.indicators["solutionTime"] = solutionTime
    getIndicators(model)
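The Params and getIndicators helpers above are project-specific, so the snippet is not runnable on its own. A minimal self-contained sketch of the same timing-and-solver-selection pattern, written against the legacy pulp.solvers keyword names used throughout these examples (newer PuLP releases renamed maxSeconds/fracGap to timeLimit/gapRel), might look like this:

import time
import pulp
from pulp import solvers  # legacy (pre-2.0) PuLP layout, as in the examples on this page

# Toy stand-in for model.lp: minimise x + 2*y subject to x + y >= 10.
prob = pulp.LpProblem("demo", pulp.LpMinimize)
x = pulp.LpVariable("x", lowBound=0)
y = pulp.LpVariable("y", lowBound=0)
prob += x + 2 * y
prob += x + y >= 10

solver = solvers.PULP_CBC_CMD(msg=0, maxSeconds=60, fracGap=0.01)

solutionTime = -time.time()  # same timing trick as above: negative start plus end time
prob.solve(solver)
solutionTime += time.time()

print(pulp.LpStatus[prob.status], pulp.value(prob.objective), solutionTime)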
Example #2
def is_redundant(constraints, paths, c):
    # Drop constraint c, then optimise c's own expression over the remaining
    # constraints; if the optimum still respects c.bound, c is implied by the rest.
    filtered = [cc for cc in constraints if cc.cid != c.cid]
    xs, prob = construct_prob(len(paths), filtered)
    prob += c.expr(xs)

    # fracGap=10 is a deliberately loose relative MIP gap; status 1 is LpStatusOptimal.
    prob.solve(solvers.PULP_CBC_CMD(fracGap=10))
    return prob.status == 1 and value(prob.objective) <= c.bound
Example #3
    def solve(self, time_limit):
        try:
            # New PuLP needs this
            self.model.solve(solvers.COIN_CMD(maxSeconds=time_limit))
        except solvers.PulpSolverError:
            # Old PuLP needs this
            self.model.solve(solvers.PULP_CBC_CMD(maxSeconds=time_limit))
        return self.process_solution()
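The try/except above papers over an API change between PuLP releases. In PuLP 2.x the solver classes are exposed on the top-level pulp module and the CBC wall-clock limit is spelled timeLimit rather than maxSeconds; a hedged sketch of the equivalent call against that newer API (worth checking against the installed version) is:

import pulp

def solve_with_time_limit(model, time_limit):
    # PuLP >= 2.0: solvers live on the pulp module itself and
    # maxSeconds/fracGap were renamed to timeLimit/gapRel.
    status = model.solve(pulp.PULP_CBC_CMD(msg=False, timeLimit=time_limit))
    return pulp.LpStatus[status]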
Example #4
    def model(self):
        # Run a threshold model on a provided demand and response_area layer

        self._create_run_dirs()

        # TODO: Figure out why it is necessary to reload here rather than just using the layer.
        block_layer = QgsVectorLayer(self.paths['block_shp_output'], "block_layer", "ogr")
        response_area_layer = QgsVectorLayer(self.paths['responder_shp_output'], "response_area_layer", "ogr")
        if self.logger:
            self.logger.info("Reloaded layers from file. Found {} blocks and {} response areas."
                             .format(len(list(block_layer.getFeatures())),
                              len(list(response_area_layer.getFeatures()))))

        binary_coverage_polygon = pyqgis_analysis.generate_partial_coverage(block_layer, response_area_layer,
                                                                            "demand", "point_id", "area_id")
        # TODO: Build a handler which selects the proper model based on config
        # TODO: Move this code into a function (class?) for partial_coverage_threshold
        # Create the mclp model
        if self.logger:
            self.logger.info("Creating MCLP model...")
        mclp = covering.create_cc_threshold_model(binary_coverage_polygon, self.model_run_config['thresholds'])
        # Solve the model using CBC
        if self.logger:
            self.logger.info("Solving MCLP...")

        mclp.solve(solvers.PULP_CBC_CMD())
        self.problem = mclp
        if pulp.LpStatus[mclp.status] == "Infeasible":
            print(pulp.LpStatus[mclp.status])
            print("Model run {} deemed infeasible. Skipping...".format(self.model_run_config['run_name']))
            self.status = pulp.LpStatus[mclp.status]
            self.results.parse_model_output(self)
            return

        # TODO: Move result extraction into its own function (or tie to partial_coverage_model object)
        if self.logger:
            self.logger.info("Extracting results")
        ids = utilities.get_ids(mclp, "responder_layer")
        self.area_ids = ids
        point_ids = [str(ft['from_point']) for ft in list(response_area_layer.getFeatures()) if str(ft['area_id']) in ids]
        self.point_ids = point_ids
        # Generate a query that could be used as a definition query or selection in arcpy
        select_query = pyqgis_analysis.generate_query(ids, unique_field_name="area_id")
        point_select_query = pyqgis_analysis.generate_query(point_ids, unique_field_name="point_id")
        if self.logger:
            self.logger.info("Output query to use to generate response area maps is: {}".format(select_query))
            self.logger.info("Output query to use to generate response point maps is: {}".format(point_select_query))
        # Determine how much demand is covered by the results
        self.selected_points = SelectedPointsLayer().copy(RoadPointLayer(layer=self.road_points))
        self.selected_points.layer.setSubsetString(point_select_query)
        self.selected_areas = SelectedAreasLayer().copy(ResponderLayer(layer=self.response_areas))
        self.selected_areas.layer.setSubsetString(select_query)
        self.results.parse_model_output(self)
        # TODO: Fix calculation of covered demand and add to output
        #total_coverage = pyqgis_analysis.get_covered_demand(block_layer, "demand", "partial",
        #                                                    response_area_layer)
        if self.logger:
            # self.logger.info(
            # "{0:.2f}% of demand is covered".format((100 * total_coverage) / binary_coverage_polygon["totalDemand"]))
            self.logger.info("{} responders".format(len(ids)))
        _write_shp_file(self.selected_areas.layer, self.paths['model_result_shp_output'])
        _write_shp_file(self.selected_points.layer, self.paths['selected_points_shp_output'])
        self._write_qgs_project()

        return self
Example #5
    def solve(self, hosts, filter_properties):
        """This method returns a list of tuples - (host, instance_uuid)
        that are returned by the solver. Here the assumption is that
        all instance_uuids have the same requirement as specified in
        filter_properties.
        """
        host_instance_combinations = []

        num_instances = filter_properties['num_instances']
        num_hosts = len(hosts)

        instance_uuids = filter_properties.get('instance_uuids') or [
            '(unknown_uuid)' + str(i) for i in xrange(num_instances)
        ]

        filter_properties.setdefault('solver_cache', {})
        filter_properties['solver_cache'].update({
            'cost_matrix': [],
            'constraint_matrix': []
        })

        cost_matrix = self._get_cost_matrix(hosts, filter_properties)
        cost_matrix = self._adjust_cost_matrix(cost_matrix)
        constraint_matrix = self._get_constraint_matrix(
            hosts, filter_properties)

        # Create dictionaries mapping temporary host/instance keys to
        # hosts/instance_uuids. These temporary keys are to be used in the
        # solving process since we need a convention of lp variable names.
        host_keys = ['Host' + str(i) for i in xrange(num_hosts)]
        host_key_map = dict(zip(host_keys, hosts))
        instance_num_keys = [
            'InstanceNum' + str(i) for i in xrange(num_instances + 1)
        ]
        instance_num_key_map = dict(
            zip(instance_num_keys, xrange(num_instances + 1)))

        # create the pulp variables
        variable_matrix = [[
            pulp.LpVariable('HI_' + host_key + '_' + instance_num_key, 0, 1,
                            constants.LpInteger)
            for instance_num_key in instance_num_keys
        ] for host_key in host_keys]

        # create the 'prob' variable to contain the problem data.
        prob = pulp.LpProblem("Host Instance Scheduler Problem",
                              constants.LpMinimize)

        # add cost function to pulp solver
        cost_variables = [
            variable_matrix[i][j] for i in xrange(num_hosts)
            for j in xrange(num_instances + 1)
        ]
        cost_coefficients = [
            cost_matrix[i][j] for i in xrange(num_hosts)
            for j in xrange(num_instances + 1)
        ]
        prob += (pulp.lpSum([
            cost_coefficients[i] * cost_variables[i]
            for i in xrange(len(cost_variables))
        ]), "Sum_Costs")

        # add constraints to pulp solver
        for i in xrange(num_hosts):
            for j in xrange(num_instances + 1):
                if constraint_matrix[i][j] is False:
                    prob += (variable_matrix[i][j] == 0,
                             "Cons_Host_%s" % i + "_NumInst_%s" % j)

        # add additional constraints to ensure the problem is valid
        # (1) non-trivial solution: number of all instances == that requested
        prob += (pulp.lpSum([
            variable_matrix[i][j] * j for i in xrange(num_hosts)
            for j in xrange(num_instances + 1)
        ]) == num_instances, "NonTrivialCons")
        # (2) valid solution: each host is assigned 1 num-instances value
        for i in xrange(num_hosts):
            prob += (pulp.lpSum([
                variable_matrix[i][j] for j in xrange(num_instances + 1)
            ]) == 1, "ValidCons_Host_%s" % i)

        # The problem is solved using PULP's choice of Solver.
        prob.solve(
            pulp_solver_classes.PULP_CBC_CMD(
                maxSeconds=CONF.solver_scheduler.pulp_solver_timeout_seconds))

        # Create host-instance tuples from the solutions.
        if pulp.LpStatus[prob.status] == 'Optimal':
            num_insts_on_host = {}
            for v in prob.variables():
                if v.name.startswith('HI'):
                    (host_key, instance_num_key
                     ) = v.name.lstrip('HI').lstrip('_').split('_')
                    if v.varValue == 1:
                        num_insts_on_host[host_key] = (
                            instance_num_key_map[instance_num_key])
            instances_iter = iter(instance_uuids)
            for host_key in host_keys:
                num_insts_on_this_host = num_insts_on_host.get(host_key, 0)
                for i in xrange(num_insts_on_this_host):
                    host_instance_combinations.append(
                        (host_key_map[host_key], instances_iter.next()))
        else:
            LOG.warn(
                _LW("Pulp solver did not find optimal solution! "
                    "reason: %s"), pulp.LpStatus[prob.status])
            host_instance_combinations = []

        return host_instance_combinations
Example #6
    def solve(self):
        self.prob.solve(solvers.PULP_CBC_CMD())
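Example #6 shows only the solve call itself. For context, here is a small self-contained problem built and solved the same way (a toy binary knapsack with purely illustrative data), still using the legacy pulp.solvers import these examples rely on:

from pulp import LpProblem, LpVariable, LpMaximize, LpStatus, lpSum, value, solvers

items = ["a", "b", "c"]
weight = {"a": 3, "b": 4, "c": 5}
profit = {"a": 4, "b": 5, "c": 6}

prob = LpProblem("toy_knapsack", LpMaximize)
pick = LpVariable.dicts("pick", items, cat="Binary")
prob += lpSum(profit[i] * pick[i] for i in items)       # objective: total profit
prob += lpSum(weight[i] * pick[i] for i in items) <= 8  # knapsack capacity

prob.solve(solvers.PULP_CBC_CMD(msg=0))
print(LpStatus[prob.status], value(prob.objective))
print({i: pick[i].varValue for i in items})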
Example #7
    def __init__(self, num_employees, num_shifts, num_roles, num_days, employee_info, management_data, training, schedule):
        self.num_employees, self.num_shifts, self.num_roles, self.num_days = num_employees, \
                                                                             num_shifts, num_roles, num_days
        # x holds the main variables
        # employee, role, day, shift
        self.x = x = VarMatrix("x", [num_employees, num_roles, num_days, num_shifts])

        self.management_data = management_data

        self.prob = prob = LpProblem("Schedule", LpMaximize)

        self.schedule = schedule

        pprint.pprint(schedule.to_dict())

        shifts_by_day = get_shifts_by_day(schedule.days, schedule.shifts)
        pprint.pprint(shifts_by_day)
        #print('Management Data ---------------------------')
        #pprint.pprint(management_data)
        #print('Shifts ---------------------')
        #pprint.pprint(shifts)
        #print('Employee Information ----------------------')
        #pprint.pprint(employee_info)
        #print('Schedule ----------------------')
        #pprint.pprint(schedule.to_dict())

        # correct number of employees in each shift
        for role, day, shift in product_range(num_roles, num_days, num_shifts):

            if day >= len(shifts_by_day):
                continue

            if shift >= len(shifts_by_day[day]):
                continue
                
            if schedule.roles[str(role)] == shifts_by_day[day][shift]['role']:
                shift_info = shifts_by_day[day][shift]
                prob += lpSum(x[employee][role][day][shift] for employee in range(num_employees)) \
                    == shift_info['num_employees']
                print("Assigned num_emps: {} to shift: {}".format(shift_info['num_employees'],
                                                                  shift_info['role'] + ': {} - {}'.format(shift_info['start'], shift_info['end'])))

        # min/max shifts
        for employee in range(num_employees):
            prob += lpSum(x[employee][role][day][shift]
                          for role, day, shift in product_range(num_roles, num_days, num_shifts)) \
                    >= employee_info[employee]["min_shifts"]
            prob += lpSum(x[employee][role][day][shift]
                          for role, day, shift in product_range(num_roles, num_days, num_shifts)) \
                    <= employee_info[employee]["max_shifts"]

        # one shift per day
        for employee, day in product_range(num_employees, num_days):
            prob += lpSum(x[employee][role][day][shift] for role, shift in product_range(num_roles, num_shifts)) <= 1

        # no more than one person training per shift/role
        for role, day, shift in product_range(num_roles, num_days, num_shifts):
            prob += lpSum(x[employee][role][day][shift] for employee in range(num_employees) if training[employee][role]) <= 1

        # zero on roles
        for employee, role in product_range(num_employees, num_roles):
            if employee_info[employee]["role_seniority"][role] == 0:
                # Because employee has seniority 0, it is assumed they are not able to
                # work this role at all
                prob += lpSum(x[employee][role][day][shift] for day, shift in product_range(num_days, num_shifts)) == 0
        '''
        # not evening then morning
        for employee, day in product_range(num_employees, num_days-1):
            prob += lpSum(x[employee][role][day][-1] + x[employee][role][day+1][0] for role in range(num_roles)) <= 1, ""
        '''
        '''
        # Zephyr: not more than role/shift per week
        for employee, role, shift in product_range(num_employees, num_roles, num_shifts):
            prob += lpSum(x[employee][role][day][shift] for day in range(num_days)) <= 2, ""
        '''
        def coeff(employee, role, day, shift):

            if day >= len(shifts_by_day):
                return -7500
            elif shift >= len(shifts_by_day[day]):
                return -7500
            else:
                if schedule.roles[str(role)] == shifts_by_day[day][shift]['role']:

                    if employee_info[employee]["shift_pref"][day][shift]["lock_in_role"] == role:
                        c = 1000
                    else:
                        if employee_info[employee]["shift_pref"][day][shift]["pref"] == 5:
                            c = 5
                        elif employee_info[employee]["shift_pref"][day][shift]["pref"] == 1:
                            c = 1
                        elif employee_info[employee]["shift_pref"][day][shift]["pref"] == -1000:
                            c = -1000
                        else:
                            print(employee_info[employee]["shift_pref"][day][shift]["pref"])
                            raise ValueError("`employee_info` array had a bad pref value for employee", employee, "day", day, "shift", shift)
                    print("S: {} | C: {}".format(employee_info[employee]["role_seniority"][role], c))
                    c *= employee_info[employee]["role_seniority"][role]
                else:
                    c = -7500
                return c

        self.coeff = coeff

        prob += lpSum(coeff(employee, role, day, shift)*x[employee][role][day][shift]
                      for employee, role, day, shift in product_range(num_employees, num_roles, num_days, num_shifts))

        prob.solve(solvers.PULP_CBC_CMD())
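The constructor above builds and solves the model but never reads the roster back out. A short sketch of such a method, assuming (as the constraints above imply) that x[employee][role][day][shift] holds 0/1 PuLP variables, could be:

    def extract_assignments(self):
        # Collect every (employee, role, day, shift) combination the solver switched on.
        # varValue can come back as a float (e.g. 1.0), so compare against 0.5.
        return [
            (employee, role, day, shift)
            for employee, role, day, shift in product_range(
                self.num_employees, self.num_roles, self.num_days, self.num_shifts)
            if self.x[employee][role][day][shift].varValue is not None
            and self.x[employee][role][day][shift].varValue > 0.5
        ]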