Example #1
def solve_bvp_4d(pt1, pt2):
    """Solve the 4-D two-point BVP for a double integrator with fixed initial and
    final states by solving the two 2-D problems (x and y) independently."""
    # u(t) is a bang-bang controller; as a first attempt, the sign of the first
    # control input in each dimension is taken from the direction of travel.
    u_x = 1.0 if pt2.x > pt1.x else -1.0  # sign of first control input u_x
    u_y = 1.0 if pt2.y > pt1.y else -1.0  # sign of first control input u_y
    # Solve the optimal 2-D trajectory for each dimension separately.
    ts_x, tf_x, u_x = call_bvp_2d(u_x, pt1, pt2, 'x', 'x_vel')
    ts_y, tf_y, u_y = call_bvp_2d(u_y, pt1, pt2, 'y', 'y_vel')
    # Keep the 2-D BVP solution with the larger final time tf.
    ts1_x, ts1_y, tf, u_x, u_y = call_fixed(u_x, u_y, tf_x, tf_y, ts_x, ts_y, pt1, pt2)
    # A bang-bang controller is stored the same way as a bang-off-bang controller,
    # with its two switch times equal.
    trajectory = Trajectory(u_x, ts1_x, u_y, ts1_y, tf)
    get_discrete_states(pt1, pt2, trajectory)
    get_equi_time_discrete_states(pt1, pt2, trajectory)
    return trajectory
Example #2
    def aggressivenessToKeep(self, refExecution):
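        # Estimate the aggressiveness level at which the reference execution would still
        # be kept: extrapolate its trajectory past the last recorded effort using the
        # median behaviour of previous executions on the same instance, then check the
        # result against envelopes built from subsets of those executions.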
        effortCap = refExecution['trajectory'].getLastEffort()
        valueCap = refExecution['trajectory'].getResult()
        executions = self.data.getExecutions(refExecution['instance-id'], False, refExecution['id'] - 1)
        if len(executions) == 0: return None

        steps = []
        ratios = []
        for exe in executions:
            steps.append(len([point for point in exe['trajectory'].points if point.effort >= effortCap]))
            if exe['trajectory'].getValue(effortCap) != 0:
                ratios.append(exe['trajectory'].getResult() / exe['trajectory'].getValue(effortCap))
        medianSteps = ceil(statistics.median(steps))
        medianRatio = statistics.median(ratios)

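        # Extrapolate the reference trajectory beyond its last effort: add medianSteps
        # points in equal effort/value steps towards the value valueCap * medianRatio.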
        trajectory = Trajectory(0, 0, 0, [])
        trajectory.points = list(refExecution['trajectory'].points)  # copy, so the extrapolated points below do not mutate the reference trajectory
        if medianSteps > 0:
            effortStep = (self.data.scenario['effort-limit'] - effortCap) / (medianSteps + 1)
            finalValue = valueCap * medianRatio
            valueStep = (valueCap - finalValue) / medianSteps
            
            lastEffort = effortCap
            lastValue = valueCap
            for i in range(medianSteps):
                lastEffort += effortStep
                lastValue -= valueStep
                trajectory.points.append(Point(lastEffort, lastValue))

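        # Walk through envelopes built from increasingly large prefixes of the previous
        # executions (sorted by result) until one is found that the extrapolated
        # trajectory never exceeds; the returned aggressiveness reflects how far this
        # walk gets.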
        executions.sort(key = lambda x : x['trajectory'].getResult())
        amount = 0
        for i in range(len(executions)):
            bound = i + 1
            efforts = []
            for exe in executions[:bound]:
                efforts.extend([point.effort for point in exe['trajectory'].points if point.effort not in efforts])
            efforts.sort()
            envelope = Trajectory(0, 0, 0, [])
            for effort in efforts:
                envelope.addPoint(Point(effort, max([exe['trajectory'].getValue(effort) for exe in executions[:bound]])), self.data.scenario['effort-limit'])
            efforts = efforts + [point.effort for point in trajectory.points if point.effort not in efforts]
            cap = False
            for effort in efforts:
                if trajectory.getValue(effort) > envelope.getValue(effort):
                    cap = True
                    break
            amount += 1
            if not cap:
                break
        return 1 - (amount / len(executions))
Example #3
    def aggregateConfigurations(self, trajectories):
        # Aggregate the configurations' trajectories point-wise: collect all distinct
        # effort values and take the minimum value ("best") or the maximum value
        # ("worst") over the trajectories at each effort.
        envelope = Trajectory(0, 0, 0, [])
        efforts = []
        for trajectory in trajectories:
            efforts.extend([point.effort for point in trajectory.points if point.effort not in efforts])
        efforts.sort()
        for effort in efforts:
            if self.data.scenario['ac'] == "best":
                envelope.points.append(Point(effort, min([trajectory.getValue(effort) for trajectory in trajectories])))
            elif self.data.scenario['ac'] == "worst":
                envelope.points.append(Point(effort, max([trajectory.getValue(effort) for trajectory in trajectories])))
        return envelope
Example #4
    def aggregate(self, replications):
        # Aggregate the replications point-wise, unless the scenario asks for the
        # exponential model; "best" takes the minimum value and "worst" the maximum.
        if self.data.scenario['ar'] == "exp":
            return self.exponentialModel(replications)
        result = Trajectory(0, 0, 0, [])
        efforts = []
        for replication in replications:
            efforts.extend([point.effort for point in replication.points if point.effort not in efforts])
        efforts.sort()
        for effort in efforts:
            if self.data.scenario['ar'] == "best":
                result.points.append(Point(effort, min([replication.getValue(effort) for replication in replications])))
            elif self.data.scenario['ar'] == "worst":
                result.points.append(Point(effort, max([replication.getValue(effort) for replication in replications])))
        return result
Example #5
    def getTrajectoryFromString(self, description, instanceId, candidateId,
                                seed):
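        # Parse a serialized trajectory: strip the enclosing delimiter characters, split
        # the points on "/" and each point into an effort,value pair on ",".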
        points = []
        description = description[1:-1]
        if len(description) > 0:
            content = description.split("/")
            for point in content:
                parts = point.split(",")
                effort = float(parts[0])
                value = float(parts[1])
                points.append(Point(effort, value))

        return Trajectory(candidateId, instanceId, seed, points)
Example #6
    def getEnvelope(self):
        if self.data.execution['iteration'] <= 1: return "<first iteration>"
        if self.data.scenario['strategy'] == "elitist":
            # Elitist strategy: build the envelope from the aggregated executions of the
            # elite candidates; when the current candidate is itself an elite, no
            # envelope is returned.
            if self.data.execution['candidate-id'] in self.data.execution['irace-elites']: return "<evaluating elite candidate>"
            elites = self.selectElites()
            aggregatedReplications = self.aggregateReplications(elites)
            if len(aggregatedReplications) == 0: return "<no previous executions>"
            envelope = self.aggregateConfigurations(aggregatedReplications)
            envelope.clean()
            return envelope
        if self.data.scenario['strategy'] == "adaptive":
            # Adaptive strategy: keep a fraction of the executions on this instance,
            # sorted by result and sized by the current aggressiveness, and take their
            # point-wise maximum as the envelope.
            executions = self.data.getExecutions(self.data.execution['instance-id'], False)
            if len(executions) == 0: return "<no previous execution>"
            executions.sort(key = lambda x : x['trajectory'].getResult())
            index = max(1, ceil((1 - self.data.execution['aggressiveness']) * len(executions)))
            executions = executions[:index]
            efforts = []
            for execution in executions:
                efforts.extend([point.effort for point in execution['trajectory'].points if point.effort not in efforts])
            efforts.sort()
            envelope = Trajectory(0, 0, 0, [])
            for effort in efforts:
                envelope.addPoint(Point(effort, max([execution['trajectory'].getValue(effort) for execution in executions])), self.data.scenario['effort-limit'])
            return envelope
        return "<no strategy defined>"
Example #7
    def aggressivenessToCap(self, refExecution):
        # Estimate the aggressiveness level at which the reference execution would be
        # capped: check its trajectory against envelopes built from increasingly large
        # prefixes of the previous executions on the same instance, sorted by result.
        executions = self.data.getExecutions(refExecution['instance-id'], False, refExecution['id'] - 1)
        if len(executions) == 0: return None
        executions.sort(key = lambda x : x['trajectory'].getResult())
        amount = 0
        for i in range(len(executions)):
            bound = i + 1
            efforts = []
            for exe in executions[:bound]:
                efforts.extend([point.effort for point in exe['trajectory'].points if point.effort not in efforts])
            efforts.sort()
            envelope = Trajectory(0, 0, 0, [])
            for effort in efforts:
                envelope.addPoint(Point(effort, max([exe['trajectory'].getValue(effort) for exe in executions[:bound]])), self.data.scenario['effort-limit'])
            efforts = efforts + [point.effort for point in refExecution['trajectory'].points if point.effort not in efforts]
            cap = False
            for effort in efforts:
                if refExecution['trajectory'].getValue(effort) > envelope.getValue(effort):
                    cap = True
                    break
            if cap:
                amount += 1
            else:
                break
        return 1 - (amount / len(executions))
Example #8
    def exponentialModel(self, replications):
        # Exponential aggregation: for each distinct value seen in the replications,
        # average the efforts reported by getEffort (using alpha * effort-limit as the
        # penalty value) and scale the mean by -log(1 - (1 - p)), i.e. -log(p).
        result = Trajectory(0, 0, 0, [])
        values = []
        for replication in replications:
            for point in replication.points:
                value = point.value
                if value not in values:
                    values.append(value)
        values.sort(reverse = True)
        for value in values:
            meanEffort = 0
            penaltyValue = self.data.scenario['effort-limit'] * self.data.scenario['alpha']
            for replication in replications:
                meanEffort += replication.getEffort(value, penaltyValue)
            meanEffort = meanEffort / len(replications)
            effort = -log(1 - (1 - self.data.scenario['p'])) * meanEffort
            point = Point(effort, value)
            result.points.append(point)
        return result
Example #9
def runAlgorithm(command):
    global capping, data
    trajectory = Trajectory(data.execution['candidate-id'],
                            data.execution['instance-id'],
                            data.execution['seed'], [])
    startTime = time.time()
    status = None

    directory = "execution-c" + str(
        data.execution['candidate-id']) + "-i" + str(
            data.execution['instance-id']) + "-s" + str(data.execution['seed'])
    os.mkdir(directory)
    fileOutput = open(directory + "/tempOut.dat", "w+")
    fileError = open(directory + "/tempErr.dat", "w+")
    readOutput = open(directory + "/tempOut.dat", "r")
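    # The target run writes to tempOut.dat; a separate read handle is used to poll its
    # output while the process is still running.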

    process = subprocess.Popen(command,
                               stdout=fileOutput,
                               stderr=fileError,
                               env=os.environ,
                               shell=True)
    executionEffort = 0
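    # Poll the running process, parsing any new output lines into trajectory points.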
    while process.poll() is None:
        output = readOutput.readlines()
        point = None
        if len(output) > 0:
            point = parseOutput(output, time.time() - startTime)
            if point is not None:
                trajectory.addPoint(point, data.scenario['effort-limit'])
        # Update the effort spent so far, depending on the configured effort type, and
        # clamp it to the effort limit.
        if data.scenario['effort-type'] == "time":
            currentEffort = time.time() - startTime
        elif point is not None:
            currentEffort = point.effort
        elif not trajectory.isEmpty():
            currentEffort = trajectory.getLastEffort()
        else:
            currentEffort = 0
        executionEffort = min(max(executionEffort, currentEffort),
                              data.scenario['effort-limit'])

        # Halt the target run externally once the effort limit is reached.
        if data.scenario['external-halt'] and executionEffort == data.scenario['effort-limit']:
            killProcess(process.pid)
            status = 0
            break

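        # Ask the capping mechanism whether the run should be stopped; if so, record a
        # capped point at the current effort and kill the process (status 11 marks capping).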
        if data.scenario['capping']:
            if capping.cap(executionEffort, trajectory):
                trajectory.addCappedPoint(
                    Point(min(data.scenario['effort-limit'], executionEffort),
                          trajectory.getResult()))
                killProcess(process.pid)
                status = 11
                break

        if data.scenario['effort-type'] == "time": time.sleep(0.5)

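    # The run finished (normally, by external halt, or by capping): report the resulting
    # value, then clean up the temporary execution directory.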
    if status is None:
        status = process.poll()

    if status == 0:
        output = readOutput.readlines()
        if len(output) > 0:
            point = parseOutput(output, time.time() - startTime)
            if point is not None:
                trajectory.addPoint(point, data.scenario['effort-limit'])
                executionEffort = point.effort
        result = "ok" if checkFeasibility(fileOutput) else "infeasible"
        value = trajectory.getResult() if result != "infeasible" else sys.maxsize
        if data.scenario['budget-type'] == "timeout" or data.estimationRun:
            print(str(value) + ' ' + str(time.time() - startTime))
        else:
            print(value)
    elif status == 11:
        result = "capped" if checkFeasibility(fileOutput) else "infeasible"
        value = (trajectory.getResult()
                 if result == "capped" and data.scenario['penalty'] == "best-so-far"
                 else sys.maxsize)
        if data.scenario['budget-type'] == "timeout":
            print(str(value) + ' ' + str(time.time() - startTime))
        else:
            print(value)
    else:
        exit(status)

    fileOutput.close()
    fileError.close()
    readOutput.close()
    shutil.rmtree(directory)

    data.finish(executionEffort, result, trajectory, capping)