def run(self, seed):
    """Seed the RNG, set up the parking monitor, and run the simulation.

    Args:
        seed: value forwarded to random.seed() so runs are reproducible.

    Side effects: initializes the SimPy simulation, activates the Arrival
    process at t=0, and simulates until G.maxTime.
    """
    random.seed(seed)
    Sim.initialize()
    s = Arrival(sim=self)
    # BUG FIX: the original used typographic quotes (’cars’, ’time’), which
    # are a SyntaxError in Python. Replaced with plain ASCII quotes.
    self.parking = Sim.Monitor(ylab='cars', tlab='time', sim=self)
    self.activate(s, s.generate(), at=0.0)
    self.simulate(until=G.maxTime)
def __init__(self, id_, resourceCapacity, serviceTime, serviceTimeModel):
    """Create a server with a monitored request queue.

    Args:
        id_: identifier for this server (stored as self.id).
        resourceCapacity: capacity of the SimPy queue resource.
        serviceTime: nominal per-request service time.
        serviceTimeModel: distribution/model used to draw service times.
    """
    # SimPy machinery: a monitored resource modelling the request queue,
    # plus a monitor for round-robin bookkeeping.
    self.queueResource = Simulation.Resource(capacity=resourceCapacity,
                                             monitored=True)
    self.serverRRMonitor = Simulation.Monitor(name="ServerMonitor")
    # Plain configuration attributes.
    self.id = id_
    self.serviceTime = serviceTime
    self.serviceTimeModel = serviceTimeModel
def run(self, aseed):
    """Seed the RNG, set up the parking monitor, and run the simulation.

    Args:
        aseed: value forwarded to random.seed() so runs are reproducible.

    Side effects: initializes the SimPy simulation, activates the Arrival
    process at t=0, and simulates until G.maxTime.
    """
    # BUG FIX: the original called random.seed(seed), but the parameter is
    # named 'aseed' — 'seed' is undefined here and raises NameError.
    random.seed(aseed)
    Sim.initialize()
    s = Arrival(name='Arrivals', sim=self)
    self.parking = Sim.Monitor(name='Parking', ylab='cars', tlab='time',
                               sim=self)
    self.activate(s, s.generate(), at=0.0)
    self.simulate(until=G.maxTime)
def run(self, aseed):
    """Build the two-shift call-centre model and run it to F.maxTime.

    Args:
        aseed: value forwarded to random.seed() so runs are reproducible.

    Creates four monitored agent pools (regular/specialist for the day and
    PM shifts), the Source/Arrival/SecondShift processes, and two monitors
    for regular and special handling times, then simulates until F.maxTime.
    """
    random.seed(aseed)

    # Monitored resource pools for both shifts.
    day_regular = Sim.Resource(capacity=F.numAgents, name="Service Agents",
                               unitName="Agent", monitored=True, sim=self)
    day_special = Sim.Resource(capacity=F.numSpecialists,
                               name="Specialist Agents",
                               unitName="Specialist", monitored=True,
                               sim=self)
    pm_regular = Sim.Resource(capacity=F.numAgentsPM,
                              name="Service AgentsPM", unitName="Agent",
                              monitored=True, sim=self)
    pm_special = Sim.Resource(capacity=F.numSpecialistsPM,
                              name="Specialist AgentsPM",
                              unitName="Specialist", monitored=True,
                              sim=self)

    self.meanTBA = 0.0
    self.initialize()

    # Processes driving the model.
    source = Source('Source', sim=self)
    arrival = Arrival('Arrival Rate', sim=self)
    shift_change = SecondShift('PM Shift', sim=self)

    # Monitors recording handling times (hours).
    self.Regular10 = Sim.Monitor(name="Regular time", ylab='hours', sim=self)
    self.Special10 = Sim.Monitor(name="Special time", ylab='hours', sim=self)

    # Activation order preserved: arrival-rate process, then the source
    # (explicitly at t=0), then the shift-change timer.
    self.activate(arrival, arrival.generate(F.aRateperhour))
    self.activate(source,
                  source.generate(resourcenormal=day_regular,
                                  resourcespecial=day_special,
                                  resourcenormalPM=pm_regular,
                                  resourcespecialPM=pm_special),
                  at=0.0)
    self.activate(shift_change, shift_change.generate(F.tPMshiftchange))

    self.simulate(until=F.maxTime)
def model(counterseed=3939393, sourceseed=1133):
    """Build and run the bank-counter simulation for 2000 time units.

    Args:
        counterseed: seed for the counter's service-time RNG.
        sourceseed: seed for the arrival Source. Previously hard-coded to
            1133 inside the body; lifted to a defaulted keyword parameter so
            experiments can vary it — callers using model() or
            model(counterseed=...) are unaffected.

    Side effects: rebinds the module globals counter, counterRV and
    waitMonitor, then initializes and runs the SimPy simulation.
    """
    global counter, counterRV, waitMonitor
    counter = Simulation.Resource(name="Clerk", capacity=1)
    counterRV = Random(counterseed)
    waitMonitor = Simulation.Monitor()
    Simulation.initialize()
    source = Source(seed=sourceseed)
    # 100 customers, mean inter-arrival time 10.0.
    Simulation.activate(source, source.generate(100, 10.0))
    ob = Observer()
    Simulation.activate(ob, ob.observe())
    Simulation.simulate(until=2000.0)
# Model
def model(counterseed=3939393):
    """Build and run the bank-counter simulation for 2000 time units.

    Side effects: rebinds the module globals counter, counterRV and
    waitMonitor, then initializes and runs the SimPy simulation.
    """
    global counter, counterRV, waitMonitor
    counter = Simulation.Resource(name="Clerk", capacity=1)
    counterRV = Random(counterseed)
    waitMonitor = Simulation.Monitor()
    Simulation.initialize()
    sourceseed = 1133
    source = Source(seed=sourceseed)
    Simulation.activate(source, source.generate(100, 10.0))
    ob = Observer()
    Simulation.activate(ob, ob.observe())
    Simulation.simulate(until=2000.0)


# Module-level monitors; presumably sampled by processes during model()
# — TODO confirm against the Customer/Observer definitions.
qu = Simulation.Monitor(name="Queue length")
wate = Simulation.Monitor(name="Wait time")

# Experiment data
# NOTE(review): sourceSeed is never read — model() uses its own
# hard-coded sourceseed. Kept to preserve module contents.
sourceSeed = 333

# Experiment
model()

# Output
pyl.figure(figsize=(5.5, 4))
pyl.plot(qu.tseries(), qu.yseries())
pyl.title("Bank12: queue length over time", fontsize=12, fontweight="bold")
pyl.xlabel("time", fontsize=9, fontweight="bold")
pyl.ylabel("queue length before counter", fontsize=9, fontweight="bold")
pyl.grid(True)
# BUG FIX: savefig() must run BEFORE show(). show() blocks until the
# window is closed, at which point the figure is torn down and the
# subsequent savefig() wrote out a blank canvas.
pyl.savefig("./bank12.png")
pyl.show()
print("Saved graph in current directory as bank12.png")
def runExperiment(args):
    """Run one replica-selection simulation experiment end to end.

    Builds servers according to args.expScenario, builds clients with
    (optionally skewed) demand weights, starts workload generators, runs
    the SimPy simulation for args.simulationDuration, then dumps every
    monitor's time series to files under ../<logFolder>/<expPrefix>_*.

    Args:
        args: argparse-style namespace; required attributes are read
            directly below (seed, numServers, serviceTime, expScenario,
            utilization, logFolder, expPrefix, ...).
    """
    # Set the random seed
    random.seed(args.seed)
    numpy.random.seed(args.seed)

    Simulation.initialize()
    servers = []
    clients = []
    workloadGens = []
    muUpdaters = []

    # Propagate network-latency and client-count settings into the shared
    # constants module read elsewhere in the project.
    constants.NW_LATENCY_BASE = args.nwLatencyBase
    constants.NW_LATENCY_MU = args.nwLatencyMu
    constants.NW_LATENCY_SIGMA = args.nwLatencySigma
    constants.NUMBER_OF_CLIENTS = args.numClients

    assert args.expScenario != ""

    # Per-server service rates; only populated by the heterogeneous
    # scenario and later used to derive the arrival rate.
    serviceRatePerServer = []
    if (args.expScenario == "base"):
        # Start the servers: identical service time for every server.
        for i in range(args.numServers):
            serv = server.Server(i,
                                 resourceCapacity=args.serverConcurrency,
                                 serviceTime=(args.serviceTime),
                                 serviceTimeModel=args.serviceTimeModel)
            servers.append(serv)
    elif (args.expScenario == "multipleServiceTimeServers"):
        # Start the servers: server i gets (i+1) * serviceTime.
        for i in range(args.numServers):
            serv = server.Server(i,
                                 resourceCapacity=args.serverConcurrency,
                                 serviceTime=((i + 1) * args.serviceTime),
                                 serviceTimeModel=args.serviceTimeModel)
            servers.append(serv)
    elif (args.expScenario == "heterogenousStaticServiceTimeScenario"):
        baseServiceTime = args.serviceTime

        # Slowness/fraction must be set together (both zero or both nonzero).
        assert args.slowServerFraction >= 0 and args.slowServerFraction < 1.0
        assert args.slowServerSlowness >= 0 and args.slowServerSlowness < 1.0
        assert not (args.slowServerSlowness == 0
                    and args.slowServerFraction != 0)
        assert not (args.slowServerSlowness != 0
                    and args.slowServerFraction == 0)

        if (args.slowServerFraction > 0.0):
            # Slow servers get a scaled-down rate; fast servers absorb the
            # remainder so the total pool rate stays constant.
            slowServerRate = (args.serverConcurrency *
                              1/float(baseServiceTime)) *\
                args.slowServerSlowness
            numSlowServers = int(args.slowServerFraction * args.numServers)
            slowServerRates = [slowServerRate] * numSlowServers

            numFastServers = args.numServers - numSlowServers
            totalRate = (args.serverConcurrency *
                         1 / float(args.serviceTime) * args.numServers)
            fastServerRate = (totalRate - sum(slowServerRates))\
                / float(numFastServers)
            fastServerRates = [fastServerRate] * numFastServers
            serviceRatePerServer = slowServerRates + fastServerRates
        else:
            serviceRatePerServer = [args.serverConcurrency *
                                    1 / float(args.serviceTime)] *\
                args.numServers

        # Shuffle so slow/fast assignment is not correlated with server id.
        random.shuffle(serviceRatePerServer)
        # print sum(serviceRatePerServer), (1/float(baseServiceTime)) * args.numServers

        # Sanity check: total rate within 1% of the homogeneous pool rate.
        assert sum(serviceRatePerServer) > 0.99 *\
            (1/float(baseServiceTime)) * args.numServers
        assert sum(serviceRatePerServer) <=\
            (1/float(baseServiceTime)) * args.numServers

        # Start the servers, each with serviceTime = 1/rate.
        for i in range(args.numServers):
            st = 1 / float(serviceRatePerServer[i])
            serv = server.Server(i,
                                 resourceCapacity=args.serverConcurrency,
                                 serviceTime=st,
                                 serviceTimeModel=args.serviceTimeModel)
            servers.append(serv)
    elif (args.expScenario == "timeVaryingServiceTimeServers"):
        assert args.intervalParam != 0.0
        assert args.timeVaryingDrift != 0.0

        # Monitor to track service rates
        serverRateMonitor = Simulation.Monitor(name="ServerRateMonitor")

        # Start the servers, each paired with a MuUpdater process that
        # varies its service rate over time.
        for i in range(args.numServers):
            serv = server.Server(i,
                                 resourceCapacity=args.serverConcurrency,
                                 serviceTime=(args.serviceTime),
                                 serviceTimeModel=args.serviceTimeModel)
            mup = muUpdater.MuUpdater(serv, args.intervalParam,
                                      args.serviceTime,
                                      args.timeVaryingDrift,
                                      serverRateMonitor)
            Simulation.activate(mup, mup.run(), at=0.0)
            muUpdaters.append(mup)
            servers.append(serv)
    else:
        print "Unknown experiment scenario"
        sys.exit(-1)

    # ---- Client demand weights ------------------------------------------
    baseDemandWeight = 1.0
    clientWeights = []

    # Skew/fraction must be set together (both zero or both nonzero).
    assert args.highDemandFraction >= 0 and args.highDemandFraction < 1.0
    assert args.demandSkew >= 0 and args.demandSkew < 1.0
    assert not (args.demandSkew == 0 and args.highDemandFraction != 0)
    assert not (args.demandSkew != 0 and args.highDemandFraction == 0)

    if (args.highDemandFraction > 0.0 and args.demandSkew >= 0):
        # A highDemandFraction of clients carries demandSkew of the load;
        # the rest share the remainder, keeping total weight ~= numClients.
        heavyClientWeight = baseDemandWeight *\
            args.demandSkew/args.highDemandFraction
        numHeavyClients = int(args.highDemandFraction * args.numClients)
        heavyClientWeights = [heavyClientWeight] * numHeavyClients

        lightClientWeight = baseDemandWeight *\
            (1 - args.demandSkew)/(1 - args.highDemandFraction)
        numLightClients = args.numClients - numHeavyClients
        lightClientWeights = [lightClientWeight] * numLightClients
        clientWeights = heavyClientWeights + lightClientWeights
    else:
        clientWeights = [baseDemandWeight] * args.numClients

    assert sum(clientWeights) > 0.99 * args.numClients
    assert sum(clientWeights) <= args.numClients

    # Start the clients
    for i in range(args.numClients):
        c = client.Client(id_="Client%s" % (i),
                          serverList=servers,
                          replicaSelectionStrategy=args.selectionStrategy,
                          accessPattern=args.accessPattern,
                          replicationFactor=args.replicationFactor,
                          backpressure=args.backpressure,
                          shadowReadRatio=args.shadowReadRatio,
                          rateInterval=args.rateInterval,
                          cubicC=args.cubicC,
                          cubicSmax=args.cubicSmax,
                          cubicBeta=args.cubicBeta,
                          hysterisisFactor=args.hysterisisFactor,
                          demandWeight=clientWeights[i],
                          costExponent=args.costExponent,
                          concurrencyWeight=args.concurrencyWeight)
        clients.append(c)

    # Start workload generators (analogous to YCSB)
    latencyMonitor = Simulation.Monitor(name="Latency")

    # This is where we set the inter-arrival times based on
    # the required utilization level and the service time
    # of the overall server pool.
    arrivalRate = 0
    interArrivalTime = 0
    if (len(serviceRatePerServer) > 0):
        # Heterogeneous scenario: derive arrival rate from the explicit
        # per-server rates.
        print serviceRatePerServer
        arrivalRate = (args.utilization * sum(serviceRatePerServer))
        interArrivalTime = 1 / float(arrivalRate)
    elif (args.expScenario == "timeVaryingServiceTimeServers"):
        # Use the average of the base rate and the drifted rate.
        mu = 1 / float(args.serviceTime)
        mu_dot_D = mu * args.timeVaryingDrift
        avg_mu = (mu + mu_dot_D) / 2.0
        arrivalRate = args.utilization *\
            (args.numServers * args.serverConcurrency * avg_mu)
        interArrivalTime = 1 / float(arrivalRate)
        print "avg_mu", avg_mu, "mu", mu, "mu.D", mu_dot_D
        print "serviceTime", args.serviceTime
        print "interArrivalTime", interArrivalTime,\
            "interArrivalTimeMu",\
            1/(args.numServers * args.utilization *
               args.serverConcurrency * mu),\
            "interArrivalTimeMuD",\
            1/(args.numServers * args.utilization *
               args.serverConcurrency * mu_dot_D)
        print "capacity", interArrivalTime/args.utilization,\
            "capacityMu",\
            1/(args.numServers * args.serverConcurrency * mu),\
            "capacityMuD",\
            1/(args.numServers * args.serverConcurrency * mu_dot_D)
    else:
        # Homogeneous scenarios: pool rate = numServers * concurrency / st.
        arrivalRate = args.numServers *\
            (args.utilization * args.serverConcurrency *
             1/float(args.serviceTime))
        interArrivalTime = 1 / float(arrivalRate)
        print "serviceTime", args.serviceTime
        print "interArrivalTime", interArrivalTime

    # Each generator issues numRequests/numWorkload requests at
    # interArrivalTime * numWorkload spacing, so the aggregate rate matches.
    for i in range(args.numWorkload):
        w = workload.Workload(i, latencyMonitor,
                              clients,
                              args.workloadModel,
                              interArrivalTime * args.numWorkload,
                              args.numRequests / args.numWorkload,
                              args.batchSizeModel,
                              args.batchSizeParam)
        # NOTE(review): the trailing comma makes this statement a 1-tuple
        # expression; harmless, but probably accidental.
        Simulation.activate(w, w.run(), at=0.0),
        workloadGens.append(w)

    # Begin simulation
    Simulation.simulate(until=args.simulationDuration)

    #
    # Print a bunch of timeseries
    #
    pendingRequestsFD = open(
        "../%s/%s_PendingRequests" % (args.logFolder, args.expPrefix), 'w')
    waitMonFD = open("../%s/%s_WaitMon" % (args.logFolder, args.expPrefix),
                     'w')
    actMonFD = open("../%s/%s_ActMon" % (args.logFolder, args.expPrefix),
                    'w')
    latencyFD = open("../%s/%s_Latency" % (args.logFolder, args.expPrefix),
                     'w')
    latencyTrackerFD = open(
        "../%s/%s_LatencyTracker" % (args.logFolder, args.expPrefix), 'w')
    rateFD = open("../%s/%s_Rate" % (args.logFolder, args.expPrefix), 'w')
    tokenFD = open("../%s/%s_Tokens" % (args.logFolder, args.expPrefix), 'w')
    receiveRateFD = open(
        "../%s/%s_ReceiveRate" % (args.logFolder, args.expPrefix), 'w')
    edScoreFD = open("../%s/%s_EdScore" % (args.logFolder, args.expPrefix),
                     'w')
    serverRRFD = open("../%s/%s_serverRR" % (args.logFolder, args.expPrefix),
                      'w')
    serverRateFD = open(
        "../%s/%s_serverRate" % (args.logFolder, args.expPrefix), 'w')

    # Per-client monitor dumps.
    for clientNode in clients:
        printMonitorTimeSeriesToFile(pendingRequestsFD,
                                     clientNode.id,
                                     clientNode.pendingRequestsMonitor)
        printMonitorTimeSeriesToFile(latencyTrackerFD,
                                     clientNode.id,
                                     clientNode.latencyTrackerMonitor)
        printMonitorTimeSeriesToFile(rateFD,
                                     clientNode.id,
                                     clientNode.rateMonitor)
        printMonitorTimeSeriesToFile(tokenFD,
                                     clientNode.id,
                                     clientNode.tokenMonitor)
        printMonitorTimeSeriesToFile(receiveRateFD,
                                     clientNode.id,
                                     clientNode.receiveRateMonitor)
        printMonitorTimeSeriesToFile(edScoreFD,
                                     clientNode.id,
                                     clientNode.edScoreMonitor)

    # Per-server monitor dumps plus summary means.
    for serv in servers:
        printMonitorTimeSeriesToFile(waitMonFD,
                                     serv.id,
                                     serv.queueResource.waitMon)
        printMonitorTimeSeriesToFile(actMonFD,
                                     serv.id,
                                     serv.queueResource.actMon)
        printMonitorTimeSeriesToFile(serverRRFD,
                                     serv.id,
                                     serv.serverRRMonitor)
        print "------- Server:%s %s ------" % (serv.id, "WaitMon")
        print "Mean:", serv.queueResource.waitMon.mean()

        print "------- Server:%s %s ------" % (serv.id, "ActMon")
        print "Mean:", serv.queueResource.actMon.mean()

    if (len(muUpdaters) > 0):
        # NOTE(review): reads muUpdaters[0].serviceRateMonitor, but the
        # MuUpdater above was constructed with a local named
        # serverRateMonitor — confirm the attribute name in MuUpdater.
        printMonitorTimeSeriesToFile(serverRateFD, "0",
                                     muUpdaters[0].serviceRateMonitor)
    print "------- Latency ------"
    # Mean over all recorded latencies; assumes each monitor entry's value
    # is a whitespace-separated string whose first field is the latency —
    # TODO confirm against Workload's monitor.observe() calls.
    print "Mean Latency:",\
        sum([float(entry[1].split()[0]) for entry in latencyMonitor])\
        / float(len(latencyMonitor))

    printMonitorTimeSeriesToFile(latencyFD, "0", latencyMonitor)
    # Every issued request must have produced exactly one latency sample.
    assert args.numRequests == len(latencyMonitor)
def __init__(self, id_, serverList, replicaSelectionStrategy,
             accessPattern, replicationFactor, backpressure,
             shadowReadRatio, rateInterval, cubicC, cubicSmax,
             cubicBeta, hysterisisFactor, demandWeight,
             costExponent, concurrencyWeight):
    """Initialize a simulated client.

    Args:
        id_: client identifier string.
        serverList: replica servers this client can send to; used as dict
            keys for all per-replica state below.
        replicaSelectionStrategy: name of the selection scheme (e.g. "ds").
        accessPattern: key-access pattern identifier.
        replicationFactor: number of replicas per key.
        backpressure: if True, start one BackpressureScheduler per replica.
        shadowReadRatio: fraction of shadow reads.
        rateInterval: window used by rate limiters and receive-rate trackers.
        cubicC, cubicSmax, cubicBeta: CUBIC congestion-control parameters.
        hysterisisFactor: hysteresis factor for rate adjustment.
        demandWeight: this client's share of overall demand.
        costExponent, concurrencyWeight: scoring parameters.
    """
    self.id = id_
    self.serverList = serverList
    self.accessPattern = accessPattern
    self.replicationFactor = replicationFactor
    self.REPLICA_SELECTION_STRATEGY = replicaSelectionStrategy
    # Monitors recording client-side metrics over simulated time.
    self.pendingRequestsMonitor = \
        Simulation.Monitor(name="PendingRequests")
    self.latencyTrackerMonitor = Simulation.Monitor(name="ResponseHandler")
    self.rateMonitor = Simulation.Monitor(name="AlphaMonitor")
    self.receiveRateMonitor = Simulation.Monitor(name="ReceiveRateMonitor")
    self.tokenMonitor = Simulation.Monitor(name="TokenMonitor")
    self.edScoreMonitor = Simulation.Monitor(name="edScoreMonitor")
    self.backpressure = backpressure    # True/False
    self.shadowReadRatio = shadowReadRatio
    self.demandWeight = demandWeight
    self.costExponent = costExponent
    self.concurrencyWeight = concurrencyWeight

    # Book-keeping and metrics to be recorded follow...

    # Number of outstanding requests at the client
    self.pendingRequestsMap = {node: 0 for node in serverList}

    # Number of outstanding requests times oracle-service time of replica
    self.pendingXserviceMap = {node: 0 for node in serverList}

    # Last-received response time of server
    self.responseTimesMap = {node: 0 for node in serverList}

    # Used to track response time from the perspective of the client
    self.taskSentTimeTracker = {}
    self.taskArrivalTimeTracker = {}
    self.taskBatchCounter = {}

    # Record waiting and service times as relayed by the server
    self.expectedDelayMap = {node: {} for node in serverList}
    self.lastSeen = {node: 0 for node in serverList}

    # Round robin parameters
    self.rrIndex = {node: 0 for node in serverList}

    # Rate limiters per replica
    self.rateLimiters = {
        node: RateLimiter("RL-%s" % node.id, self, 10, rateInterval)
        for node in serverList
    }
    self.lastRateDecrease = {node: 0 for node in serverList}
    # 10 matches the RateLimiter's initial token value above.
    self.valueOfLastDecrease = {node: 10 for node in serverList}
    self.receiveRate = {
        node: ReceiveRate("RL-%s" % node.id, rateInterval)
        for node in serverList
    }
    self.lastRateIncrease = {node: 0 for node in serverList}
    self.rateInterval = rateInterval

    # Parameters for congestion control
    self.cubicC = cubicC
    self.cubicSmax = cubicSmax
    self.cubicBeta = cubicBeta
    self.hysterisisFactor = hysterisisFactor

    # Parameters for Refresh selection
    self.sendtime = {node: 0.00000000 for node in serverList}

    # Backpressure related initialization: one scheduler process per
    # replica, activated immediately.
    if (backpressure is True):
        self.backpressureSchedulers = \
            {node: BackpressureScheduler("BP-%s" % node.id, self)
             for node in serverList}
        for node in serverList:
            Simulation.activate(self.backpressureSchedulers[node],
                                self.backpressureSchedulers[node].run(),
                                at=Simulation.now())

    # ds-metrics
    if (replicaSelectionStrategy == "ds"):
        self.latencyEdma = {
            node: ExponentiallyDecayingSample(100, 0.75, self.clock)
            for node in serverList
        }
        self.dsScores = {node: 0 for node in serverList}
        # NOTE(review): this creates and activates one DynamicSnitch per
        # rate-limiter entry, but neither node nor rateLimiter is passed
        # to it — looks like one snitch total was intended; confirm.
        for node, rateLimiter in self.rateLimiters.items():
            ds = DynamicSnitch(self, 100)
            Simulation.activate(ds, ds.run(),
                                at=Simulation.now())
def __init__(self, serverList, client):
    """Create an observer process watching the given client and servers.

    Args:
        serverList: servers being observed.
        client: the client whose latency is recorded into self.monitor.
    """
    # Register with the SimPy process machinery first (conventional order;
    # the attribute assignments below are independent of it).
    Simulation.Process.__init__(self, name='Observer')
    self.serverList = serverList
    self.client = client
    self.monitor = Simulation.Monitor(name="Latency")