Esempio n. 1
0
 def commit(self):
     """Commit this transaction's writeset (SimPy generator).

     Writes each buffered value to the local group copy (with a
     simulated per-item delay), then atomically to the system-wide
     dataset, and finally releases the held locks.
     """
     self.logger.debug('%s start commit at %s' % (self, now()))
     wsStrings = []
     #write values to local group
     for itemID, value in self.writeset.iteritems():
         item = self.snode.groups[itemID.gid][itemID]
         item.write(value)
         if self.logger.isEnabledFor(logging.DEBUG):
             wsStrings.append('(%s, %s)' % (itemID, value))
         #simulated cost per written item
         yield hold, self, RandInterval.get(
             *self.txn.config.get('commit.intvl.dist', ('fixed',
                                                        0))).next()
     #simulated fixed cost of the commit step itself
     yield hold, self, RandInterval.get(
         *self.txn.config.get('commit.time.dist', ('fixed', 0))).next()
     if self.logger.isEnabledFor(logging.DEBUG):
         self.logger.debug(
             '%s commit {%s} at %s' %
             (self.ID, ', '.join([s for s in wsStrings]), now()))
     #write to the original atomically
     dataset = self.snode.system.dataset
     for itemID, value in self.writeset.iteritems():
         dataset[itemID].write(value)
         dataset[itemID].lastWriteTxn = self.txn
     #release locks
     for step in self.releaseLocks():
         yield step
Esempio n. 2
0
 def commit(self):
     """Commit the writeset with timestamp ordering (SimPy generator).

     Writes each buffered value to the local group copy at self.ts
     (asserting the item version only moves forward), then to the
     system-wide dataset, and finally unlocks every held lock.
     """
     wsStrings = []
     for itemID, value in self.writeset.iteritems():
         item = self.snode.groups[itemID.gid][itemID]
         #per-item versions must be monotonically increasing
         assert self.ts > item.version, \
                 'txn=%s, itemID=%s, curr=%s, prev=%s' \
                 %(self.txn.ID, itemID, self.ts, item.version)
         item.write(value, self.ts)
         if self.logger.isEnabledFor(logging.DEBUG):
             wsStrings.append('(%s, %s)' % (itemID, value))
         #simulated cost per written item
         yield hold, self, RandInterval.get(
             *self.txn.config.get('commit.intvl.dist', ('fixed',
                                                        0))).next()
     #simulated fixed cost of the commit step itself
     yield hold, self, RandInterval.get(
         *self.txn.config.get('commit.time.dist', ('fixed', 0))).next()
     if self.logger.isEnabledFor(logging.DEBUG):
         self.logger.debug('%s commit {%s}' %
                           (self.ID, ', '.join([s for s in wsStrings])))
     dataset = self.snode.system.dataset
     for itemID, value in self.writeset.iteritems():
         dataset[itemID].write(value, self.ts)
         dataset[itemID].lastWriteTxn = self.txn
     for lock in self.locks:
         for step in self.unlock(lock):
             yield step
Esempio n. 3
0
def getGenerator(gcfg):
    """Build a random-interval generator from a config tuple.

    gcfg is either (key, mean) or (key, mean, cfg); when cfg is absent
    it defaults to an empty dict.
    """
    try:
        key, mean, cfg = gcfg
    #only catch the unpacking mismatch; a bare except would also hide
    #unrelated errors (e.g. gcfg not being a sequence at all)
    except ValueError:
        key, mean = gcfg
        cfg = {}
    return RandInterval.get(key, mean, cfg)
Esempio n. 4
0
def getGenerator(gcfg):
    """Build a random-interval generator from a config tuple.

    gcfg is either (key, mean) or (key, mean, cfg); when cfg is absent
    it defaults to an empty dict.
    """
    try:
        key, mean, cfg = gcfg
    #only catch the unpacking mismatch; a bare except would also hide
    #unrelated errors (e.g. gcfg not being a sequence at all)
    except ValueError:
        key, mean = gcfg
        cfg = {}
    return RandInterval.get(key, mean, cfg)
Esempio n. 5
0
 def sample(cls, config, h=0.1, tail=('p', 0), tnh=1, num=100000):
     """Create an instance from num samples of the configured distribution.

     config is either (key, mean) or (key, mean, cfg); a missing cfg
     defaults to an empty dict. Samples are drawn with
     RandInterval.generate and handed to cls.create along with the
     histogram parameters h, tail and tnh.
     """
     try:
         key, mean, cfg = config
     #only catch the unpacking mismatch; a bare except would also hide
     #unrelated errors raised while evaluating config
     except ValueError:
         key, mean = config
         cfg = {}
     x = RandInterval.generate(key, mean, cfg, num)
     return cls.create(x, h, tail, tnh)
Esempio n. 6
0
 def sample(cls, config, h=0.1, tail=('p', 0), tnh=1, num=100000):
     """Create an instance from num samples of the configured distribution.

     config is either (key, mean) or (key, mean, cfg); a missing cfg
     defaults to an empty dict. Samples are drawn with
     RandInterval.generate and handed to cls.create along with the
     histogram parameters h, tail and tnh.
     """
     try:
         key, mean, cfg = config
     #only catch the unpacking mismatch; a bare except would also hide
     #unrelated errors raised while evaluating config
     except ValueError:
         key, mean = config
         cfg = {}
     x = RandInterval.generate(key, mean, cfg, num)
     return cls.create(x, h, tail, tnh)
Esempio n. 7
0
def runSingleAcceptorFailDiscarded(nprop, mean, lcfg, debug=False):
    """Simulate nprop proposers against a single acceptor where a losing
    proposal is discarded (never retried).

    Arrival times X are cumulative exponentials with the given mean and
    per-message latencies come from the generator configured by lcfg.
    Prints the fail probability and mean/std/histogram of latencies for
    successful and failed proposals.

    NOTE(review): the exact semantics of Y, A, V, B, E are defined by
    computeYInd/computeA, which are not visible here — the inline
    descriptions below are inferred and should be confirmed.
    """
    #generate X_i (cumulative exponential arrival times)
    expoGen = RandInterval.get('expo', mean)
    X = [expoGen.next()]
    for i in range(1, nprop):
        X.append(expoGen.next() + X[i-1])
    #run
    lGen = getGenerator(lcfg)
    A = [0] * nprop
    Y = []
    V = []
    B = [-1] * nprop   #presumably first round proposer i entered; -1 = none
    E = [-1] * nprop   #presumably last round proposer i entered
    S = [None] * nprop   #True: won a round; False: discarded; None: untouched
    for j in range(nprop):
        Y.append([0] * nprop)
        V.append([0] * nprop)
        finish = True
        #compute Y
        finish = computeYInd(j, X, Y, A, V, B, E, lGen)
        if finish:
            break
        #compute A
        mval, midx = computeA(j, Y, A)
        #compute V
        for i in range(nprop):
            V[j][i] = A[j] + lGen.next()
        #compute B and E
        for i in range(nprop):
            if X[i] < V[j][i] and B[i] == -1:
                B[i] = j
                E[i] = j
                S[i] = False
        S[midx] = True
        if debug:
            printXYAVBE(j, X, Y, A, V, B, E)
            strs = ['S:']
            for i in range(nprop):
                strs.append(str(S[i])[0])
            print ' '.join(strs)
            print
    #compute fail rate and fail latency
    failCnt = 0
    succLatencies = []
    failLatencies = []
    for i in range(nprop):
        if S[i] is False:
            failCnt += 1
            failLatencies.append(V[E[i]][i] - X[i])
        else:
            succLatencies.append(V[E[i]][i] - X[i])
    print 'fail.prob=%s'%(float(failCnt) / nprop)
    print 'succ.latency.mean=%s'%numpy.mean(succLatencies)
    print 'succ.latency.std=%s'%numpy.std(succLatencies)
    print 'succ.latency.histo=(%s, %s)'%numpy.histogram(succLatencies)
    print 'fail.latency.mean=%s'%numpy.mean(failLatencies)
    print 'fail.latency.std=%s'%numpy.std(failLatencies)
    print 'fail.latency.histo=(%s, %s)'%numpy.histogram(failLatencies)
Esempio n. 8
0
def runSingleAcceptorFailDiscarded(nprop, mean, lcfg, debug=False):
    """Simulate nprop proposers against a single acceptor where a losing
    proposal is discarded (never retried).

    Arrival times X are cumulative exponentials with the given mean and
    per-message latencies come from the generator configured by lcfg.
    Prints the fail probability and mean/std/histogram of latencies for
    successful and failed proposals.

    NOTE(review): the exact semantics of Y, A, V, B, E are defined by
    computeYInd/computeA, which are not visible here — the inline
    descriptions below are inferred and should be confirmed.
    """
    #generate X_i (cumulative exponential arrival times)
    expoGen = RandInterval.get('expo', mean)
    X = [expoGen.next()]
    for i in range(1, nprop):
        X.append(expoGen.next() + X[i - 1])
    #run
    lGen = getGenerator(lcfg)
    A = [0] * nprop
    Y = []
    V = []
    B = [-1] * nprop   #presumably first round proposer i entered; -1 = none
    E = [-1] * nprop   #presumably last round proposer i entered
    S = [None] * nprop   #True: won a round; False: discarded; None: untouched
    for j in range(nprop):
        Y.append([0] * nprop)
        V.append([0] * nprop)
        finish = True
        #compute Y
        finish = computeYInd(j, X, Y, A, V, B, E, lGen)
        if finish:
            break
        #compute A
        mval, midx = computeA(j, Y, A)
        #compute V
        for i in range(nprop):
            V[j][i] = A[j] + lGen.next()
        #compute B and E
        for i in range(nprop):
            if X[i] < V[j][i] and B[i] == -1:
                B[i] = j
                E[i] = j
                S[i] = False
        S[midx] = True
        if debug:
            printXYAVBE(j, X, Y, A, V, B, E)
            strs = ['S:']
            for i in range(nprop):
                strs.append(str(S[i])[0])
            print ' '.join(strs)
            print
    #compute fail rate and fail latency
    failCnt = 0
    succLatencies = []
    failLatencies = []
    for i in range(nprop):
        if S[i] is False:
            failCnt += 1
            failLatencies.append(V[E[i]][i] - X[i])
        else:
            succLatencies.append(V[E[i]][i] - X[i])
    print 'fail.prob=%s' % (float(failCnt) / nprop)
    print 'succ.latency.mean=%s' % numpy.mean(succLatencies)
    print 'succ.latency.std=%s' % numpy.std(succLatencies)
    print 'succ.latency.histo=(%s, %s)' % numpy.histogram(succLatencies)
    print 'fail.latency.mean=%s' % numpy.mean(failLatencies)
    print 'fail.latency.std=%s' % numpy.std(failLatencies)
    print 'fail.latency.histo=(%s, %s)' % numpy.histogram(failLatencies)
Esempio n. 9
0
 def commit(self, content):
     """Commit step of the TPC protocol (SimPy generator).

     content is (attemptNo, label, ts). Writes the writeset to the
     local group copies at timestamp ts (asserting versions only move
     forward), then atomically to the system-wide dataset, records
     progress as COMMITTED for this attempt, and closes.
     """
     attemptNo, label, ts = content
     for itemID, value in self.writeset.iteritems():
         item = self.snode.groups[itemID.gid][itemID]
         #per-item versions must be monotonically increasing
         assert ts > item.version
         item.write(value, ts)
         #simulated cost per written item
         yield hold, self, RandInterval.get(*self.txn.config.get(
             'commit.intvl.dist', ('fixed', 0))).next()
     #simulated fixed cost of the commit step itself
     yield hold, self, RandInterval.get(*self.txn.config.get(
         'commit.time.dist', ('fixed', 0))).next()
     #write to the original atomically
     dataset = self.snode.system.dataset
     for itemID, value in self.writeset.iteritems():
         dataset[itemID].write(value, ts)
     self.progress = (attemptNo, TPCProtocol.COMMITTED)
     self.logger.debug('%s committed with attemptNo %s at %s'
                       %(self.ID, attemptNo, now()))
     self.close()
Esempio n. 10
0
 def run(self):
     """Main transaction lifecycle (SimPy generator).

     Prepares, then repeatedly attempts the transaction: begin, execute
     each read/write action (with a simulated per-action cost),
     try-commit, commit. A deadlock abort waits for one member of the
     cycle to finish before retrying; a timeout abort retries
     immediately. Cleanup always runs once the loop exits.
     """
     self.logger.debug('%s start at %s' % (self.ID, now()))
     self.Preparing()
     for step in self.prepare():
         yield step
     while True:
         try:
             #start
             self.Running()
             for step in self.begin():
                 yield step
             #read and write
             for action in self.txn.actions:
                 if action.isRead():
                     for step in self.read(action.itemID, action.attr):
                         yield step
                 else:
                     assert action.isWrite()
                     for step in self.write(action.itemID, action.attr):
                         yield step
                 #simulate the cost of each read/write step
                 yield hold, self, RandInterval.get(
                     *self.txn.config['action.intvl.dist']).next()
             #try commit
             self.Committing()
             for step in self.trycommit():
                 yield step
             #the commit phase is error free
             for step in self.commit():
                 yield step
             self.Committed()
             break
         except BThread.DeadlockException as e:
             self.logger.debug('%s aborted because of deadlock %s at %s' %
                               (self.ID, str(e), now()))
             self.monitor.observe('deadlock.cycle.length',
                                  len(e.waiters) + 1)
             self.monitor.start('abort.deadlock')
             self.Aborting()
             for step in self.abort():
                 yield step
             #wait for one of the waiters to leave
             waitEvts = []
             for w in e.waiters:
                 waitEvts.append(w.finish)
             yield waitevent, self, waitEvts
             self.monitor.stop('abort.deadlock')
         except TimeoutException as e:
             self.monitor.observe('abort.timeout', 0)
             #bug fix: the original format string had three placeholders
             #('%s ... %r ... %s') but only two arguments, which raised
             #TypeError whenever a timeout was actually logged
             self.logger.debug(
                 '%s aborted because of timeout on %r' %
                 (e.args[0], e.args[1]))
     for step in self.cleanup():
         yield step
     self.Finished()
Esempio n. 11
0
 def run(self):
     """Main transaction lifecycle (SimPy generator).

     Prepares, then repeatedly attempts the transaction: begin, execute
     each read/write action (with a simulated per-action cost),
     try-commit, commit. A deadlock abort waits for one member of the
     cycle to finish before retrying; a timeout abort retries
     immediately. Cleanup always runs once the loop exits.
     """
     self.logger.debug('%s start at %s' %(self.ID, now()))
     self.Preparing()
     for step in self.prepare():
         yield step
     while True:
         try:
             #start
             self.Running()
             for step in self.begin():
                 yield step
             #read and write
             for action in self.txn.actions:
                 if action.isRead():
                     for step in self.read(action.itemID, action.attr):
                         yield step
                 else:
                     assert action.isWrite()
                     for step in self.write(action.itemID, action.attr):
                         yield step
                 #simulate the cost of each read/write step
                 yield hold, self, RandInterval.get(
                     *self.txn.config['action.intvl.dist']).next()
             #try commit
             self.Committing()
             for step in self.trycommit():
                 yield step
             #the commit phase is error free
             for step in self.commit():
                 yield step
             self.Committed()
             break
         except BThread.DeadlockException as e:
             self.logger.debug('%s aborted because of deadlock %s at %s'
                               %(self.ID, str(e), now()))
             self.monitor.observe('deadlock.cycle.length',
                                  len(e.waiters) + 1)
             self.monitor.start('abort.deadlock')
             self.Aborting()
             for step in self.abort():
                 yield step
             #wait for one of the waiters to leave
             waitEvts = []
             for w in e.waiters:
                 waitEvts.append(w.finish)
             yield waitevent, self, waitEvts
             self.monitor.stop('abort.deadlock')
         except TimeoutException as e:
             self.monitor.observe('abort.timeout', 0)
             #bug fix: the original format string had three placeholders
             #('%s ... %r ... %s') but only two arguments, which raised
             #TypeError whenever a timeout was actually logged
             self.logger.debug(
                 '%s aborted because of timeout on %r'
                 %(e.args[0], e.args[1]))
     for step in self.cleanup():
         yield step
     self.Finished()
Esempio n. 12
0
 def commit(self):
     """Commit this transaction's writeset (SimPy generator).

     Writes each buffered value to the local group copy (with a
     simulated per-item delay), then atomically to the system-wide
     dataset, and finally releases the held locks.
     """
     self.logger.debug('%s start commit at %s'%(self, now()))
     wsStrings = []
     #write values to local group
     for itemID, value in self.writeset.iteritems():
         item = self.snode.groups[itemID.gid][itemID]
         item.write(value)
         if self.logger.isEnabledFor(logging.DEBUG):
             wsStrings.append('(%s, %s)'%(itemID, value))
         #simulated cost per written item
         yield hold, self, RandInterval.get(*self.txn.config.get(
             'commit.intvl.dist', ('fixed', 0))).next()
     #simulated fixed cost of the commit step itself
     yield hold, self, RandInterval.get(*self.txn.config.get(
         'commit.time.dist', ('fixed', 0))).next()
     if self.logger.isEnabledFor(logging.DEBUG):
         self.logger.debug('%s commit {%s} at %s'
                           %(self.ID, ', '.join([s for s in wsStrings]), now()))
     #write to the original atomically
     dataset = self.snode.system.dataset
     for itemID, value in self.writeset.iteritems():
         dataset[itemID].write(value)
         dataset[itemID].lastWriteTxn = self.txn
     #release locks
     for step in self.releaseLocks():
         yield step
Esempio n. 13
0
 def commit(self):
     """Commit the writeset with timestamp ordering (SimPy generator).

     Writes each buffered value to the local group copy at self.ts
     (asserting the item version only moves forward), then to the
     system-wide dataset, and finally unlocks every held lock.
     """
     wsStrings = []
     for itemID, value in self.writeset.iteritems():
         item = self.snode.groups[itemID.gid][itemID]
         #per-item versions must be monotonically increasing
         assert self.ts > item.version, \
                 'txn=%s, itemID=%s, curr=%s, prev=%s' \
                 %(self.txn.ID, itemID, self.ts, item.version)
         item.write(value, self.ts)
         if self.logger.isEnabledFor(logging.DEBUG):
             wsStrings.append('(%s, %s)'%(itemID, value))
         #simulated cost per written item
         yield hold, self, RandInterval.get(*self.txn.config.get(
             'commit.intvl.dist', ('fixed', 0))).next()
     #simulated fixed cost of the commit step itself
     yield hold, self, RandInterval.get(*self.txn.config.get(
         'commit.time.dist', ('fixed', 0))).next()
     if self.logger.isEnabledFor(logging.DEBUG):
         self.logger.debug('%s commit {%s}'
                           %(self.ID, ', '.join([s for s in wsStrings])))
     dataset = self.snode.system.dataset
     for itemID, value in self.writeset.iteritems():
         dataset[itemID].write(value, self.ts)
         dataset[itemID].lastWriteTxn = self.txn
     for lock in self.locks:
         for step in self.unlock(lock):
             yield step
Esempio n. 14
0
def test():
    try:
        numZones = int(sys.argv[1])
        numSNodes = int(sys.argv[2])
    except:
        numZones = 2
        numSNodes = 2
    print numZones, numSNodes
    #initialize
    logging.basicConfig(level=logging.DEBUG)
    configs = {
        'max.num.txns.per.storage.node' : 1,
        'nw.latency.within.zone' : ('fixed', 0),
        'nw.latency.cross.zone' : ('fixed', 0),
    }
    groups = {}
    for i in range(numSNodes):
        groups[i] = 128
    configs['dataset.groups'] = groups
    configs['num.zones'] = numZones
    configs['num.storage.nodes.per.zone'] = numSNodes
    initialize()
    RTI.initialize(configs)
    system = BaseSystem(configs)
    #txn generation
    curr = 0
    for i in range(TEST_NUM_TXNS):
        txnID = i
        zoneID = random.randint(0, configs['num.zones'] - 1)
        gid = random.randint(0, numSNodes - 1)
        txn = FakeTxn(txnID, zoneID, gid)
        at = curr + RandInterval.get(
            'expo', TEST_TXN_ARRIVAL_PERIOD / TEST_NUM_TXNS).next()
        curr  = at
        system.schedule(txn, at)
        logging.info('txnID=%s, zoneID=%s, gids=%s at=%s'
                     %(txnID, zoneID, txn.gids, at))
    #start simulation
    system.start()
    simulate(until=2 * TEST_TXN_ARRIVAL_PERIOD)

    #profile
    system.profile()
    #calculate m/m/s loss rate
    lambd = float(TEST_NUM_TXNS) / TEST_TXN_ARRIVAL_PERIOD
    mu = 1 / float(100)
    print erlangLoss(lambd / numZones / numSNodes, mu, 1)
    print erlangLoss(lambd / numZones, mu, numSNodes)
Esempio n. 15
0
def test():
    try:
        numZones = int(sys.argv[1])
        numSNodes = int(sys.argv[2])
    except:
        numZones = 2
        numSNodes = 2
    print numZones, numSNodes
    #initialize
    logging.basicConfig(level=logging.DEBUG)
    configs = {
        'max.num.txns.per.storage.node': 1,
        'nw.latency.within.zone': ('fixed', 0),
        'nw.latency.cross.zone': ('fixed', 0),
    }
    groups = {}
    for i in range(numSNodes):
        groups[i] = 128
    configs['dataset.groups'] = groups
    configs['num.zones'] = numZones
    configs['num.storage.nodes.per.zone'] = numSNodes
    initialize()
    RTI.initialize(configs)
    system = BaseSystem(configs)
    #txn generation
    curr = 0
    for i in range(TEST_NUM_TXNS):
        txnID = i
        zoneID = random.randint(0, configs['num.zones'] - 1)
        gid = random.randint(0, numSNodes - 1)
        txn = FakeTxn(txnID, zoneID, gid)
        at = curr + RandInterval.get(
            'expo', TEST_TXN_ARRIVAL_PERIOD / TEST_NUM_TXNS).next()
        curr = at
        system.schedule(txn, at)
        logging.info('txnID=%s, zoneID=%s, gids=%s at=%s' %
                     (txnID, zoneID, txn.gids, at))
    #start simulation
    system.start()
    simulate(until=2 * TEST_TXN_ARRIVAL_PERIOD)

    #profile
    system.profile()
    #calculate m/m/s loss rate
    lambd = float(TEST_NUM_TXNS) / TEST_TXN_ARRIVAL_PERIOD
    mu = 1 / float(100)
    print erlangLoss(lambd / numZones / numSNodes, mu, 1)
    print erlangLoss(lambd / numZones, mu, numSNodes)
Esempio n. 16
0
 def run(self):
     """Apply learned Paxos instances to local storage in order (SimPy
     generator).

     Drains consecutive instance IDs from the learner, writing each
     committed txn's write actions to the local group (with a simulated
     per-item delay) and reporting departure to the cnode; blocks on
     newInstanceEvent when caught up.
     """
     while True:
         instances = self.cnode.paxosLearner.instances
         while self.nextUpdateIID in instances:
             txn = instances[self.nextUpdateIID]
             #write values
             for action in txn.actions:
                 if action.label == Action.READ:
                     continue
                 itemID = action.itemID
                 value = action.attr
                 item = self.snode.groups[itemID.gid][itemID]
                 item.write(value)
                 #simulated cost per written item
                 yield hold, self, RandInterval.get(*txn.config.get(
                     'commit.intvl.dist', ('fixed', 0))).next()
             #report txn done
             self.invoke(self.cnode.onTxnDepart, txn).rtiCall()
             self.nextUpdateIID += 1
         yield waitevent, self, self.cnode.paxosLearner.newInstanceEvent
Esempio n. 17
0
 def generate(self):
     """Yield (txn, arrivalTime) pairs, issuing txns round-robin
     across zones until self.numTxns have been produced.

     Per-zone arrival times advance independently by random intervals
     drawn from self.arrIntvDist; txn IDs count up from 1.
     """
     issued = 0
     nextAt = {}
     while issued < self.numTxns:
         for zoneID in range(self.numZones):
             issued += 1
             #pick a transaction class and build its action list
             klass = self.nextClass()
             actions = self.nextActions(klass)
             txn = Transaction(issued, zoneID, actions, klass.config)
             #advance this zone's clock by a random inter-arrival gap
             arrival = nextAt.get(zoneID, 0) + \
                     RandInterval.get(*self.arrIntvDist).next()
             nextAt[zoneID] = arrival
             yield txn, arrival
             if issued >= self.numTxns:
                 break
Esempio n. 18
0
 def generate(self):
     """Yield (txn, arrivalTime) pairs, issuing txns round-robin
     across zones until self.numTxns have been produced.

     Per-zone arrival times advance independently by random intervals
     drawn from self.arrIntvDist; txn IDs count up from 1.
     """
     issued = 0
     nextAt = {}
     while issued < self.numTxns:
         for zoneID in range(self.numZones):
             issued += 1
             #pick a transaction class and build its action list
             klass = self.nextClass()
             actions = self.nextActions(klass)
             txn = Transaction(issued, zoneID, actions, klass.config)
             #advance this zone's clock by a random inter-arrival gap
             arrival = nextAt.get(zoneID, 0) + \
                     RandInterval.get(*self.arrIntvDist).next()
             nextAt[zoneID] = arrival
             yield txn, arrival
             if issued >= self.numTxns:
                 break
Esempio n. 19
0
 def run(self):
     """Apply learned Paxos instances to local storage in order (SimPy
     generator).

     Drains consecutive instance IDs from the learner, writing each
     committed txn's write actions to the local group (with a simulated
     per-item delay) and reporting departure to the cnode; blocks on
     newInstanceEvent when caught up.
     """
     while True:
         instances = self.cnode.paxosLearner.instances
         while self.nextUpdateIID in instances:
             txn = instances[self.nextUpdateIID]
             #write values
             for action in txn.actions:
                 if action.label == Action.READ:
                     continue
                 itemID = action.itemID
                 value = action.attr
                 item = self.snode.groups[itemID.gid][itemID]
                 item.write(value)
                 #simulated cost per written item
                 yield hold, self, RandInterval.get(
                     *txn.config.get('commit.intvl.dist', ('fixed',
                                                           0))).next()
             #report txn done
             self.invoke(self.cnode.onTxnDepart, txn).rtiCall()
             self.nextUpdateIID += 1
         yield waitevent, self, self.cnode.paxosLearner.newInstanceEvent
Esempio n. 20
0
 def __init__(self, interval, name=None,
              until=infinite, drift=('fixed', 0)):
     """Periodic event source firing every `interval`, with optional
     random drift.

     drift is (key, mean) or (key, mean, cfg); cfg may bound the drift
     via 'lb'/'ub' (defaults 0 and interval). Raises ValueError when
     the bounds fall outside [0, interval].
     """
     Process.__init__(self)
     self.interval = interval
     if name is not None:
         eventname = name
     else:
         eventname = "a_SimEvent"
     self.event = SimEvent(eventname)
     self.until = until
     try:
         key, mean, cfg = drift
     except ValueError:
         key, mean = drift
         cfg = {}
     lb = cfg.get('lb', 0)
     ub = cfg.get('ub', interval)
     if lb < 0:
         raise ValueError('drift lb = %s >= 0' % lb)
     if ub > interval:
         #bug fix: the original interpolated a single value into two
         #placeholders ('%s < %s' % interval), which raised TypeError
         #instead of the intended ValueError
         raise ValueError('drift ub = %s < %s = interval' % (ub, interval))
     cfg['lb'] = lb
     cfg['ub'] = ub
     self.rgen = RandInterval.get(key, mean, cfg)
Esempio n. 21
0
 def __init__(self, configs):
     """Build the within-zone and cross-zone latency generators from
     their respective entries in configs."""
     withinCfg = configs[IIDLatencyNetwork.WITHIN_KEY]
     crossCfg = configs[IIDLatencyNetwork.CROSS_KEY]
     self.withinGen = RandInterval.get(*withinCfg)
     self.crossGen = RandInterval.get(*crossCfg)
Esempio n. 22
0
 def run(self):
     """Log the running transaction, then hold for an expo(100) interval
     (SimPy generator).

     NOTE(review): `txn` is not defined in this scope — this line likely
     raises NameError at runtime; presumably self.txn was intended.
     Confirm against the enclosing class before relying on it.
     """
     self.logger.debug('Running transaction %s at %s' %
                       (txn.ID, now()))
     yield hold, self, RandInterval.get('expo', 100).next()
Esempio n. 23
0
 def __init__(self, configs):
     """Build the within-zone and cross-zone latency generators from
     their respective entries in configs."""
     withinCfg = configs[IIDLatencyNetwork.WITHIN_KEY]
     crossCfg = configs[IIDLatencyNetwork.CROSS_KEY]
     self.withinGen = RandInterval.get(*withinCfg)
     self.crossGen = RandInterval.get(*crossCfg)
Esempio n. 24
0
 def run(self):
     """Log the running transaction, then hold for an expo(100) interval
     (SimPy generator).

     NOTE(review): `txn` is not defined in this scope — this line likely
     raises NameError at runtime; presumably self.txn was intended.
     Confirm against the enclosing class before relying on it.
     """
     self.logger.debug('Running transaction %s at %s'
                       %(txn.ID, now()))
     yield hold, self, RandInterval.get('expo', 100).next()
Esempio n. 25
0
def runSingleAcceptorFailRestartNImm(nprop, mean, lcfg, debug=False):
    """Simulate nprop proposers against a single acceptor where failed
    proposals restart (non-immediate restart variant).

    Arrival times X are cumulative exponentials with the given mean;
    per-message latencies come from the lcfg generator. Prints retry
    count and total/success/failure latency statistics.

    NOTE(review): the exact semantics of Y, A, V, B, E come from
    computeYNImm/computeA, which are not visible here — the inline
    descriptions below are inferred and should be confirmed.
    """
    #generate X_i (cumulative exponential arrival times)
    expoGen = RandInterval.get('expo', mean)
    X = [expoGen.next()]
    for i in range(1, nprop):
        X.append(expoGen.next() + X[i - 1])
    #run
    lGen = getGenerator(lcfg)
    A = [0] * nprop
    Y = []
    V = []
    B = [-1] * nprop   #presumably first round proposer i entered; -1 = none
    E = [-1] * nprop   #presumably round in which proposer i finally won
    prevTime = time.time()
    for j in range(nprop):
        #progress heartbeat: report the current slot every ~5 wall seconds
        curr = time.time()
        if curr - prevTime > 5:
            print 'on slot %s' % j
            prevTime = curr
        Y.append([0] * nprop)
        V.append([0] * nprop)
        #compute Y
        computeYNImm(j, X, Y, A, V, B, E, lGen)
        #compute A
        mval, midx = computeA(j, Y, A)
        #compute V
        for i in range(nprop):
            V[j][i] = A[j] + lGen.next()
        #compute B and E
        for i in range(nprop):
            if X[i] < A[j] and B[i] == -1:
                B[i] = j
        E[midx] = j
        if debug:
            printXYAVBE(j, X, Y, A, V, B, E)
            print
    #compute stats
    R = [0] * nprop   #number of retries per proposer
    T = [0] * nprop   #total latency per proposer
    S = []            #latency of each final (successful) attempt
    F = []            #latency of each failed attempt
    for i in range(nprop):
        R[i] = E[i] - B[i]
        T[i] = V[E[i]][i] - X[i]
        if B[i] != E[i]:
            F.append(V[B[i]][i] - X[i])
        for k in range(B[i] + 1, E[i]):
            F.append(V[k][i] - V[k - 1][i])
        if B[i] == E[i]:
            S.append(V[E[i]][i] - X[i])
        else:
            S.append(V[E[i]][i] - V[E[i] - 1][i])
    if debug:
        strs = ['Yk:']
        for i in range(nprop):
            strs.append('%.2f' % Y[E[i]][i])
        print ' '.join(strs)
        strs = ['T:']
        for i in range(nprop):
            strs.append('%.2f' % T[i])
        print ' '.join(strs)
    print 'nretries.mean=%s' % numpy.mean(R)
    print 'nretries.std=%s' % numpy.std(R)
    print 'nretries.histo=(%s, %s)' % numpy.histogram(R)
    print 'total.latency.mean=%s' % numpy.mean(T)
    print 'total.latency.std=%s' % numpy.std(T)
    print 'total.latency.histo=(%s, %s)' % numpy.histogram(T)
    print 'succ.latency.mean=%s' % numpy.mean(S)
    print 'succ.latency.std=%s' % numpy.std(S)
    print 'succ.latency.histo=(%s, %s)' % numpy.histogram(S)
    print 'fail.latency.mean=%s' % numpy.mean(F)
    print 'fail.latency.std=%s' % numpy.std(F)
    print 'fail.latency.histo=(%s, %s)' % numpy.histogram(F)
Esempio n. 26
0
def runSingleAcceptorFailRestartNImm(nprop, mean, lcfg, debug=False):
    """Simulate nprop proposers against a single acceptor where failed
    proposals restart (non-immediate restart variant).

    Arrival times X are cumulative exponentials with the given mean;
    per-message latencies come from the lcfg generator. Prints retry
    count and total/success/failure latency statistics.

    NOTE(review): the exact semantics of Y, A, V, B, E come from
    computeYNImm/computeA, which are not visible here — the inline
    descriptions below are inferred and should be confirmed.
    """
    #generate X_i (cumulative exponential arrival times)
    expoGen = RandInterval.get('expo', mean)
    X = [expoGen.next()]
    for i in range(1, nprop):
        X.append(expoGen.next() + X[i-1])
    #run
    lGen = getGenerator(lcfg)
    A = [0] * nprop
    Y = []
    V = []
    B = [-1] * nprop   #presumably first round proposer i entered; -1 = none
    E = [-1] * nprop   #presumably round in which proposer i finally won
    prevTime = time.time()
    for j in range(nprop):
        #progress heartbeat: report the current slot every ~5 wall seconds
        curr = time.time()
        if curr - prevTime > 5:
            print 'on slot %s'%j
            prevTime = curr
        Y.append([0] * nprop)
        V.append([0] * nprop)
        #compute Y
        computeYNImm(j, X, Y, A, V, B, E, lGen)
        #compute A
        mval, midx = computeA(j, Y, A)
        #compute V
        for i in range(nprop):
            V[j][i] = A[j] + lGen.next()
        #compute B and E
        for i in range(nprop):
            if X[i] < A[j] and B[i] == -1:
                B[i] = j
        E[midx] = j
        if debug:
            printXYAVBE(j, X, Y, A, V, B, E)
            print
    #compute stats
    R = [0] * nprop   #number of retries per proposer
    T = [0] * nprop   #total latency per proposer
    S = []            #latency of each final (successful) attempt
    F = []            #latency of each failed attempt
    for i in range(nprop):
        R[i] = E[i] - B[i]
        T[i] = V[E[i]][i] - X[i]
        if B[i] != E[i]:
            F.append(V[B[i]][i] - X[i])
        for k in range(B[i] + 1, E[i]):
            F.append(V[k][i] - V[k-1][i])
        if B[i] == E[i]:
            S.append(V[E[i]][i] - X[i])
        else:
            S.append(V[E[i]][i] - V[E[i] - 1][i])
    if debug:
        strs = ['Yk:']
        for i in range(nprop):
            strs.append('%.2f'%Y[E[i]][i])
        print ' '.join(strs)
        strs = ['T:']
        for i in range(nprop):
            strs.append('%.2f'%T[i])
        print ' '.join(strs)
    print 'nretries.mean=%s'%numpy.mean(R)
    print 'nretries.std=%s'%numpy.std(R)
    print 'nretries.histo=(%s, %s)'%numpy.histogram(R)
    print 'total.latency.mean=%s'%numpy.mean(T)
    print 'total.latency.std=%s'%numpy.std(T)
    print 'total.latency.histo=(%s, %s)'%numpy.histogram(T)
    print 'succ.latency.mean=%s'%numpy.mean(S)
    print 'succ.latency.std=%s'%numpy.std(S)
    print 'succ.latency.histo=(%s, %s)'%numpy.histogram(S)
    print 'fail.latency.mean=%s'%numpy.mean(F)
    print 'fail.latency.std=%s'%numpy.std(F)
    print 'fail.latency.histo=(%s, %s)'%numpy.histogram(F)