Example #1
0
 def __init__(self, id, rackid, trace, obj_size, threadNum):
     # Identity of this client and the rack it belongs to.
     self._id = id
     self._rackid = rackid
     # Keep a private copy so the caller's trace list is never mutated.
     self._trace = trace[:]
     self._obj_size = obj_size
     # Per-client output file name, derived from the client id.
     self._fname = str(id)
     # Single-worker pool: this client issues its requests serially.
     # NOTE(review): threadNum is accepted but not used here — confirm intent.
     self._pool = multiThread.ThreadPool(1)
Example #2
0
 def __init__(self, id, rackid, trace, obj_size, threadNum):
     # Identity of this client and the rack it belongs to.
     self._id = id
     self._rackid = rackid
     # Private deque copy of the trace (presumably consumed from the
     # front by the request loop — TODO confirm against the caller).
     self._trace = deque(trace)
     self._obj_size = obj_size
     # Per-client output file name, derived from the client id.
     self._fname = str(id)
     # Single-worker pool: this client issues its requests serially.
     # NOTE(review): threadNum is accepted but not used here — confirm intent.
     self._pool = multiThread.ThreadPool(1)
     #self._jlist = np.asarray(self._trace)
     # Running latency statistics, updated as requests complete.
     self.ave_latency = 0
     self.request_count = 0
Example #3
0
def set_cache(hierarchy, env, shadow_window, racknum, fname):
    """Resize the cache for every rack in parallel, then log a separator.

    Spawns one `set_cache_size2` task per rack id on a thread pool,
    waits for all of them, appends a divider line to `fname`, and
    finally resets the per-rack counters.
    """
    # Parenthesized form prints the same single value under Py2 and Py3.
    print(racknum)
    pool = multiThread.ThreadPool(racknum)
    for rackid in range(racknum):
        pool.add_task(set_cache_size2, hierarchy, env, shadow_window, rackid,
                      fname)
    pool.wait_completion()

    # `with` guarantees the file is closed even if a write raises
    # (the original open/close pair leaked the descriptor on error).
    with open(fname, "a") as fd:
        fd.write("\n")
        fd.write("------------------------------")
        fd.write("\n")

    reset_counters(hierarchy, racknum)
Example #4
0
    #	jobList.append(inputParser2(config.get('Simulation', 'input12')))
    #	#jobList=[]
    #jobList=trace[:]
    # Pre-build sequential request ids 1..10,000,000 (Python 2 `xrange`).
    for i in xrange(10000000):
        reqList.append(i + 1)
    # One Client per (node, client) pair; `counter` is the global client id
    # and also the key into clientList.  Every client shares the same trace.
    for i in range(nodeNum):
        for j in range(clientNum):
            clientList[counter] = Client(counter, i, trace, obj_size,
                                         threadNum)
            counter += 1
    #for i in clientList.keys():
    #	print i, clientList[i]._trace
    print "Input file is read"
    #	jobList=[]
    #	jobList.append(inputParser2(config.get('Simulation', 'input')))
    # Fan out request issuing: one pool task per client, then block until
    # every client has queued its requests.
    # NOTE(review): pool size is clientNum but there are nodeNum*clientNum
    # clients — confirm this is intended.
    pool = multiThread.ThreadPool(clientNum)
    for key in clientList.keys():
        pool.add_task(issueRequests, clientList[key], hierarchy, logger, env,
                      links, threadNum)
    pool.wait_completion()

    # Register the monitoring/driver process `x`, then run the simulation
    # (presumably a simpy-style environment — confirm).
    env.process(x(hierarchy, env))

    env.run()

    #test_cache(jobList,hierarchy)

    logger.info("Simulation Ends")
    logger.info("Collecting Statistics...")
    print "Simulation Ends"
    print "Collecting Statistics..."
Example #5
0
    # Parse the raw trace file into a dataframe of jobs.
    df = traceParser(trace_file)
    logger.info('Generating Final Trace File...')
    print("Generating Final Trace File...")

    logger.info('Running Simulation')
    print('Running Simulation')

    # Hand every parsed job to the datacenter scheduler up front.
    dc.scheduler.addJobs(df)
    #  dc.scheduler.allocateJob()
    #  dc.scheduler.addJobs(df)
    #  dc.scheduler.start()
    #s_thread = threading.Thread(target=dc.scheduler.start2(env))
    #s_thread.start()
    print("first jobs allocated")
    # Thread pool for mappers
    # One request-generator task per mapper; wait for all to be queued,
    # then run the simulation environment to completion.
    pool = multiThread.ThreadPool(len(dc.mapper_list.keys()))
    for i in dc.mapper_list.keys():
        pool.add_task(event.request_generator, i, dc, dc.scheduler, env)
    pool.wait_completion()
    env.run()
    # Post-run statistics: dump block directory, job stats, write-back
    # cache contents, and OSD mapping, newest completion time first.
    sort_by_ctime = dc.blk_dir.df.sort_values('c_time', ascending=False)
    print('---------sorted--------------')
    print(sort_by_ctime)
    print('---------jobs--------------')
    print(dc.jobStat.df)
    print('----------wb-cache----------')
    print(dc.blk_dir.obj_df.sort_values('c_time', ascending=False))
    print('----------osd-mapping----------')
    print(dc.osdMap)
    print('----------print cache----------')
    dc.cache_layer['writeCache'].print()