Example 1
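    # A comparison GUI (tkinter): a project-directory picker on top, 'Baseline'
    # and 'Alternative' Runner panels side by side, and a file-diff view below.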
    def __init__(self, master):
        super().__init__(master)

        self.columnconfigure(0, weight=1)
        self.columnconfigure(1, weight=1)

        self.project_dir = DirectoryInput(self,
                                          text='Project Directory:',
                                          padding=10)
        self.project_dir.grid(row=0, columnspan=2, sticky='ew')

        baseframe = tk.LabelFrame(self, text='Baseline')
        baseframe.columnconfigure(0, weight=1)
        baseframe.columnconfigure(1, weight=1)
        baseframe.grid(row=1, column=0, sticky='nsew')

        baseline = runner.Runner(baseframe)  #, run_button=False)
        baseline.grid(sticky='nsew')

        diffframe = tk.LabelFrame(self, text='Alternative')
        diffframe.columnconfigure(0, weight=1)
        diffframe.columnconfigure(1, weight=1)
        diffframe.grid(row=1, column=1, sticky='nsew')

        different = runner.Runner(diffframe)  #, run_button=False)
        different.grid(row=0, column=1, sticky='nsew')

        filediffs = FileDiffs(self)
        filediffs.grid(row=2, columnspan=2, sticky='ew')
Example 2
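# Sweeps per-thread register counts across several chemistry mechanisms and
# writes the modeled register-allocation tables to RegAllocTables.tsv.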
def printSMCRegTables(dataType='double',
                      numRegs=[0, 1, 2, 4, 8, 16, 32, 64, 128, 256],
                      dispChem=False,
                      dispDyn=False):
    tests = [
        ('lidryer',     {'Num Species': 9}),    # hydrogen
        ('drm19',       {'Num Species': 21}),   # methane
        ('grimech30',   {'Num Species': 53}),   # methane
        ('hai',         {'Num Species': 71}),   # propane
        ('prf_ethanol', {'Num Species': 107}),  # ethanol
    ]
    dynSA = analyze.StaticAnalysis(
        os.path.join(baseXMLDir, 'advance-smc-modified.xml'))
    f = open('RegAllocTables.tsv', 'wt')
    for (chem, config) in tests:
        chemSA = analyze.StaticAnalysis(
            os.path.join(baseXMLDir, '%s.xml' % chem))
        for nreg in numRegs:
            config['FP Regs/thread'] = nreg
            config['Int Regs/thread'] = nreg
            r = runner.Runner(chemSA).runModel(config)
            runner.writeRegAllocTables(f, r, dataType)
            if dispChem:
                temp = list(r['functions'].values())[0]['loops'][0]['regAlloc'][dataType]
                for varType in ['state', 'stream']:
                    t = temp[varType]
                    for (x, y) in [('inRegs', 'inCache'), ('hits', 'misses')]:
                        print('%d\t' * 3 % (t[x], t[y], t[x] + t[y]), end='')
                print()
            r = runner.Runner(dynSA).runModel(config)
            runner.writeRegAllocTables(f, r, dataType)
            if dispDyn:
                temp = r['regAlloc'][dataType]
                for varType in ['state', 'stream']:
                    t = temp[varType]
                    print('%d\t' * 3 % (t['hits'], t['misses'],
                                        t['hits'] + t['misses']), end='')
                print()

    f.close()
Example 3
def test1():
  # parameters to vary 
  stddev = 0.2
  variedParamDict = {
    # paramDict[myVariedParam] = [variedParamTruthVal, 0.2] # for log normal
    "kon":  [0.5,stddev],
    "koff":  [5.0,stddev]
  }

  # list of observables to be scored by GA 
  outputList = { 
    #"Cai":OutputObj("Cai","mean",[8,10], # in [s]
    # 0.1),          # value you want 
    "Nai":OutputObj("Nai","val_vs_time",[  0, 2],
    [1,0.5,0.15],timeInterpolations=[  0,1,2]) # check that interpolated values at 0, 100, 200 are 1, 0.5 ... 
  }

  #testState = "Cai"          
  simulation = runner.Runner()
  results = run(
    simulation,
    yamlVarFile = "inputParams.yaml",
    variedParamDict = variedParamDict,
    jobDuration = 30e3, # [ms]
    numRandomDraws = 8,  
    numIters = 5,    
    #sigmaScaleRate = 0.45,
    outputList = outputList,
    debug = True
  )
Example 4
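    # Resets this entity's navigation/AI state and attaches its Runner,
    # PowerupManager, and VehicleScript components before the game starts.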
    def start(self):
        self.startingPosition = self.entity.global_position
        self.dead = False
        self.started = False
        self.count = 0
        self.reachedGoal = True
        self.backUp = False
        self.stuck = False
        self.stuck_flag = False
        self.currentPath = []
        self.seed = random.randint(0, 4800)
        self.frame_count = 0

        self.prevPos = None

        self.closestPathNodes = None
        self.closeNodeSelf = None
        self.closeNodeTarget = None
        self.currentNodeIndex = 0

        self.entity.add_component(runner.Runner(self.manager, self.controller), self.physics)
        self.entity.add_component(powerup_manager.PowerupManager(self.controller, False), self.physics)

        self.vehicle = self.entity.add_component(VehicleScript(self.startingPosition, self.controller), self.physics)
        self.entity.register_handler(GameStarted, self.start_game)

        self.currentNodeXZ = self.manager.astar.findCurrentNode(
            (self.entity.transform().global_position().x, self.entity.transform().global_position().z))
Example 5
def validation():

  testState = "Cai"
  simulation = runner.Runner()
  results = run(
    simulation,
    yamlVarFile = "inputParams.yaml",
    variedParamDict = variedParamListDefault,
    jobDuration = 25e3, # [ms] 
    numRandomDraws = 3,
    numIters = 10,
    sigmaScaleRate = 0.45,
    outputParamName = "Container",
    outputParamSearcher = testState,
    outputParamMethod = "min",
    outputParamTruthVal=0.1,
    debug = True
  )

  
  refKon = 0.6012
  bestKon = results['bestFitDict']['kon']
  assert(np.abs(refKon - bestKon) < 1e-3), "FAIL!"
  print("PASS!!") 
Example 6
def doit():      
  import runner 
  simulation = runner.Runner()
  yamlFile = "inputParams.yaml"

  data = GenerateData(simulation,yamlFile)
  FitData(data,simulation,yamlFile) 
Example 7
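# Rebuilds Runner objects (with their sponsorships) from a saved session's
# runners.json, or returns (None, None) if the session directory is missing.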
def decodeFromJSON(session_name):
    # INIT RUNNERS LIST
    RUNNERS = []

    # INIT SPONSORS LIST
    SPONSORS = []

    # SEE IF THE SESSION EXISTS:
    rootdir = os.getcwd()
    sessionsdir = rootdir + "/sessions/"

    session_was_found = False
    for subdir, dirs, files in os.walk(sessionsdir):
        if subdir != sessionsdir:
            if subdir[len(sessionsdir):] == session_name:
                session_was_found = True
                with open(sessionsdir + session_name + "/runners.json", "r") as runnersFile:
                    runner_list = json.loads(runnersFile.read())

    if not session_was_found: 
        print(f"The session {session_name} could not be found")
        return None, None
    for new_runner in runner_list: 
        NEW_RUNNER = runner.Runner(new_runner[0].get("firstname"), new_runner[0].get("lastname")) 
        NEW_RUNNER.value_per_lap = new_runner[0].get("lapvalue") 
        NEW_RUNNER.number_of_laps = new_runner[0].get("numberoflaps")
        
        for sponsorship in new_runner[1]: 
            NEW_RUNNER.addSponsorship(sponsorship.get("firstname"), sponsorship.get("lastname"), sponsorship.get("valueperlap"), sponsorship.get("maxval"))
        RUNNERS.append(NEW_RUNNER)

    return RUNNERS, updateSponsorList(RUNNERS) 
Example 8
def main():
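    # Builds separate training and test graph sets (different seeds), trains
    # the agent via Runner.loop, then switches to the test graphs to evaluate.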
    n_test = 1000
    args = parser.parse_args()
    logging.info('Loading graph %s' % args.graph_type)
    graph_train = [None] * args.graph_nbr
    for graph_ in range(args.graph_nbr):
        graph_train[graph_] = graph.Graph(graph_type=args.graph_type,
                                          min_n=args.node_min,
                                          max_n=args.node_max,
                                          p=args.p,
                                          m=args.m,
                                          seed=120 + graph_)
    graph_test = [None] * n_test
    for graph_ in range(n_test):
        graph_test[graph_] = graph.Graph(graph_type=args.graph_type,
                                         min_n=args.node_min,
                                         max_n=args.node_max,
                                         p=args.p,
                                         m=args.m,
                                         seed=1 + graph_)
    logging.info('Loading agent and environment')
    agent_class = agent.Agent(args.model, args.lr, args.bs, args.n_step,
                              args.environment_name, args.node_max)

    print("Running a single instance simulation...")
    my_runner = runner.Runner(graph_train, agent_class, args.verbose)
    my_runner.loop(args.graph_nbr, 1, args.niter)
    agent_class.save_model()
    #     torch.save(agent_class,"model.pt")
    my_runner.change_to_test(graph_test)
    my_runner.loop(n_test, 1, args.niter)
Example 9
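# QMIX training driver: runs episodes, feeds replay buffers to the learner,
# checkpoints every 500 episodes, and logs rewards/victories to TensorBoard.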
def main(arglist):
    current_time = strftime("%Y-%m-%d-%H-%M-%S", gmtime())
    writer = SummaryWriter(log_dir='./logs/' + current_time + '-snake')
    actors = 6
    if not arglist.train:
        actors = 1
    env_runner = runner.Runner(arglist, arglist.scenario, actors)


    while arglist.train or env_runner.episode < 1:
        env_runner.reset()
        replay_buffers = env_runner.run()
        for replay_buffer in replay_buffers:
            env_runner.qmix_algo.episode_batch.add(replay_buffer)
        env_runner.qmix_algo.train()
        for episode in env_runner.episodes:
            env_runner.qmix_algo.update_targets(episode)

        for episode in env_runner.episodes:
            if episode % 500 == 0 and arglist.train:
                env_runner.qmix_algo.save_model('./saved/agents_' + str(episode))

        print(env_runner.win_counted_array)
        for idx, episode in enumerate(env_runner.episodes):
            print("Total reward in episode {} = {} and global step: {}".format(episode, env_runner.episode_reward[idx], env_runner.episode_global_step))

            if arglist.train:
                writer.add_scalar('Reward', env_runner.episode_reward[idx], episode)
                writer.add_scalar('Victory', env_runner.win_counted_array[idx], episode)


    if not arglist.train:
        env_runner.save()
    
    env_runner.close()
Example 10
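# Models one block-size configuration across a sweep of cache sizes and
# returns the bandwidth curve plus summary metrics from the last model run.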
def runConfig(result,
              sa,
              blockx,
              blocky,
              blockz,
              problemSize,
              cacheSizes,
              config=None):
    if config is None:  # avoid a shared mutable default; the dict is mutated below
        config = {}
    print(' Running block size (%d, %d, %d) ...' % (blockx, blocky, blockz))
    runr = runner.Runner(sa)
    config['X Problem Size'] = problemSize
    config['Y Problem Size'] = problemSize
    config['Z Problem Size'] = problemSize
    config['X Block Size'] = blockx
    config['Y Block Size'] = blocky
    config['Z Block Size'] = blockz
    BW = []
    for cacheSize in cacheSizes:
        print('  Running cache size %g ...' % cacheSize)
        config['$/thread group (kB)'] = cacheSize
        r = runr.runModel(config)
        BW.append(float(r['BW']) / 2**30)
    return {'problemSize': problemSize, 'blockx': blockx, 'blocky': blocky, 'blockz': blockz, \
            'cacheSizes': cacheSizes, 'BW': BW, 'WS': float(r['WS']) / 2**10, \
            'gflops': float(r['flops']) / 10**9, 'wgflops': float(r['wflops']) / 10**9, \
            'GPRegs': r['GPRegs'], 'FPRegs': r['FPRegs'], \
            'cputime': r['cputime'], 'ramtime': r['ramtime'], 'time': r['time']}
Example 11
    def __init__(self, config_map):
        super().__init__(config_map)
        self.headers = {
            "Host": "search.map.daum.net",
            "Connection": "keep-alive",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/84.0.4147.125 Safari/537.36",
            "Accept": "*/*",
            "Sec-Fetch-Site": "cross-site",
            "Sec-Fetch-Mode": "no-cors",
            "Sec-Fetch-Dest": "script",
            "Referer": "https://map.kakao.com/?from=total&nil_suggest=btn&q=%EC%97%AD%EC%82%BC%20%EC%B9%B4%ED%8E%98"
                       "&tab=place",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "ko,en;q=0.9,en-US;q=0.8,ja;q=0.7"
        }
        self.addr_loader = addr_data_loader.AddrDataLoader()
        self.runner = runner.Runner()
        self.locks = {}
        self.progress_bar = {}
        self.progress_count = 0
        self.progress_lock = threading.Lock()
        self.completes = []
Example 12
	def start_task(self, show_progress, encoding, working_dir, directive, params):
		self._encoding = encoding
		self._show_progress = show_progress

		# Default to the current file's directory if no working directory was given
		if (working_dir == "" and self.window.active_view()
				and self.window.active_view().file_name()):
			working_dir = os.path.dirname(self.window.active_view().file_name())

		if not hasattr(self, 'output_view'):
			# Try not to call get_output_panel until the regexes are assigned
			self.output_view = self.window.get_output_panel("autolatex")

		self.output_view.settings().set("result_file_regex", "^(.*?):([0-9]+):(?:([0-9]+):)?\\s*(.*?)\\s*$")
		self.output_view.settings().set("result_line_regex", "^l\\.([0-9]+)\\s+")
		self.output_view.settings().set("result_base_dir", working_dir)

		# Call get_output_panel a second time after assigning the above
		# settings, so that it'll be picked up as a result buffer
		self.window.get_output_panel("autolatex")

		# Show the progress
		if self._show_progress:
			sublime.status_message(_T("Building [%d%%]") % int(0))

		autolatex_directory = utils.find_AutoLaTeX_directory(working_dir)
		self._thread = runner.Runner(
				self,
				autolatex_directory,
				directive,
				params)
		self._thread.start()
Example 13
def runApp(batchSize, threadnum, json, image_dir):
    """create runner """
    dpu = runner.Runner(json)

    listimage = os.listdir(image_dir)
    runTotal = len(listimage)
    """ pre-process all images """
    img = []
    for i in range(runTotal):
        image = cv2.imread(os.path.join(image_dir, listimage[i]))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = image / 255.0
        img.append(image)
    """run with batch """
    threadAll = []
    time1 = time.time()
    for i in range(threadnum):
        t1 = threading.Thread(target=runDPU,
                              args=(dpu, img, i, batchSize, threadnum,
                                    runTotal))
        threadAll.append(t1)
    for x in threadAll:
        x.start()
    for x in threadAll:
        x.join()

    time2 = time.time()

    timetotal = time2 - time1
    fps = float(runTotal / timetotal)
    print("%.2f FPS" % fps)

    del dpu

    return
Example 14
    def createRunners(self):
        # move the current runners, if any, to the array with old runners
        if self.runners:
            self.oldCpuTimes.append(max(r.currentCpuTime for r in self.runners))
            self.oldRunners.extend(self.runners)
            self.runners = []

        # reset other state
        self.convergenceTime = None
        self.lastOfUpdateTime = time.time()

        self.runnerGeneration += 1

        bestParams = None
        if self.oldRunners and bool(g.getConfig("optimization.restartFromBestValue")):
            # get best params
            bestParams = self.getBestParams()

        for runnerId in range(int(g.getConfig("optimization.runsPerJob"))):
            r = runner.Runner(self, runnerId + 1, self.currentMethod, self.runnerGeneration)
            if not r.prepare(self.workDir, self.copasiFile, bestParams):
                g.log(LOG_ERROR, "{}: failed to create a runner".format(r.getName()))
                return False
            self.runners.append(r)

        # note that this may create more processes than the number of free CPU cores!
        for r in self.runners:
            r.execute()

        return True
Example 15
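# Entry point: uses BatchRunner for a parallel batched simulation when --batch
# is given, otherwise falls back to a single-instance Runner.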
def main():
    args = parser.parse_args()
    logging.info('Loading graph %s' % args.graph_type)
    # seed = 125
    # graph_one = graph.Graph(graph_type=args.graph_type, cur_n=20, p=0.15,m=4, seed=seed)

    logging.info('Loading environment %s' % args.environment_name)
    env_class = environmentClass.Environment(args)

    logging.info('Loading agent...')
    agent_class = agentClass.Agent(env_class.graphs, args.model, args.lr,
                                   args.bs, args.n_step)

    if args.batch is not None:
        print("Running a batched simulation with {} agents in parallel...".
              format(args.batch))
        my_runner = runner.BatchRunner(env_class, agent_class, args.batch,
                                       args.verbose)
        final_reward = my_runner.loop(args.ngames, args.epoch, args.niter)
        print("Obtained a final average reward of {}".format(final_reward))
        agent_class.save_model()
    else:
        print("Running a single instance simulation...")
        my_runner = runner.Runner(env_class, agent_class, args.verbose)
        final_reward = my_runner.loop(args.ngames, args.epoch, args.niter)
        print("Obtained a final reward of {}".format(final_reward))
        agent_class.save_model()
Esempio n. 16
0
def app(image_dir,threads,model):

    listimage=os.listdir(image_dir)
    runTotal = len(listimage)

    global out_q
    out_q = [None] * runTotal

    g = xir.graph.Graph.deserialize(pathlib.Path(model))
    subgraphs = get_subgraph (g)
    all_dpu_runners = []
    for i in range(threads):
        all_dpu_runners.append(runner.Runner(subgraphs[0], "run"))

    ''' preprocess images '''
    print('Pre-processing',runTotal,'images...')
    img = []
    for i in range(runTotal):
        path = os.path.join(image_dir,listimage[i])
        img.append(preprocess_fn(path))

    '''run threads '''
    print('Starting',threads,'threads...')
    threadAll = []
    start=0
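    # split the image list across threads; the last thread takes the remainder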
    for i in range(threads):
        if (i==threads-1):
            end = len(img)
        else:
            end = start+(len(img)//threads)
        in_q = img[start:end]
        t1 = threading.Thread(target=runDPU, args=(i,start,all_dpu_runners[i], in_q))
        threadAll.append(t1)
        start=end

    time1 = time.time()
    for x in threadAll:
        x.start()
    for x in threadAll:
        x.join()
    time2 = time.time()
    timetotal = time2 - time1

    fps = float(runTotal / timetotal)
    print("FPS=%.2f, total frames = %.0f , time=%.4f seconds" %(fps,runTotal, timetotal))


    ''' post-processing '''
    classes = ['zero','one','two','three','four','five','six','seven','eight','nine'] 
    correct = 0
    wrong = 0
    for i in range(len(out_q)):
        prediction = classes[out_q[i]]
        ground_truth, _ = listimage[i].split('_',1)
        if (ground_truth==prediction):
            correct += 1
        else:
            wrong += 1
    accuracy = correct/len(out_q)
    print('Correct:',correct,'Wrong:',wrong,'Accuracy:', accuracy)
Example 17
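# Multithreaded InceptionV1 benchmark on a single DPU runner: each thread
# starts at its own batch offset into the preprocessed image list.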
def main(argv):
    global threadnum
    """create runner """
    dpu = runner.Runner(argv[2])

    listimage = os.listdir(calib_image_dir)
    threadAll = []
    threadnum = int(argv[1])
    i = 0
    global runTotall
    runTotall = len(listimage)
    """image list to be run """
    img = []
    for i in range(runTotall):
        path = os.path.join(calib_image_dir, listimage[i])
        image = cv2.imread(path)
        img.append(input_fn.preprocess_fn(image))
    """run with batch """
    time1 = time.time()
    for i in range(int(threadnum)):
        t1 = threading.Thread(target=runInceptionV1,
                              args=(dpu, img, i * batchSize))
        threadAll.append(t1)
    for x in threadAll:
        x.start()
    for x in threadAll:
        x.join()

    time2 = time.time()

    timetotal = time2 - time1
    fps = float(runTotall / timetotal)
    print("%.2f FPS" % fps)

    del dpu
Example 18
def evaluate_image(load_checkpoint):
    """
    Creates a convolutional network, optionally loads its weights from file, and
    runs it on a random test image from CIFAR-10. Visualizes top predictions.

    Args:
        load_checkpoint: Boolean flag indicating if weights should be loaded from file.
    """
    # Load data from file into memory.
    class_names, _, _, images_test, labels_test = load_cifar10()

    # Create network.
    my_model = model.Model()

    # Create runner, optionally load weights from file.
    my_runner = runner.Runner(model=my_model)
    if load_checkpoint:
        my_runner.load(os.path.join(".", "checkpoints", "my_model"))

    # Evaluate network on a random test image.
    image_index = np.random.randint(0, images_test.shape[0])
    image = images_test[image_index]
    label = labels_test[image_index]
    guess_class, guess_prob = my_runner.run(image)

    # Visualize the result.
    visualize_classification(image, label, guess_class, guess_prob, class_names)
Example 19
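# Generates synthetic training data: for each cell, perturbs one randomly
# chosen model parameter, simulates, and records the noisy transient.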
def GenerateData(
        jobDuration=1e3,  # duration of simulation
        simulation=None,
        yamlFile=None):

    if simulation is None:
        import runner
        simulation = runner.Runner()

    varDictDflt = simulation.params.copy()
    recordedData = []

    import random
    for i in range(numCells):
        # default parameters for model
        varDicti = varDictDflt.copy()
        # could iterate through all keys to randomize more parameters
        randKey = random.choice(list(varDicti))
        varDicti[randKey] += 0.02 * np.random.randn(1)

        # placeholder: perform simulation
        returnDict = dict()  # return results vector
        simulation.simulate(varDicti, returnDict, jobDuration=jobDuration)

        ## do output processing
        data = returnDict['data']
        tRef = data['t'] * 1e3  # turn [s] back to [ms]
        #print(jobDuration)
        caiRef = data[testState] + 0.005 * np.random.randn(np.shape(tRef)[0])

        plt.plot(tRef, caiRef)
        recordedDatai = {'t': tRef, 'Cai': caiRef}
        recordedData.append(recordedDatai)

    return recordedData
Example 20
    def load_model(self, model):
        """Load DPU models for both DNNDK runtime and VART.

        For DNNDK, this method will take the ML model's `*.elf` binary file
        and compile it into a `*.so` file located in the destination directory
        on the target. This makes sure the DNNDK libraries can work
        without problems.

        The ML model file, if not set explicitly, is required to be located
        in the same folder as the bitstream and hwh files.

        The destination folder by default is `/usr/lib`.

        Currently only `*.elf` files are supported as models. The reason is
        that `*.so` usually have to be recompiled targeting a specific
        rootfs.

        For VART, this method will automatically generate the `meta.json` file
        in the same folder as the model file.

        Parameters
        ----------
        model : str
            The name of the ML model binary. Can be absolute or relative path.

        """
        if os.path.isfile(model):
            abs_model = model
        elif os.path.isfile(self.overlay_dirname + "/" + model):
            abs_model = self.overlay_dirname + "/" + model
        else:
            raise ValueError(
                "File {} does not exist.".format(model))
        if not os.path.isdir(XCL_DST_PATH):
            raise ValueError(
                "Folder {} does not exist.".format(XCL_DST_PATH))

        if not model.endswith(".elf"):
            raise RuntimeError("Currently only elf files can be loaded.")
        else:
            if self.runtime == 'dnndk':
                kernel_name = get_kernel_name_for_dnndk(abs_model)
                model_so = "libdpumodel{}.so".format(kernel_name)
                _ = subprocess.check_output(
                    ["gcc", "-fPIC", "-shared", abs_model, "-o",
                     os.path.join(XCL_DST_PATH, model_so)])
            elif self.runtime == 'vart':
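                # Write a minimal meta.json next to the model so that the
                # runner.Runner(runner_folder) call below can find the kernel.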
                model_name, kernel_name = get_kernel_name_for_vart(abs_model)
                runner_folder = os.path.dirname(os.path.abspath(abs_model))
                meta_json = os.path.join(runner_folder, 'meta.json')
                with open(meta_json, 'w') as f:
                    f.write('{\n')
                    f.write('"lib": "libvart-dpu-runner.so",\n')
                    f.write('"filename": "{}",\n'.format(model_name))
                    f.write('"kernel": [ "{}" ]\n'.format(kernel_name))
                    f.write('}')
                self.runner = runner.Runner(runner_folder)[0]
            else:
                raise ValueError('Runtime can only be dnndk or vart.')
Example 21
def test3():
  simulation = runner.Runner()
  yamlVarFile = "inputParams.yaml"
  testState = "Cai"
  # parameters to vary
  variedParamDict = {
    "kon":  [0.5,0.2],
    "koff":  [5.0,0.2],
  }

  # get trial simulation results 
  returnData = []
  for i in range(2):
    varDict={
            'kon':0.4+i*0.1,'koff':4.4}

    returnDict=simulate(
      varDict=varDict,        # dictionary of parameters to be used for simulation
 #   returnDict=dict(),    # dictionary output to be returned by simulation
      jobDuration = 25e3   # [ms]
    )

    cai = analyze.GetData('Cai',returnDict['data'])
    returnData.append( cai )
    

  if 1:
    raise RuntimeError("DSF")
    # once above works, do
    traces = cellDetect()
    for trace in traces:
        outputObj=outPutList["Cai"]
        outputObj.vals = cai
        outputObj.ts   = ts   

    results.cellNum=1
    allResults.append(results)
    print("make plots with cell number, legend") 

  timeRange = [0, 2] # where to measure 
  vals = cai
  outputList= { 
    #"Cai":OutputObj("Cai","val_vs_time",[  0, 2],
    #[1,0.5,0.15],timeInterpolations=[  0,1,2]) # check that interpolated values at 0, 100, 200 are 1, 0.5 ...
    "Cai":OutputObj("Cai","val_vs_time",timeRange,
    vals,timeInterpolations=ts) # check that interpolated values at 0, 100, 200 are 1, 0.5 ...
    }

  results = run(
    simulation,
    yamlVarFile = yamlVarFile,            
    variedParamDict = variedParamDict,
    jobDuration = 30e3, # [ms]
    numRandomDraws = 8,  
    numIters = 5,    
    #sigmaScaleRate = 0.45,
    outputList = outputList,
    debug = True
  )
Example 22
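# Multithreaded CIFAR-10 DPU benchmark: threads each classify a slice of the
# preprocessed images; accuracy is scored against labels in the filenames.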
def app(image_dir,threads,model):

    listimage=os.listdir(image_dir)
    runTotal = len(listimage)

    global out_q
    out_q = [None] * runTotal

    all_dpu_runners = []
    threadAll = []

    for i in range(threads):
        all_dpu_runners.append(runner.Runner(model)[0])

    ''' preprocess images '''
    img = []
    for i in range(runTotal):
        path = os.path.join(image_dir,listimage[i])
        img.append(preprocess_fn(path))

    '''run threads '''
    start=0
    for i in range(threads):
        if (i==threads-1):
            end = len(img)
        else:
            end = start+(len(img)//threads)
        in_q = img[start:end]
        t1 = threading.Thread(target=runDPU, args=(i,start,all_dpu_runners[i], in_q))
        threadAll.append(t1)
        start=end

    time1 = time.time()
    for x in threadAll:
        x.start()
    for x in threadAll:
        x.join()
    time2 = time.time()
    timetotal = time2 - time1

    fps = float(runTotal / timetotal)
    print("FPS=%.2f, total frames = %.0f , time=%.4f seconds" %(fps,runTotal, timetotal))


    ''' post-processing '''
    classes = ['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']  
    correct = 0
    wrong = 0
    print('output buffer length:',len(out_q))
    for i in range(len(out_q)):
        argmax = np.argmax((out_q[i]))
        prediction = classes[argmax]
        ground_truth, _ = listimage[i].split('_', 1)
        if (ground_truth==prediction):
            correct += 1
        else:
            wrong += 1
    accuracy = correct/len(out_q)
    print('Correct:',correct,'Wrong:',wrong,'Accuracy:', accuracy)
Example 23
    def test_runner_logger_use_elsewhere(self, args):
        r = runner.Runner(
            args["network"], args["AMQPURL"], size=args["size"], seed=args["seed"]
        )
        assert r.AMQPURL is not None
        r._attach_data_collector("")
        assert r.logger is not None
        r.logger.debug("use elsewhere")
Example 24
    def __init__(self, config_map):
        super().__init__(config_map)
        if "addr_key" not in self.config_map["bot"]:
            raise Exception("invalid config (not exit addr key)")

        self.addr_loader = addr_data_loader.AddrDataLoader()
        self.runner = runner.Runner()
        self.addr_url = self.config_map["bot"]["addr_url"]
        self.addr_key = self.config_map["bot"]["addr_key"]
Example 25
def runApp(batchSize, threads, image_dir, model):

    listImage = os.listdir(image_dir)

    runTotal = len(listImage)

    global out_q
    out_q = [None] * runTotal
    g = xir.graph.Graph.deserialize(pathlib.Path(model))
    subgraphs = get_subgraph(g)
    assert len(subgraphs) == 1
    all_dpu_runners = []
    for i in range(threads):
        all_dpu_runners.append(runner.Runner(subgraphs[0], "run"))
    """ pre-process all images """
    img = []
    print(listImage)
    for i in range(len(listImage)):

        image = cv2.imread(os.path.join(image_dir, listImage[i]))
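        # debug: show each image before inference (blocks until a key is pressed)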
        cv2.imshow('test', image)
        cv2.waitKey()
        # image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
        # image = cv2.resize(image,200,200)
        image = image.reshape(-1, 200, 200, 3).astype('float32')
        image = image / 255.0
        img.append(image)
    """run with batch """
    threadAll = []

    start = 0

    for i in range(threads):
        if (i == threads - 1):
            end = len(img)
        else:
            end = start + (len(img) // threads)
        in_q = img[start:end]
        t1 = threading.Thread(target=runDPU,
                              args=(i, start, all_dpu_runners[i], in_q,
                                    listImage))
        threadAll.append(t1)
        start = end
    time1 = time.time()
    for x in threadAll:
        x.start()
    for x in threadAll:
        x.join()
    time2 = time.time()
    timetotal = time2 - time1

    fps = float(runTotal / timetotal)
    print("FPS=%.2f, total frames = %.0f , time = %.4f seconds" %
          (fps, runTotal, timetotal))

    return
Example 26
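# ResNet-50 benchmark with FPGA-accelerated pre-processing: images are prepared
# by the pp_pipeline_accel kernel, then fanned out to DPU runner threads.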
def main(argv):
    global threadnum

    listimage = os.listdir(calib_image_dir)
    threadAll = []
    threadnum = int(argv[1])
    i = 0
    global runTotall
    runTotall = len(listimage)
    g = xir.graph.Graph.deserialize(pathlib.Path(argv[2]))
    subgraphs = get_subgraph(g)
    assert len(subgraphs) == 1  # only one DPU kernel
    all_dpu_runners = []
    for i in range(int(threadnum)):
        all_dpu_runners.append(runner.Runner(subgraphs[0], "run"))
    """image list to be run """
    xclbin_p = str("/mnt/dpu.xclbin")
    kernelName_p = "pp_pipeline_accel"
    deviceIdx_p = 0
    fpga_pp = waa_rt.PreProcess(xclbin_p, kernelName_p, deviceIdx_p)
    time1 = int(round(time.time() * 1000))
    img = []
    for i in range(runTotall):
        path = os.path.join(calib_image_dir, listimage[i])
        image = cv2.imread(path)
        rows, cols, channels = image.shape
        image = fpga_pp.preprocess_input(image, rows, cols)
        img.append(image)

    time_pre = int(round(time.time() * 1000))

    start = 0
    for i in range(int(threadnum)):
        if (i == threadnum - 1):
            end = len(img)
        else:
            end = start + (len(img) // threadnum)
        t1 = threading.Thread(target=runResnet50,
                              args=(all_dpu_runners[i], img[start:end],
                                    len(img[start:end])))
        threadAll.append(t1)
        start = end
    for x in threadAll:
        x.start()
    for x in threadAll:
        x.join()

    time2 = int(round(time.time() * 1000))
    timetotal = time2 - time1
    fps = float(runTotall * 1000 / timetotal)
    #print("Pre time: %d ms" %(time_pre - time1))
    #print("DPU + post time: %d ms" %(time2 - time_pre))
    #print("Total time : %d ms" %timetotal)
    #print("Total frames : %d" %len(img))
    print("Performance : %.2f FPS" % fps)
Example 27
    def test_does_remember_jobs(self):
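        # queue a job, then recreate the Runner and verify the persisted job
        # is still executed after the restart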
        with tempfile.NamedTemporaryFile(mode='r') as outfile:
            self.runner.add_job('echo "%d" >> %s\n' % (0, outfile.name))
            self.runner = runner.Runner(cancel='cat %s' %
                                        self.cancel_file.name,
                                        queue=self.queue)
            self.runner.should_cancel = self.cancel_at([])

            self.runner.run()

            result = outfile.read()
        self.assertEqual(result, '0\n')
Example 28
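# Runs the dynamics and chemistry static-analysis models over a fixed set of
# SMC configurations and returns paired results (per-function data dropped).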
def collectSMCSummaryData(dynxml):

    config = {}
    config['X Problem Size'] = 128
    config['Y Problem Size'] = 128
    config['Z Problem Size'] = 128
    config['X Block Size'] = 128
    config['Y Block Size'] = 128
    config['Z Block Size'] = 128
    config['$/thread group (kB)'] = 1024

    tests = [('lidryer'    , {'$/thread group (kB)':    0, 'Num Species': 9  }), \
             ('drm19'      , {'$/thread group (kB)':    0, 'Num Species': 21 }), \
             ('grimech30'  , {'$/thread group (kB)':    0, 'Num Species': 53 }), \
             ('hai'        , {'$/thread group (kB)':    0, 'Num Species': 71 }), \
             ('prf_ethanol', {'$/thread group (kB)':    0, 'Num Species': 107}), \
             ('lidryer'    , {'$/thread group (kB)':  1e9, 'Num Species': 9  }), \
             ('drm19'      , {'$/thread group (kB)':  1e9, 'Num Species': 21 }), \
             ('grimech30'  , {'$/thread group (kB)':  1e9, 'Num Species': 53 }), \
             ('hai'        , {'$/thread group (kB)':  1e9, 'Num Species': 71 }), \
             ('prf_ethanol', {'$/thread group (kB)':  1e9, 'Num Species': 107}), \
             ('lidryer',     {'$/thread group (kB)': 1024, 'Num Species':   9, 'X Block Size': 16, 'Y Block Size': 16}), \
             ('drm19',       {'$/thread group (kB)': 1024, 'Num Species':  21, 'X Block Size': 16, 'Y Block Size':  8}), \
             ('grimech30',   {'$/thread group (kB)': 1024, 'Num Species':  53, 'X Block Size': 8 , 'Y Block Size':  8}), \
             ('hai',         {'$/thread group (kB)': 1024, 'Num Species':  71, 'X Block Size': 8 , 'Y Block Size':  4}), \
             ('prf_ethanol', {'$/thread group (kB)': 1024, 'Num Species': 107, 'X Block Size': 8 , 'Y Block Size':  4})]

    dynSA = analyze.StaticAnalysis(dynxml)

    results = []
    for (c, t) in tests:
        config.update(t)
        chemSA = analyze.StaticAnalysis(os.path.join(baseXMLDir, '%s.xml' % c))
        results.append((runner.Runner(dynSA).runModel(config),
                        runner.Runner(chemSA).runModel(config)))
        del results[-1][0]['functions']
        del results[-1][1]['functions']

    return results
Example 29
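# Trains or evaluates a Double-DQN agent; in training mode, saves loss, mean-Q,
# and reward evolution plots under ./figs/.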
def main():

    np.random.seed(1)

    args = parser.parse_args()
    train = bool(args.train)
    
    print("Running a single instance simulation...")
    agent_class = DoubleDQN_FC(skip_frame=4,
                         epsilon_init=1,
                         epsilon_final=0.25,
                         memory_size=2**14, #16384
                         batch_size=64,
                         discount=0.99, 
                         train=train, 
                         my_model=args.load_model)

    print([(key, agent_class.__dict__[key]) for key in agent_class.__dict__.keys()])
    my_runner = runner.Runner(args.env, agent_class, args.verbose)
    if not args.display:
        final_reward, loss_evo, reward_evo, avgq_evo = my_runner.loop(args.ngames, args.niter, train)
    else:
        final_reward, loss_evo, reward_evo, avgq_evo = my_runner.loop(args.ngames, args.niter,
            train, render=True)
    print("Obtained a final reward of {}".format(final_reward))


    ##
    save_path = './figs/'

    if train:
        # saving figures
        plt.plot(loss_evo)
        plt.title('loss evolution')
        #plt.show()
        plt.savefig(save_path + 'loss_evolution.png')
        plt.clf()

        plt.plot(avgq_evo)
        plt.title('average Q evolution')
        #plt.show()
        plt.savefig(save_path + 'mean_q_evolution.png')
        plt.clf()

        plt.plot(np.arange(args.ngames),reward_evo)
        plt.title('reward evolution')
        #plt.show()
        plt.savefig(save_path + 'reward_evolution.png')
        plt.clf()
Example 30
def run_agent(nb_episodes, args):
    env_class = environment.Environment()
    agent_class = arg_dico[args.agent]

    print("Running a single instance simulation...")
    name = args.agent
    my_runner = runner.Runner(env_class, agent_class(env_class), name)
    if name in ["RD", "QL"]:
        final_reward = my_runner.loop(nb_episodes)
        plot_results([final_reward], [args.agent])
    elif name in ["PI", "VI"]:
        policy, V = my_runner.loop(nb_episodes)
        plot_policy(policy, V, name)