def getProfile(files):
    """Average execution time and overhead across a set of profile-log pairs.

    Each entry in *files* is a pair: element 0 is an exec-time log and
    element 1 is an overhead log (overhead is doubled, presumably because
    the log captures only half the round trip -- TODO confirm).

    Returns [mean exec time, mean overhead] as a two-element list.
    """
    execTimes = []
    overheads = []
    for pair in files:
        execProf = profiler.Profiler(pair[0])
        overProf = profiler.Profiler(pair[1])
        execTimes.append(execProf.getTime('exec'))
        overheads.append(2 * overProf.getTime('overhead'))
    # Debug output of the raw samples before averaging.
    print(execTimes)
    print(overheads)
    return [np.mean(execTimes), np.mean(overheads)]
def getOverHead(numStages, iters, logDir):
    """Per-stage mean overhead from 'dp_overhead_<stage>_<iter>.json' logs.

    For each of *numStages* stages, reads *iters* profiler logs from
    *logDir* (iteration indices start at 1), doubles each 'overhead'
    reading, and averages the samples as an int.

    Returns a list of *numStages* integer means.
    """
    stageMeans = []
    for stage in range(numStages):
        samples = []
        for run in range(1, iters + 1):
            logFile = pjoin(logDir, 'dp_overhead_{}_{}.json'.format(stage, run))
            sample = 2 * profiler.Profiler(logFile).getTime('overhead')
            # Echo the raw (undoubled) reading for inspection.
            print(sample / 2.)
            samples.append(sample)
        stageMeans.append(np.mean(samples, dtype=int))
    print(stageMeans)
    return stageMeans
def getProfile(name, numStages, iters, logDir):
    """Per-stage mean [exec, memcpy-in, memcpy-out] from profiler logs.

    Reads '<name>_<stage>_<iter>.json' (iteration indices start at 1)
    for every stage/iteration combination, collects
    [exec time, memcpy[0], memcpy[1]] per run, and averages each stage's
    samples column-wise as ints.

    Returns a list of *numStages* three-element lists.
    """
    stageMeans = []
    for stage in range(numStages):
        perRun = []
        for run in range(1, iters + 1):
            logFile = pjoin(logDir, '{}_{}_{}.json'.format(name, stage, run))
            prof = profiler.Profiler(logFile)
            execTime = prof.getTime('exec')
            memcpy = prof.getTime('memcpy')
            perRun.append([execTime, memcpy[0], memcpy[1]])
        # axis=0: average each of the three columns across runs.
        stageMeans.append(list(np.mean(perRun, axis=0, dtype=int)))
    assert (len(stageMeans) == numStages)
    assert (len(stageMeans[0]) == 3)
    return stageMeans
# NOTE(review): this `return text` is the tail end of a function whose `def`
# line lies outside this chunk; indentation reconstructed accordingly.
    return text


if __name__ == "__main__":
    # Voice assistant loop: greet as "Gizmo", then repeatedly listen and
    # branch on the recognized reply.
    g = ss.speech_syn("Gizmo", "Hi")
    data = ""
    while 1:
        listen()
        data = reply()
        # data = "yes"
        if data == "yes":
            # Fetch the question list once, then speak each labelled prompt
            # and capture the spoken answer from the recorded audio file.
            questions_list = pr.Profiler().fetch_questions()
            while 1:
                print(questions_list)
                labelled_str = pr.Profiler().pro_labelling(questions_list[0])
                g.say(labelled_str)
                listen()
                text = AU.Audio_recognizer(AUDIO_FILE).text()
                print(text)
        # NOTE(review): nesting reconstructed — the "bye" exit is placed in
        # the outer loop since `data` never changes inside the inner loop;
        # confirm against the original file.
        if data == "bye":
            break
# help(AU)
#
print(" ActivationShape {}".format(sessRet.shape)) print(" ActiationSize {}".format(activationSize)) print(" WeightSize {}".format(weightSize)) else: sessRet = sess.run(m.output, feed_dict=feed_dict, options=options, run_metadata=run_metadata) fetched_timeline = timeline.Timeline(run_metadata.step_stats) chrome_trace = fetched_timeline.generate_chrome_trace_format( show_memory=False) with open(fName, 'w') as f: f.write(chrome_trace) f.close() prof = profiler.Profiler(fName, m.layerNames[idx]) execTime = prof.getTime('exec') memcpy = prof.getTime('memcpy') timeList.append([execTime, memcpy[0], memcpy[1]]) timeList = np.mean(timeList, axis=0) totalTimeList.append(timeList) if PRINT: execTime = timeList[0] memIn = timeList[1] memOut = timeList[2] print(" Exec {} us".format(execTime)) print(" MemcpyIn {} us".format(memIn)) print(" MemcpyOut {} us".format(memOut)) assert (len(totalTimeList) == len(m.layers)) for idx, time in enumerate(zip(totalTimeList[:-1], totalTimeList[1:])): # print("{} + {}".format(m.nameList[idx], m.nameList[idx+1]))
if __name__ == "__main__":
    # Review-collection loop: greet as "Gizmo", wait for a "yes" reply,
    # then walk the fetched question tags one by one, speaking each prompt
    # and processing the recognized answer into `review_list`.
    g = ss.speech_syn("Gizmo", "Hi")
    review_list = []
    while 1:
        listen()
        data = reply()
        print("FROM __MAIN__ => ", data)
        # data = "yes"
        if data == "yes":
            tag_value = pr.Profiler().pro_fetch_questions()
            index = 0
            while 1:
                # Speak the labelled prompt for the current tag, then record
                # and transcribe the user's spoken answer.
                labelled_str = pr.Profiler().pro_labelling(tag_value[index])
                g.say(labelled_str)
                listen()
                text = AU.Audio_recognizer(AUDIO_FILE).text()
                # print(text)
                # pro_process returns a sequence whose first element flags
                # whether the answer was accepted (1 == keep).
                s = pr.Profiler().pro_process(tag_value[index][0], text)
                if s[0] == 1:
                    review_list.append(s)
                index += 1
                if index == len(tag_value):
                    # All tags handled: dump the collected reviews and leave
                    # the inner loop.
                    print("-->", review_list)
                    break