def dynamic(self): values = [] valuescopy = [] counter = 0 start = time.clock() for i in self.S: #print("i",i) if values == []: values.append(i) else: valuescopy = copy.copy(values) #print ("length",len(valuescopy)) while counter < len(valuescopy): #print(len(valuescopy), " ", counter) values.append(valuescopy[counter] + i) if valuescopy[counter] + i == self.t: print("Target value found using subset of ", self.S) break #print("values",values) counter += 1 values.append(i) counter = 0 print(values) end = time.clock() print(end - start)
def time_solve(grid):
    # Solve one puzzle and time it.  NOTE(review): time.clock() is CPU time
    # on Unix and was removed in Python 3.8; perf_counter() would be the
    # modern replacement (Python 2 code here, so left as-is).
    start = time.clock()
    values = solve(grid)
    t = time.clock()-start
    ## Display puzzles that take long enough
    if showif is not None and t > showif:
        display(grid_values(grid))
        if values: display(values)
        print '(%.2f seconds)\n' % t
    # Return (elapsed seconds, whether the grid was solved).
    return (t, solved(values))
def time_solve(grid):
    # Duplicate of the timing wrapper above, with slightly different spacing.
    # Solves one puzzle, optionally displays slow ones, and returns timing.
    start = time.clock()
    values = solve(grid)
    t = time.clock() - start
    ## Display puzzles that take long enough
    if showif is not None and t > showif:
        display(grid_values(grid))
        if values: display(values)
        print '(%.2f seconds)\n' % t
    # (elapsed seconds, solved flag) for aggregation by the caller.
    return (t, solved(values))
def _recompute_path(self, map, start, end):
    # Run the path search from start to end on the given map.
    # map.move_cost is passed twice: once as the step cost and once as the
    # heuristic estimate — presumably intentional; TODO confirm against
    # PathFinder's signature.
    pf = PathFinder(map.successors, map.move_cost, map.move_cost)
    t = time.clock()
    pathlines = list(pf.compute_path(start, end))
    # NOTE(review): dt is computed but never used or reported.
    dt = time.clock() - t
    if pathlines == []:
        print "No path found"
        return pathlines
    else:
        print "Found path (length %d)" % len(pathlines)
        return pathlines
def __init__(self, session, data, val=None):
    # Cover-selection screen: pops one (title, output path) pair from `data`,
    # builds the widgets, then kicks off the cover search.  `val` overrides
    # the configured search source ("0" imdb, "2" csfd, "3" all).
    Screen.__init__(self, session, data)
    self.setTitle(_("EMC Cover Selecter"))
    self["actions"] = HelpableActionMap(self, "EMCimdb",
    {
        "EMCEXIT": self.exit,
        #"green": self.keySave,
        #"cancel": self.keyClose,
        "EMCOK": self.ok,
    }, -1)
    (title, o_path) = data.pop()
    self.m_title = title
    self["m_info"] = Label(("%s") % self.m_title)
    self.o_path = o_path
    self.menulist = []
    self["menulist"] = imdblist([])
    self["poster"] = Pixmap()
    self["info"] = Label(_("Searching for %s") % self.m_title)
    self["menulist"].onSelectionChanged.append(self.showInfo)
    self.check = "false"          # "true" once results are ready
    self.path = "/tmp/tmp.jpg"    # scratch file for the preview image
    self.cover_count = 0
    # Timing state for the "found N covers in X sec" summary.
    self.einzel_start_time = time.clock()
    self.einzel_elapsed = time.clock()
    self.einzel_end_time = time.clock()
    self.picload = ePicLoad()
    #self.picload_conn = self.picload.PictureData.connect(self.showCoverCallback)
    self["info"].setText((_("found") + " %s " + _("covers")) % (self.cover_count))
    if val is not None:
        # Explicit source requested by the caller.
        if val == "0":
            self.searchCover(self.m_title)
        elif val == "2":
            self.searchcsfd(self.m_title)
        elif val == "3":
            self.searchcsfd(self.m_title)
            self.searchtvdb(self.m_title)
            self.searchCover(self.m_title)
    else:
        # Fall back to the configured single-search source.
        if config.EMC.imdb.singlesearch.value == "0":
            self.searchCover(self.m_title)
        elif config.EMC.imdb.singlesearch.value == "1":
            self.searchtvdb(self.m_title)
        elif config.EMC.imdb.singlesearch.value == "2":
            self.searchcsfd(self.m_title)
        elif config.EMC.imdb.singlesearch.value == "3":
            self.searchcsfd(self.m_title)
            self.searchtvdb(self.m_title)
            self.searchCover(self.m_title)
def try_at_random(self):
    """Randomly try subsets of self.S until one sums to the target self.t.

    Prints each attempt and the elapsed time.  WARNING: does not terminate
    if no subset of S sums to t (preserved original behavior).
    """
    # time.clock() was removed in Python 3.8; perf_counter() measures
    # elapsed wall time.
    start = time.perf_counter()
    candidate = []
    total = 0
    while total != self.t:
        # Pick a random subset size in [0, n] and sample it from S.
        candidate = sample(self.S, randint(0, self.n))
        total = sum(candidate)
        print("(Random) Trying: ", candidate, ", sum:", total)
    if total == self.t:
        print("Success!")
    end = time.perf_counter()
    print(end - start)
def get_performance_sort():
    """Time list.sort() on doubling input sizes (2..2**16) and print a
    size/seconds table, one row per trial."""
    scores = {}
    # for-loop over range replaces the manual while/counter idiom.
    for trial in range(1, 17):
        numbers = [random.randint(1, 9) for _ in range(2 ** trial)]
        # perf_counter() replaces time.clock(), removed in Python 3.8.
        now = time.perf_counter()
        numbers.sort()
        done = time.perf_counter()
        scores[trial] = done - now
    for i in scores:
        print("%d\t%f" % (2 ** i, scores[i]))
def get_performance_sum():
    """Time a manual summation loop on doubling input sizes (2..2**16) and
    print a size/seconds table, one row per trial."""
    scores = {}
    for trial in range(1, 17):
        numbers = [random.randint(1, 9) for _ in range(2 ** trial)]
        # perf_counter() replaces time.clock(), removed in Python 3.8.
        now = time.perf_counter()
        # `total` instead of the original `sum`, which shadowed the builtin.
        total = 0
        for d in numbers:
            total = total + d
        done = time.perf_counter()
        scores[trial] = done - now
    for i in scores:
        print("%d\t%f" % (2 ** i, scores[i]))
def progress(self, i, n):
    # Throttled progress reporter: prints at most once per self.freq seconds.
    # i: uncompressed bytes so far; n: compressed bytes so far — TODO confirm
    # against the caller.  Uses self.start set in __init__, so the clock
    # source must stay consistent with that block (both use time.clock();
    # NOTE(review): time.clock() was removed in Python 3.8).
    now = time.clock()
    if not self.last:
        self.last = now
    if now - self.last > self.freq:
        diff = now - self.start
        self.last = now
        i_rate = float(i) / diff
        self.rates.append(i_rate)
        # Once the sliding window is more than half full, report the
        # averaged rate ('a'); otherwise the instantaneous rate ('i').
        if len(self.rates) > self.rates.maxlen / 2:
            rate = sum(self.rates) / len(self.rates)
            rate_type = 'a'
        else:
            rate = i_rate
            rate_type = 'i'
        msg = '{}: Compressed: {} Mb. Downloaded, Uncompressed: {:6.2f} Mb, {:5.2f} Mb / s ({})'\
            .format(
                self.message, int(int(n) / (1024 * 1024)),
                round(float(i) / (1024. * 1024.), 2),
                round(float(rate) / (1024 * 1024), 2), rate_type)
        self.printf(msg)
def search_done(self):
    # All cover lookups finished: unlock the UI, refresh the selection info
    # and show the "found N covers in X sec" summary.
    #if self.search_check == "2":
    self.check = "true"
    self.showInfo()
    self.einzel_end_time = time.clock()
    self.einzel_elapsed = (self.einzel_end_time - self.einzel_start_time)
    self["info"].setText(_("found %s covers in %.1f sec") % (self.cover_count, self.einzel_elapsed))
def progress(self, i, n):
    # Throttled progress reporter (at most one line per self.freq seconds).
    # i: uncompressed bytes so far; n: compressed bytes so far — TODO confirm.
    # NOTE(review): time.clock() was removed in Python 3.8; it must match the
    # clock used for self.start in __init__.
    import time
    now = time.clock()
    if not self.last:
        self.last = now
    if now - self.last > self.freq:
        diff = now - self.start
        self.last = now
        i_rate = float(i)/diff
        self.rates.append(i_rate)
        # Averaged rate ('a') once the window is over half full, else
        # the instantaneous rate ('i').
        if len(self.rates) > self.rates.maxlen/2:
            rate = sum(self.rates) / len(self.rates)
            rate_type = 'a'
        else:
            rate = i_rate
            rate_type = 'i'
        self.printf("{}: Compressed: {} Mb. Downloaded, Uncompressed: {:6.2f} Mb, {:5.2f} Mb / s ({})".format(
            self.message,int(int(n)/(1024*1024)),
            round(float(i)/(1024.*1024.),2),
            round(float(rate)/(1024*1024),2), rate_type))
def showCovers(self, data, title):
    # Parse the IMDb search HTML in `data`, append one list entry per cover
    # found, then publish the list and the timing summary.
    print "EMB iMDB: Cover Select - %s" % title
    #print data
    bild = re.findall('<img src="http://ia.media-imdb.com/images/(.*?)".*?<a href="/title/(.*?)/".*?">(.*?)</a>.*?\((.*?)\)', data, re.S)
    if bild:
        for each in bild:
            #print self.cover_count
            self.cover_count = self.cover_count + 1
            imdb_title = each[2]
            imdb_url = each[0]
            # Keep only the image id before the first dot, then request a
            # 320px-wide variant via IMDb's size suffix.
            imdb_url = re.findall('(.*?)\.', imdb_url)
            extra_imdb_convert = "._V1_SX320.jpg"
            imdb_url = "http://ia.media-imdb.com/images/%s%s" % (imdb_url[0], extra_imdb_convert)
            self.menulist.append(showCoverlist(imdb_title, imdb_url, self.o_path, "imdb: "))
    else:
        self["info"].setText(_("Nothing found for %s") % title)
        print "EMC iMDB: keine infos gefunden - %s" % title
    self["menulist"].l.setList(self.menulist)
    self["menulist"].l.setItemHeight(28)
    self.search_check += 1
    #if not config.EMC.imdb.singlesearch.value == "3":
    self.check = "true"
    self.showInfo()
    self.einzel_end_time = time.clock()
    self.einzel_elapsed = (self.einzel_end_time - self.einzel_start_time)
    self["info"].setText(_("found %s covers in %.1f sec") % (self.cover_count, self.einzel_elapsed))
def __init__(self, message='Download', printf = _print):
    # Progress-printer state: start timestamp, display label, output sink
    # and a sliding window of the last 10 rate samples (consumed by
    # progress()).  NOTE(review): time.clock() was removed in Python 3.8;
    # the clock here must stay consistent with the one used in progress().
    import time
    from collections import deque
    self.start = time.clock()
    self.message = message
    self.rates = deque(maxlen=10)
    self.printf = printf
def search_done(self):
    # All pending lookups finished: publish the collected cover list,
    # unlock the UI and show the "found N covers in X sec" summary.
    self["menulist"].l.setList(self.menulist)
    self["menulist"].l.setItemHeight(28)
    self.check = "true"
    self.showInfo()
    self.einzel_end_time = time.clock()
    self.einzel_elapsed = (self.einzel_end_time - self.einzel_start_time)
    self["info"].setText((_("found") + " %s " + _("covers in") + " %.1f " + _("sec")) % (self.cover_count, self.einzel_elapsed))
def search_done(self):
    # Variant of search_done that takes the row height from image()
    # (presumably a skin-dependent helper — TODO confirm) instead of the
    # hard-coded 28 used elsewhere.
    self["menulist"].l.setList(self.menulist)
    self["menulist"].l.setItemHeight(image())
    self.check = "true"
    self.showInfo()
    self.einzel_end_time = time.clock()
    self.einzel_elapsed = (self.einzel_end_time - self.einzel_start_time)
    self["info"].setText((_("found") + " %s " + _("covers in") + " %.1f " + _("sec")) % (self.cover_count, self.einzel_elapsed))
def start(self):
    """Start the measurement process.

    Records both a wall-clock and a processor-clock reference point;
    stop() records the matching end points.
    """
    # NOTE(review): time.clock() was removed in Python 3.8.  It must be
    # changed in lockstep with stop(), which subtracts against these values.
    self.start_time = time.time()
    self.start_clock = time.clock()
    pass
def stop(self):
    """Stop the measurement process.

    Records the end points matching those captured by start().
    """
    # NOTE(review): time.clock() was removed in Python 3.8.  It must be
    # changed in lockstep with start(), whose values these pair with.
    self.stop_time = time.time()
    self.stop_clock = time.clock()
    pass
def exhaustive(self):
    """Brute-force subset-sum: enumerate every subset of self.S, then stop
    at the first one whose sum equals self.t, printing it with the elapsed
    time.  Prints nothing about timing when no subset matches (preserved
    original behavior)."""
    # perf_counter() replaces time.clock(), removed in Python 3.8.
    start = time.perf_counter()
    total = 0
    subsets = [[]]
    pending = []  # renamed from `next`, which shadowed the builtin
    for j in self.S:
        # Extend every existing subset with the new element.
        for i in subsets:
            pending.append(i + [j])
        subsets += pending
        pending = []
    print(subsets)
    for i in subsets:
        total = sum(i)
        if total == self.t:
            print(i, " is subset of ", self.S, "that equals ", self.t)
            end = time.perf_counter()
            print(end - start)
            break
def main():
    """Run the pet-image classification pipeline end to end and print the
    total elapsed runtime."""
    # Collect start time.  perf_counter() replaces time.clock(), which was
    # removed in Python 3.8.
    start_time = time.perf_counter()

    # Create & retrieve command line arguments.
    in_arg = get_input_args()

    # Pet image labels: dict with key=filename, value=file label, used to
    # check the accuracy of the classifier function.
    answers_dic = get_pet_labels()

    # Classifier labels using in_arg.arch; results collected in result_dic.
    result_dic = classify_images()

    # Adjust result_dic to record whether the classifier correctly labeled
    # images as 'a dog' or 'not a dog' (regardless of breed).
    adjust_results4_isadog()

    # Statistics of the run.
    results_stats_dic = calculates_results_stats()

    # Print summary results, incorrect classifications of dogs and breeds
    # if requested.
    print_results()

    # Collect end time and report overall runtime.
    end_time = time.perf_counter()
    tot_time = end_time - start_time
    print("\n** Total Elapsed Runtime:", tot_time)
def showCovers_csfd(self, data, title):
    # Parse a csfd.cz response: either a search-result page (first branch)
    # or a single movie detail page (else branch), collecting poster URLs.
    bild = re.findall('<img src=\"(//img.csfd.cz/files/images/film/posters/.*?)\".*?<h3 class="subject"><a href="(.*?)" class="film c.">(.*?)</a>.*?</li>', data, re.DOTALL | re.IGNORECASE)
    if bild:
        # Search results: one cover per hit, plus a follow-up detail fetch.
        for each in bild:
            print "EMC csfd: Cover Select - %s" % title
            self.cover_count = self.cover_count + 1
            csfd_title = each[2]
            csfd_detail_url = "http://www.csfd.cz" + each[1]
            print "csfd_detail_url: %s" % csfd_detail_url
            csfd_url = "http:" + each[0]
            print "csfd_url: %s" % csfd_url
            self.menulist.append(showCoverlist(csfd_title, csfd_url, self.o_path, 'csfd: '))
            self.searchcsfd_detail(csfd_detail_url, csfd_title)
    else:
        # Detail page: take the title from <title>…| when present.
        title_s = re.findall('<title>(.*?)\|', data, re.S)
        if title_s:
            csfd_title = title_s[0]
            print "EMC iMDB csfd: Movie found - %s" % csfd_title
        else:
            csfd_title = title
        bild = re.findall('<img src="(//img.csfd.cz/files/images/film/posters/.*?)" alt="poster"', data, re.DOTALL | re.IGNORECASE)
        if bild:
            print "EMC iMDB csfd: Cover Select - %s" % title
            self.cover_count = self.cover_count + 1
            csfd_url = "http:" + bild[0].replace('\\','').strip()
            print "csfd_url: %s" % csfd_url
            self.menulist.append(showCoverlist(csfd_title, csfd_url, self.o_path, "csfd: "))
            # Additional posters from the "Plakáty" gallery table, if any.
            bild = re.findall('<h3>Plak.*?ty</h3>(.*?)</table>', data, re.S)
            if bild:
                bild1 = re.findall('style=\"background-image\: url\(\'(.*?)\'\)\;', bild[0], re.DOTALL | re.IGNORECASE)
                if bild1:
                    for each in bild1:
                        print "EMC iMDB - csfd: Cover Select - %s" % title
                        self.cover_count = self.cover_count + 1
                        csfd_url = "http:" + each.replace('\\','').strip()
                        print "csfd_url: %s" % csfd_url
                        self.menulist.append(showCoverlist(csfd_title, csfd_url, self.o_path, "csfd: "))
                else:
                    print "EMC iMDB csfd 3 : no else covers - %s" % title
            else:
                print "EMC iMDB csfd 2 : no else covers - %s" % title
        else:
            print "EMC iMDB csfd 1 : keine infos gefunden - %s" % title
    self["menulist"].l.setList(self.menulist)
    self["menulist"].l.setItemHeight(28)
    self.search_check += 1
    # In combined mode "3" the last finishing search publishes the summary
    # elsewhere — presumably via search_done(); TODO confirm.
    if not config.EMC.imdb.singlesearch.value == "3":
        self.check = "true"
        self.showInfo()
        self.einzel_end_time = time.clock()
        self.einzel_elapsed = self.einzel_end_time - self.einzel_start_time
        self["info"].setText(("found %s covers in %.1f sec") % (self.cover_count, self.einzel_elapsed))
def __init__(self, session, data):
    # Cover-selection screen: pops one (title, output path) pair from `data`,
    # builds the widgets and starts the configured cover search.
    Screen.__init__(self, session, data)
    self["actions"] = HelpableActionMap(self, "EMCimdb",
    {
        "EMCEXIT": self.exit,
        #"green": self.keySave,
        #"cancel": self.keyClose,
        "EMCOK": self.ok,
    }, -1)
    (title, o_path) = data.pop()
    self.title = title
    self.o_path = o_path
    self.menulist = []
    self["menulist"] = imdblist([])
    self["poster"] = Pixmap()
    self["info"] = Label(_("Searching for %s") % self.title)
    self["menulist"].onSelectionChanged.append(self.showInfo)
    self.check = "false"          # "true" once results are ready
    self.path = "/tmp/tmp.jpg"    # scratch file for the preview image
    self.cover_count = 0
    # Timing state for the "found N covers" summary.
    self.einzel_start_time = time.clock()
    self.einzel_elapsed = time.clock()
    self.einzel_end_time = time.clock()
    self.picload = ePicLoad()
    #self.picload.PictureData.get().append(self.showCoverCallback)
    self["info"].setText((_("found") + " %s " + _("covers")) % (self.cover_count))
    # Dispatch on the configured single-search source.
    if config.EMC.imdb.singlesearch.value == "0":
        self.searchCover(self.title)
    elif config.EMC.imdb.singlesearch.value == "1":
        self.searchtvdb(self.title)
    elif config.EMC.imdb.singlesearch.value == "2":
        self.searchcsfd(self.title)
    elif config.EMC.imdb.singlesearch.value == "3":
        self.searchcsfd(self.title)
        self.searchtvdb(self.title)
        self.searchCover(self.title)
def display_download(self, movie_title, search_title, path):
    # One cover finished downloading: bump counters, refresh status labels
    # and append the entry to the on-screen list.
    self.counter += 1
    self.counter_download = self.counter_download + 1
    self.end_time = time.clock()
    # Scaled elapsed value shown in the list column — presumably for
    # display formatting only; TODO confirm the *10 factor.
    elapsed = (self.end_time - self.start_time) * 10
    self.count = _("%s: %s von %s") % (self.showSearchSiteName, self.counter, self.count_movies)
    self["info"].setText(self.count)
    self["m_info"].setText(movie_title)
    self["download"].setText(_("Download: %s") % str(self.counter_download))
    self.menulist.append(imdb_show(movie_title, path, str(elapsed), "", search_title))
    self["menulist"].l.setList(self.menulist)
    self["menulist"].l.setItemHeight(28)
    # Last movie processed: finish the run.
    if self.count_movies == self.counter:
        self.check = "true"
        self.init_ende()
def display_download(self, movie_title, search_title, path):
    # Variant of display_download that first normalizes the search title
    # (strips extension and configured phrases) before listing it.
    self.counter += 1
    self.counter_download = self.counter_download + 1
    search_title = getMovieNameWithoutPhrases(getMovieNameWithoutExt(search_title))
    self.end_time = time.clock()
    # Scaled elapsed value shown in the list column; TODO confirm the *10.
    elapsed = (self.end_time - self.start_time) * 10
    self.count = ("%s: %s " + _("from") + " %s") % (self.showSearchSiteName, self.counter, self.count_movies)
    self["info"].setText(self.count)
    self["m_info"].setText(movie_title)
    self["download"].setText(_("Download: %s") % str(self.counter_download))
    self.menulist.append(imdb_show(movie_title, path, str(elapsed), "", search_title))
    self["menulist"].l.setList(self.menulist)
    self["menulist"].l.setItemHeight(28)
    # Last movie processed: finish the run.
    if self.count_movies == self.counter:
        self.check = "true"
        self.init_ende()
def imdb(self):
    # Entry point for the batch cover search; guarded against re-entry via
    # the string flag self.running ("true"/"false").
    if self.running == "true":
        print "EMC iMDB: Search already Running."
    elif self.running == "false":
        print "EMC iMDB: Search started..."
        self["done_msg"].show()
        self.no_cover()
        self.running = "true"
        # Reset all per-run counters and UI labels.
        self.counter = 0
        self.counter2 = 0
        self.counter3 = 0
        self.counter_download = 0
        self.counter_exist = 0
        self.counter_no_poster = 0
        self.t_elapsed = 0
        self.menulist = []
        self.count_movies = len(self.m_list)
        self["exist"].setText(_("Exist: %s") % "0")
        self["no_poster"].setText(_("No Cover: %s") % "0")
        self["download"].setText(_("Download: %s") % "0")
        self["done_msg"].setText(_("Searching..."))
        self.counter_a = 0
        self.starttime = 0
        self.t_start_time = time.clock()
        self.s_supertime = time.time()
        self.cm_list = self.m_list[:]
        self.search_list = []
        self.exist_list = []
        self.check = "false"
        self["done_msg"].setText(_("Creating Search List.."))
        # Partition the movies: covers that already exist on disk are shown
        # immediately, the rest are queued for the online search.
        for each in sorted(self.cm_list):
            (title, path) = each
            path = re.sub(self.file_format + "$", '.jpg', path)
            if os.path.exists(path):
                self.counter2 += 1
                print "EMC iMDB: Cover vorhanden - %s" % title
                self.display_exist(title, path)
            else:
                elem2 = (title, path)
                self.search_list.append(elem2)
        #print "exist:", self.exist_list
        #print "search:", self.search_list
        self.imdb_start()
def __init__(self, session, data):
    # Cover-selection screen variant with a search_check counter; note the
    # source dispatch below uses independent `if`s, so several branches can
    # be taken for value "3" plus another match (original behavior kept).
    Screen.__init__(self, session, data)
    self["actions"] = HelpableActionMap(self, "EMCimdb",
    {
        "EMCEXIT": self.exit,
        #"green": self.keySave,
        #"cancel": self.keyClose,
        "EMCOK": self.ok,
    }, -1)
    (title, o_path) = data.pop()
    self.title = title
    self.o_path = o_path
    self.menulist = []
    self["menulist"] = imdblist([])
    self["poster"] = Pixmap()
    self["info"] = Label(_("Searching for %s") % self.title)
    self["menulist"].onSelectionChanged.append(self.showInfo)
    self.check = "false"          # "true" once results are ready
    self.path = "/tmp/tmp.jpg"    # scratch file for the preview image
    self.cover_count = 0
    self.search_check = 0         # number of finished search callbacks
    self.einzel_start_time = time.clock()
    self.picload = ePicLoad()
    #self.picload.PictureData.get().append(self.showCoverCallback)
    if config.EMC.imdb.singlesearch.value == "0":
        self.searchCover(self.title)
        #self.search_done()
    if config.EMC.imdb.singlesearch.value == "1":
        self.searchtvdb(self.title)
        #self.search_done()
    if config.EMC.imdb.singlesearch.value == "2":
        self.searchcsfd(self.title)
        #self.search_done()
    if config.EMC.imdb.singlesearch.value == "3":
        self.searchcsfd(self.title)
        self.searchtvdb(self.title)
        self.searchCover(self.title)
def showCovers_tvdb(self, data, title):
    # Parse a TheTVDB XML response and append the first <poster> entry, if
    # any, to the cover list; then publish the list and (outside combined
    # search mode "3") the timing summary.
    bild = re.findall('<poster>(.*?)</poster>', data)
    if bild:
        print "EMB iMDB: Cover Select - %s" % title
        self.cover_count = self.cover_count + 1
        print "http://www.thetvdb.com/banners/_cache/%s" % bild[0]
        tvdb_url = "http://www.thetvdb.com/banners/_cache/%s" % bild[0]
        print "bild:", tvdb_url
        self.menulist.append(showCoverlist(title, tvdb_url, self.o_path, "tvdb: "))
    else:
        #self["info"].setText(_("Nothing found for %s") % title)
        print "EMC iMDB tvdb: keine infos gefunden - %s" % title
    self["menulist"].l.setList(self.menulist)
    self["menulist"].l.setItemHeight(28)
    self.search_check += 1
    # In combined mode "3" another callback publishes the summary.
    if not config.EMC.imdb.singlesearch.value == "3":
        self.check = "true"
        self.showInfo()
        self.einzel_end_time = time.clock()
        self.einzel_elapsed = (self.einzel_end_time - self.einzel_start_time)
        self["info"].setText(_("found %s covers in %.1f sec") % (self.cover_count, self.einzel_elapsed))
def wrapper(*args, **kwargs):
    """Call the wrapped function, print its name and run time, and return
    its result unchanged."""
    # perf_counter() replaces time.clock(), removed in Python 3.8.
    t = time.perf_counter()
    res = func(*args, **kwargs)
    print(func.__name__, time.perf_counter() - t)
    return res
import time from tree import Tree import parallel_cell_enumeration as p_cell import parallel_masterprob as p_ma import pre_process as pre import subprob as sub # Generate data sets of different scales and then test different seeds. # export OMP_NUM_THREADS=2 (Environment Variable OMP_NUM_THREADS sets the number of threads) if __name__ == "__main__": start_all = time.clock() NUM_CORES = 64 #Parallel pool = Pool(processes=NUM_CORES) pool = None # Different size of test data set M, K, N M_list = [1] #[1, 4, 8, 16, 32] K_list = [2] #[2, 4, 6] N_list = [10] #[4, 8, 12] # Generate test data sets for i_1 in xrange(len(M_list)): for i_2 in xrange(len(K_list)):
def imdb_start(self):
    # Launch up to 10 asynchronous cover lookups from the pending queue.
    # The provider is chosen by config.EMC.imdb.search.value:
    # "0" imdbapi.com, "1" themoviedb.org, "2" ofdb.de, "3" csfd.cz.
    self["done_msg"].setText(_("Searching.."))
    self.starttime = time.time()
    self.run10 = "false"
    for i in xrange(10):
        #if self.search_list:
        if not len(self.search_list) == 0:
            (title, path) = self.search_list.pop()
            # Per-request timestamp; consumed by display_download().
            self.start_time = time.clock()
            if config.EMC.imdb.search.value == "0":
                # imdbapi expects dot-separated words in the query.
                self.name = title.replace(' ','.').replace(':','.').replace('..','.')
                path = re.sub(self.file_format + "$", '.jpg', path)
                search_title = self.name.replace('.',' ')
                if not os.path.exists(path):
                    self.counter3 += 1
                    url = "http://www.imdbapi.com/?t=" + self.name.replace('ö','%F6') ##replace('ö','%F6')
                    print "EMC imdbapi.com:", url
                    getPage(url, timeout = 10).addCallback(self.imdbapi, search_title, path).addErrback(self.errorLoad, search_title)
            if config.EMC.imdb.search.value == "1":
                # themoviedb expects plus-separated words.
                self.name = title.replace(' ','+').replace(':','+').replace('-','').replace('++','+')
                path = re.sub(self.file_format + "$", '.jpg', path)
                search_title = self.name.replace('+',' ')
                if not os.path.exists(path):
                    self.counter3 += 1
                    url = "http://api.themoviedb.org/2.1/Movie.search/de/xml/8789cfd3fbab7dccf1269c3d7d867aff/" + self.name
                    print "EMC themoviedb.org:", url
                    getPage(url, timeout = 10).addCallback(self.themoviedb, search_title, path).addErrback(self.errorLoad, search_title)
            if config.EMC.imdb.search.value == "2":
                self.name = title.replace(' ','+').replace(':','+').replace('-','').replace('++','+')
                path = re.sub(self.file_format + "$", '.jpg', path)
                search_title = self.name.replace('+',' ')
                if not os.path.exists(path):
                    self.counter3 += 1
                    url = "http://ofdbgw.home-of-root.de/search/%s" % self.name
                    #url = "http://ofdbgw.home-of-root.de/search/" + self.name
                    print "EMC ofdb.de:", url
                    getPage(url).addCallback(self.ofdb_search, search_title, path).addErrback(self.errorLoad, search_title)
            if config.EMC.imdb.search.value == "3":
                self.name = title.replace(':', ' ').replace('-', ' ').replace('++', '+')
                path = re.sub(self.file_format + "$", '.jpg', path)
                search_title = urllib.quote(self.name.replace('+', ' '))
                if not os.path.exists(path):
                    self.counter3 += 1
                    url = "http://www.csfd.cz/hledat/?q=%s" % search_title
                    print "EMC csfd.cz:", url
                    getPage(url).addCallback(self.csfd_search, self.name, path).addErrback(self.errorLoad, self.name)
        else:
            # Queue exhausted: nothing more to schedule.
            print "EMC iMDB: MovieList is empty, search is DONE. - BREAK..."
            #self.e_supertime = time.time()
            #total_movie = self.counter3 + self.counter2
            #total_time = self.e_supertime - self.s_supertime
            #avg = (total_time / total_movie)
            #self.done = _("%s Filme in %.1f sec gefunden. Avg. Speed: %.1f sec") % (total_movie, total_time, avg)
            #self["done_msg"].setText(self.done)
            #self.running = "false"
            break
# record the time for each iteration time_iteration = [] print "Start optimizing..." index = 0 global num_node num_node = 0 current_node = 0 #Claim a tree tree = Tree() node = tree.add_node(current_node, theta_L, theta_U) num_node = num_node + 1 start_all = time.clock() print x_all[-1] while index < MAXITER: start = time.clock() print "----------------------------iteration %d---------------------------" % index # Solve the subproblem objOpt, thetaOpt, lamOpt, muOpt = sub.solve_subproblem( y, xBar) #(objOpt) upper bound thetaBar.append(thetaOpt) lamBar.append(lamOpt) muBar.append(muOpt) SUBD = np.amin([objOpt, SUBD])
import time from tree import Tree import parallel_cell_enumeration as p_cell import parallel_masterprob as p_ma import pre_process as pre import subprob as sub # Generate data sets of different scales and then test different seeds. # export OMP_NUM_THREADS=2 (Environment Variable OMP_NUM_THREADS sets the number of threads) if __name__ == "__main__": start_all = time.clock() NUM_CORES = 64 #Parallel pool = Pool(processes=NUM_CORES) pool = None # Different size of test data set M, K, N M_list = [1] #[1, 4, 8, 16, 32] K_list = [3] #[2, 4, 6] N_list = [2] #[4, 8, 12] # Generate test data sets for i_1 in xrange(len(M_list)): for i_2 in xrange(len(K_list)):
# Timing-method demo script (tutorial code).  NOTE(review): it mixes
# Python 2 print statements with function-call prints, so it only runs on
# Python 2; factorial is called below before its (re)definition here —
# presumably an earlier definition exists above this excerpt.

# method 2: timeit.default_timer around a single call
t0 = timeit.default_timer()
factorial(10**5)
print('Elapsed time :', timeit.default_timer() - t0)

#method 3: using cprofile()
import cProfile
def factorial(n):
    # Iterative factorial of n.
    num = 1
    while n >= 1:
        num = num * n
        n = n - 1
    return num
cProfile.run('factorial(10**5)')

#method 4: simple method
import time
# NOTE(review): time.clock() was removed in Python 3.8.
start = time.clock()
s = 'geeks'
U = []
for c in s:
    U.append(c.upper())
print U
elapsed = time.clock()
e1 = elapsed - start
print "Time spent in function is: ", e1
def parseWebpage(self, data, type, title, url, cover_path, season, episode):
    # Callback for one provider response: `type` selects the parser
    # ("movie" → TMDb JSON, "serie" → TheTVDB XML).  Downloads the poster,
    # optionally fetches description data, then refreshes the UI counters.
    data = six.ensure_str(data)
    self.counting += 1
    self.start_time = time.clock()
    if type == "movie":
        list = []
        list = re.findall('"poster_path":"\\\(.*?)".*?"original_title":"(.*?)"', data, re.S)
        if list:
            purl = "http://image.tmdb.org/t/p/%s%s" % (
                config.movielist.imdb.preferred_coversize.value, list[0][0])
            purl = six.ensure_binary(purl)
            self.counter_download += 1
            self.end_time = time.clock()
            # Elapsed milliseconds shown in the list row.
            elapsed = (self.end_time - self.start_time) * 1000
            self.menulist.append(
                self.imdb_show(title, cover_path, '%.1f' % elapsed, "", title))
            if not fileExists(cover_path):
                downloadPage(purl, cover_path)
            if config.movielist.imdb.savetotxtfile.value:
                # Fetch the movie details for the description text file.
                idx = []
                idx = re.findall('"id":(.*?),', data, re.S)
                if idx:
                    iurl = "http://api.themoviedb.org/3/movie/%s?api_key=8789cfd3fbab7dccf1269c3d7d867aff&language=de" % idx[0]
                    iurl = six.ensure_binary(iurl)
                    getPage(iurl, headers={
                        'Content-Type': 'application/x-www-form-urlencoded'
                    }).addCallback(self.getInfos, id, type, cover_path)
        else:
            self.counter_no_poster += 1
            self.menulist.append(
                self.imdb_show(title, cover_path, _("N/A"), "", title))
    elif type == "serie":
        list = []
        list = re.findall('<seriesid>(.*?)</seriesid>', data, re.S)
        if list:
            # Preferred cover index, falling back to 1 if it does not exist.
            x = config.movielist.imdb.thetvdb_standardcover.value
            purl = "https://www.thetvdb.com/banners/_cache/posters/%s-%s.jpg" % (list[0], x)
            if x > 1 and not urlExist(purl):
                x = 1
                purl = "https://www.thetvdb.com/banners/_cache/posters/%s-%s.jpg" % (list[0], x)
            self.counter_download += 1
            self.end_time = time.clock()
            elapsed = (self.end_time - self.start_time) * 1000
            self.menulist.append(
                self.imdb_show(title, cover_path, '%.1f' % elapsed, "", title))
            if not fileExists(cover_path):
                downloadPage(purl, cover_path)
            if config.movielist.imdb.savetotxtfile.value:
                if season and episode:
                    iurl = "https://www.thetvdb.com/api/2AAF0562E31BCEEC/series/%s/default/%s/%s/de.xml" % (list[0], str(int(season)), str(int(episode)))
                    iurl = six.ensure_binary(iurl)
                    getPage(iurl, headers={
                        'Content-Type': 'application/x-www-form-urlencoded'
                    }).addCallback(self.getInfos, id, type, cover_path)
        else:
            self.counter_no_poster += 1
            self.menulist.append(
                self.imdb_show(title, cover_path, _("N/A"), "", title))
    # Refresh the status labels and the on-screen list.
    self.count = ("%s: %s " + _("from") + " %s") % (
        self.showSearchSiteName, self.counting, self.count_total)
    self["info"].setText(self.count)
    self["no_poster"].setText(
        _("No Cover: %s") % str(self.counter_no_poster))
    self["exist"].setText(_("Exist: %s") % str(self.counter_exist))
    self["download"].setText(
        _("Download: %s") % str(self.counter_download))
    self["menulist"].l.setList(self.menulist)
    self["menulist"].l.setItemHeight(self.itemHeight)
    self.check = True
    # Last response received: show the overall timing summary.
    if self.counting == self.count_total:
        self.e_supertime = time.time()
        total_time = self.e_supertime - self.s_supertime
        avg = (total_time / self.count_total)
        self.done = ("%s " + _("movies in") + " %.1f " + _("sec found. Avg. Speed:") + " %.1f " + _("sec.")) % (self.count_total, total_time, avg)
        self["done_msg"].setText(self.done)
        self.running = False
        self.showInfo()
def parallel_cell_numeration(coefficients, nCuts, dim, threshold, pool=None, certain_hyperplane=0):
    '''
    Get the thetaB_list of all the regions.

    coefficients: the coefficients of all hyperplanes.
    nCuts: the number of hyperplanes.
    dim: the dimension of the space.
    threshold: the tolerance of the distance from a point to a hyperplane.
    pool: optional multiprocessing pool; when None the search runs serially.
    certain_hyperplane: assume the sign of this hyperplane does not change.
    '''
    thetaB_list = []
    # BFS bookkeeping: regions waiting to be expanded / already expanded.
    openset = []
    closedset = []
    openset.append(initialize_region(coefficients, nCuts, dim, threshold))
    print('\nfinding adjacent regions...')
    # `is None` instead of `== None` per PEP 8.
    if pool is None:
        # Serial breadth-first traversal of the region adjacency graph.
        while openset:
            region = openset.pop(0)
            closedset.append(region)
            adj_regions = cal_adjacency_regions(coefficients, nCuts, dim,
                                                region, certain_hyperplane,
                                                threshold)
            for region in adj_regions:
                # Enqueue only regions never seen before.
                if region not in openset and region not in closedset:
                    openset.append(region)
    else:
        # Parallel frontier expansion: fan each frontier region out to the
        # pool, merge the results, then deduplicate the new frontier.
        # perf_counter() replaces time.clock(), removed in Python 3.8.
        start = time.perf_counter()
        while openset:
            closedset.extend(openset)
            results = [
                pool.apply_async(cal_adjacency_regions,
                                 args=(coefficients, nCuts, dim, region,
                                       certain_hyperplane, threshold, None,
                                       closedset))
                for region in openset
            ]
            openset = []
            for p in results:
                openset.extend(p.get())
            # Remove duplicates from the merged frontier.
            openset.sort()
            openset = list(openset for openset, _ in itertools.groupby(openset))
        end = time.perf_counter()
        time_adj_region = end - start
        print('\nFinish finding adjacent regions using: %0.2f' % (time_adj_region))
    # Every discovered region and its reflection belong to the result.
    for region in closedset:
        thetaB_list.append(region)
        ref_thetaB = reflection(region)
        thetaB_list.append(ref_thetaB)
    return thetaB_list
def parseWebpage(self, data, type, title, url, cover_path, season, episode):
    # Callback for one provider response: `type` selects the parser
    # ("movie" → TMDb JSON, "serie" → TheTVDB XML).  Downloads the poster,
    # optionally fetches description data, then refreshes the UI counters.
    self.counting += 1
    self.start_time = time.clock()
    if type == "movie":
        list = []
        list = re.findall('original_title":"(.*?)".*?"poster_path":"(.*?)"', data, re.S)
        if list:
            purl = "http://image.tmdb.org/t/p/%s%s" % (config.EMC.imdb.themoviedb_coversize.value, list[0][1])
            self.counter_download += 1
            self.end_time = time.clock()
            # Scaled elapsed value shown in the list row; TODO confirm *10.
            elapsed = (self.end_time - self.start_time) * 10
            self.menulist.append(imdb_show(title, cover_path, str(elapsed), "", title))
            if not fileExists(cover_path):
                downloadPage(purl, cover_path).addErrback(self.dataError)
        else:
            self.counter_no_poster += 1
            self.menulist.append(imdb_show(title, cover_path, _("N/A"), "", title))
        # get description
        if config.EMC.imdb.savetotxtfile.value:
            idx = []
            idx = re.findall('"id":(.*?),', data, re.S)
            if idx:
                iurl = "http://api.themoviedb.org/3/movie/%s?api_key=8789cfd3fbab7dccf1269c3d7d867aff&language=de" % idx[0]
                getPage(iurl, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.getInfos, id, type, cover_path).addErrback(self.dataError)
    elif type == "serie":
        list = []
        list = re.findall('<seriesid>(.*?)</seriesid>', data, re.S)
        if list:
            purl = "http://www.thetvdb.com/banners/_cache/posters/%s-1.jpg" % list[0]
            #self.guilist.append(((cleanTitle, True, filename),))
            self.counter_download += 1
            self.end_time = time.clock()
            elapsed = (self.end_time - self.start_time) * 10
            self.menulist.append(imdb_show(title, cover_path, str(elapsed), "", title))
            if not fileExists(cover_path):
                downloadPage(purl, cover_path).addErrback(self.dataError)
            # get description
            if config.EMC.imdb.savetotxtfile.value:
                if season and episode:
                    iurl = "http://www.thetvdb.com/api/2AAF0562E31BCEEC/series/%s/default/%s/%s/de.xml" % (list[0], str(int(season)), str(int(episode)))
                    getPage(iurl, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.getInfos, id, type, cover_path).addErrback(self.dataError)
        else:
            self.counter_no_poster += 1
            self.menulist.append(imdb_show(title, cover_path, _("N/A"), "", title))
    # Refresh the status labels and the on-screen list.
    self.count = ("%s: %s " + _("from") + " %s") % (self.showSearchSiteName, self.counting, self.count_total)
    self["info"].setText(self.count)
    self["no_poster"].setText(_("No Cover: %s") % str(self.counter_no_poster))
    self["exist"].setText(_("Exist: %s") % str(self.counter_exist))
    self["download"].setText(_("Download: %s") % str(self.counter_download))
    self["menulist"].l.setList(self.menulist)
    self["menulist"].l.setItemHeight(image())
    self.check = True
    # Last response received: show the overall timing summary.
    if self.counting == self.count_total:
        self.e_supertime = time.time()
        total_time = self.e_supertime - self.s_supertime
        avg = (total_time / self.count_total)
        self.done = ("%s " + _("movies in") + " %.1f " + _("sec found. Avg. Speed:") + " %.1f " + _("sec.")) % (self.count_total, total_time, avg)
        self["done_msg"].setText(self.done)
        self.running = False
def __init__(self, message='Download', printf=print_):
    # Progress-printer state: start timestamp, display label, output sink
    # and a sliding window of the last 10 rate samples used by progress().
    # NOTE(review): time.clock() was removed in Python 3.8; the clock here
    # must stay consistent with the one used in progress().
    self.start = time.clock()
    self.message = message
    self.rates = deque(maxlen=10)
    self.printf = printf
model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(128, activation='tanh')) model.add(Dense(num_classes, activation='softmax')) # Compile model model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) return model # build the model model = baseline_model() model.summary() model.get_config() model.get_weights() model.output_shape #%% import time start_time = time.clock() # Fit the model hist1 = model.fit(trainX, trainY, validation_data=(testX, testY), nb_epoch=1000, batch_size=200, verbose=2) end_time = time.clock() pretraining_time = (end_time - start_time) print ('Training took %f minutes' % (pretraining_time / 60.)) #%% # Final evaluation of the model loss, accuracy = model.evaluate(testX, testY) print("\nLoss: %.2f, Accuracy: %.2f%%" % (loss, accuracy*100)) #%% from sklearn.metrics import confusion_matrix y_pred = model.predict_classes(testX) print(y_pred) p=model.predict_proba(testX) # to predict probability
def parallel_cell_numeration(coefficients, nCuts, dim, threshold, pool = None, certain_hyperplane = 0):
    '''
    Enumerate all regions (cells) of a hyperplane arrangement and return
    their thetaB descriptors, including the reflection of each region.

    coefficients: coefficients of all hyperplanes.
    nCuts: number of hyperplanes.
    dim: dimension of the ambient space.
    threshold: tolerance on the point-to-hyperplane distance.
    pool: optional multiprocessing Pool; when given, adjacency expansion of
          the whole frontier is dispatched in parallel per iteration.
    certain_hyperplane: index of a hyperplane whose sign is assumed fixed.
    '''
    thetaB_list = []
    # Frontier / visited sets for the breadth-first sweep over regions.
    openset = []
    closedset = []
    Initialized_region = initialize_region(coefficients, nCuts, dim, threshold)
    openset.append(Initialized_region)
    print ('\nfinding adjacent regions...')
    if pool is None:
        # Sequential BFS: expand one region at a time.
        while len(openset) != 0:
            region = openset.pop(0)
            closedset.append(region)
            adj_regions = cal_adjacency_regions(coefficients, nCuts, dim, region,
                                                certain_hyperplane, threshold)
            for adj in adj_regions:
                # Enqueue only regions never seen before.
                if adj not in openset and adj not in closedset:
                    openset.append(adj)
    else:
        # Parallel BFS: expand the whole frontier per iteration.
        # NOTE(review): time.clock() was removed in Python 3.8; kept here
        # because the surrounding project is Python 2 (uses xrange).
        start = time.clock()
        while len(openset) != 0:
            # Mark the entire frontier visited before dispatching; the
            # workers receive closedset so they can filter known regions.
            closedset.extend(openset)
            results = [pool.apply_async(cal_adjacency_regions,
                                        args = (coefficients, nCuts, dim, region,
                                                certain_hyperplane, threshold,
                                                None, closedset))
                       for region in openset]
            openset = []
            for p in results:
                openset.extend(p.get())
            # Deduplicate the new frontier (groupby needs sorted input).
            openset.sort()
            openset = [grp_key for grp_key, _ in itertools.groupby(openset)]
        end = time.clock()
        time_adj_region = end - start
        print ('\nFinish finding adjacent regions using: %0.2f' %(time_adj_region))
    # Every region found yields itself and its reflection.
    for region in closedset:
        thetaB_list.append(region)
        ref_thetaB = reflection(region)
        thetaB_list.append(ref_thetaB)
    return thetaB_list
def __init__(self, radius, center, contour, area):
    """Record a detected blob's geometry and creation time.

    Args:
        radius: enclosing-circle radius of the blob.
        center: centre point of the blob.
        contour: raw contour data the blob was built from.
        area: contour area.
    """
    # Stamp first so the record notes when it was observed.
    # NOTE(review): time.clock() was removed in Python 3.8 — presumably an
    # older interpreter is targeted; confirm before porting.
    self.timestamp = time.clock()
    self.radius = radius
    self.center = center
    self.contour = contour
    self.area = area
from time import time import time from tree import Tree import parallel_cell_enumeration as p_cell import parallel_masterprob as p_ma import pre_process as pre import subprob as sub # Generate data sets of different scales and then test different seeds. # export OMP_NUM_THREADS=2 (Environment Variable OMP_NUM_THREADS sets the number of threads) if __name__ == "__main__": start_all = time.clock() NUM_CORES = 64 #Parallel pool = Pool(processes=NUM_CORES) pool = None # Different size of test data set M, K, N M_list = [1] #[1, 4, 8, 16, 32] K_list = [2] #[2, 4, 6] N_list = [10] #[4, 8, 12] # Generate test data sets for i_1 in xrange(len(M_list)): for i_2 in xrange(len(K_list)):
if Confirm.upper() == 'Q' : exit() elif Confirm.upper() == 'N' : break elif Confirm.upper() == 'Y' : print("\nEntraînement de tapage de mot de passe ! Ecrivez-le, le plus rapidement possible.\n") print(" Commencement au 'GO' dans 4 secondes :") sleep(1) print(" 3... ") sleep(1) print(" 2... ") sleep(1) print(" 1... ") sleep(1) ChronoDeb = time.clock() VPass = input("\n GO ! : ") ChronoFin = time.clock() result = ChronoFin - ChronoDeb elif VPass == SPass : print("\nVous avez correctement recopié votre création. Temps d'exécution : ", result) else VPass != SPass : print("Votre création n'a pas été parfaitement recopiée. Temps d'exécution : ", result) print("Fin")
print ("%.1f"%percentage) + " %" if first: print "\n" except TypeError: sum = numpy.sum(array) if sum != 0: array[:] /= sum dir = "prepared_text//" #dir = "full_text//" ext = ".txt" order = 16 transitions_file = "transition_hgwells_order_"+str(order) calculate = True names = ["pg1268"] text_gen = markov_chain() if calculate: import time start_time = time.clock() text_gen.calculate_transitions([dir+name+ext for name in names], order) print("--- %s seconds ---" % (time.clock() - start_time)) # text_gen.save(dir + transitions_file + ".data") #else: # text_gen.calculate_transitions([dir+name+ext for name in names], 0, False) # text_gen.load(dir + transitions_file + ".data") text_gen.generate_string(4000, show_string=False)
def imdb(self):
    """Kick off a cover search for every movie in self.m_list.

    Movies whose cover .jpg already exists are listed as "Exist"; for the
    rest a lookup URL is built (thetvdb for SxxEyy titles, themoviedb
    otherwise) and downloads are scheduled through a Twisted
    DeferredSemaphore (at most 2 concurrent fetches).
    """
    if self.running:
        print "EMC iMDB: Search already Running."
    elif not self.running:
        print "EMC iMDB: Search started..."
        self["done_msg"].show()
        self.no_cover()
        self.running = True
        # Reset all per-run counters and UI labels.
        self.counter_download = 0
        self.counter_exist = 0
        self.counter_no_poster = 0
        self.t_elapsed = 0
        self.menulist = []
        self.count_movies = len(self.m_list)
        self["exist"].setText(_("Exist: %s") % "0")
        self["no_poster"].setText(_("No Cover: %s") % "0")
        self["download"].setText(_("Download: %s") % "0")
        self["done_msg"].setText(_("Searching..."))
        self.starttime = 0
        self.t_start_time = time.clock()
        self.s_supertime = time.time()
        # Work on a copy so self.m_list itself is never mutated here.
        self.cm_list = self.m_list[:]
        self.search_list = []
        self.exist_list = []
        self.check = False
        self["done_msg"].setText(_("Creating Search List.."))
        self.counting = 0
        self.count_total = len(self.cm_list)
        urls = []
        for each in sorted(self.cm_list):
            (title, path) = each
            # Cover file lives next to the movie, with a .jpg extension.
            cover_path = re.sub(self.file_format + "$", '.jpg', path)
            if os.path.exists(cover_path):
                # Cover already present: show it, update counters only.
                self.counter_exist += 1
                self.counting += 1
                self.menulist.append(imdb_show(title, cover_path, _("Exist"), "", title))
                self["m_info"].setText(title)
                self["no_poster"].setText(_("No Cover: %s") % str(self.counter_no_poster))
                self["exist"].setText(_("Exist: %s") % str(self.counter_exist))
                self["download"].setText(_("Download: %s") % str(self.counter_download))
                self["menulist"].l.setList(self.menulist)
                self["menulist"].l.setItemHeight(image())
                self.check = True
                print "EMC iMDB: Cover vorhanden - %s" % title
            else:
                filename = path
                title = getMovieNameWithoutPhrases(getMovieNameWithoutExt(title))
                # Titles like "...S01E02..." are treated as series episodes.
                if re.search('[Ss][0-9]+[Ee][0-9]+', title) is not None:
                    season = None
                    episode = None
                    seasonEpisode = re.findall('.*?[Ss]([0-9]+)[Ee]([0-9]+)', title, re.S|re.I)
                    if seasonEpisode:
                        (season, episode) = seasonEpisode[0]
                    # Strip the SxxEyy part (and what follows) for the query.
                    name2 = re.sub('[Ss][0-9]+[Ee][0-9]+.*[a-zA-Z0-9_]+','', title, flags=re.S|re.I)
                    url = 'http://thetvdb.com/api/GetSeries.php?seriesname=%s&language=de' % name2.replace(' ','%20')
                    urls.append(("serie", title, url, cover_path, season, episode))
                else:
                    url = 'http://api.themoviedb.org/3/search/movie?api_key=8789cfd3fbab7dccf1269c3d7d867aff&query=%s&language=de' % title.replace(' ','%20')
                    urls.append(("movie", title, url, cover_path, None, None))
        if len(urls) != 0:
            # Throttle to two concurrent downloads via a DeferredSemaphore.
            ds = defer.DeferredSemaphore(tokens=2)
            downloads = [ds.run(self.download, url).addCallback(self.parseWebpage, type, title, url, cover_path, season, episode).addErrback(self.dataError) for type, title, url, cover_path, season, episode in urls]
            finished = defer.DeferredList(downloads).addErrback(self.dataError2)
        else:
            self["done_msg"].setText(_("No Movies found!"))
def imdb_start(self):
    """Process up to 10 pending titles from self.search_list.

    For each title an asynchronous cover lookup is started against the
    provider selected by config.EMC.imdb.search.value:
    "0" imdbapi.com, "1" themoviedb.org, "2" ofdb.de, "3" csfd.cz.
    Stops early (break) once the list is exhausted.
    """
    self["done_msg"].setText(_("Searching.."))
    self.starttime = time.time()
    # NOTE: run10 is a string flag ("false"/"true") elsewhere in this class.
    self.run10 = "false"
    for i in xrange(10):
        #if self.search_list:
        if not len(self.search_list) == 0:
            (title, path) = self.search_list.pop()
            title = getMovieNameWithoutPhrases(getMovieNameWithoutExt(title))
            self.start_time = time.clock()
            if config.EMC.imdb.search.value == "0":
                # imdbapi.com: dots as separators, then percent-encoded.
                self.name = title.replace(' ','.').replace(':','.').replace('..','.')
                path = re.sub(self.file_format + "$", '.jpg', path)
                search_title = self.name.replace('.',' ')
                if not os.path.exists(path):
                    self.counter3 += 1
                    url = "http://www.imdbapi.com/?t=" + self.name.replace('ö','%F6').replace(' ','%20').replace('.','%20')
                    print "EMC imdbapi.com:", url
                    getPage(url, timeout = 10).addCallback(self.imdbapi, search_title, path).addErrback(self.errorLoad, search_title)
            if config.EMC.imdb.search.value == "1":
                # themoviedb.org: plus-separated query string.
                self.name = title.replace(' ','+').replace(':','+').replace('-','').replace('++','+')
                path = re.sub(self.file_format + "$", '.jpg', path)
                search_title = self.name.replace('+',' ')
                if not os.path.exists(path):
                    self.counter3 += 1
                    url = "http://api.themoviedb.org/2.1/Movie.search/de/xml/8789cfd3fbab7dccf1269c3d7d867aff/" + self.name
                    print "EMC themoviedb.org:", url
                    getPage(url, timeout = 10).addCallback(self.themoviedb, search_title, path).addErrback(self.errorLoad, search_title)
            if config.EMC.imdb.search.value == "2":
                # ofdb.de gateway search.
                self.name = title.replace(' ','+').replace(':','+').replace('-','').replace('++','+')
                path = re.sub(self.file_format + "$", '.jpg', path)
                search_title = self.name.replace('+',' ')
                if not os.path.exists(path):
                    self.counter3 += 1
                    url = "http://ofdbgw.home-of-root.de/search/%s" % self.name
                    #url = "http://ofdbgw.home-of-root.de/search/" + self.name
                    print "EMC ofdb.de:", url
                    getPage(url).addCallback(self.ofdb_search, search_title, path).addErrback(self.errorLoad, search_title)
            if config.EMC.imdb.search.value == "3":
                # csfd.cz: URL-quoted free-text search.
                self.name = title.replace(':', ' ').replace('-', ' ').replace('++', '+')
                path = re.sub(self.file_format + "$", '.jpg', path)
                search_title = urllib.quote(self.name.replace('+', ' '))
                if not os.path.exists(path):
                    self.counter3 += 1
                    url = "http://www.csfd.cz/hledat/?q=%s" % search_title
                    print "EMC csfd.cz:", url
                    getPage(url).addCallback(self.csfd_search, self.name, path).addErrback(self.errorLoad, self.name)
        else:
            print "EMC iMDB: MovieList is empty, search is DONE. - BREAK..."
            #self.e_supertime = time.time()
            #total_movie = self.counter3 + self.counter2
            #total_time = self.e_supertime - self.s_supertime
            #avg = (total_time / total_movie)
            #self.done = _("%s Filme in %.1f sec gefunden. Avg. Speed: %.1f sec") % (total_movie, total_time, avg)
            #self["done_msg"].setText(self.done)
            #self.running = "false"
            break
def wrapper(*args, **kwargs):
    """Timing wrapper: call the decorated ``func`` and print the elapsed
    wall-clock time before returning its result unchanged."""
    # BUG FIX: time.clock() was deprecated in Python 3.3 and removed in
    # 3.8; time.perf_counter() is the documented replacement for interval
    # timing.
    start = time.perf_counter()
    response = func(*args, **kwargs)
    end = time.perf_counter()
    print('time spend:', end - start)
    return response
def crawl(self): """ Starts crawling for this project """ # Reset flag self._flag = 0 # Clear the event flag # self.exitobj.clear() if os.name == 'nt': t1 = time.clock() else: t1 = time() # Set start time on config object self._configobj.starttime = t1 if not self._configobj.urlserver: self.push(self._baseUrlObj, 'crawler') else: try: # Flush url server of any previous urls by # sending a flush command. send_url("flush", self._configobj.urlhost, self._configobj.urlport) send_url( 'CRAWLER:' + str(self._baseUrlObj.priority) + '#' + str(self._baseUrlObj.index), self._configobj.urlhost, self._configobj.urlport) except: pass if self._configobj.fastmode: # Start harvestman controller thread import datamgr self._controller = datamgr.HarvestManController() self._controller.start() # Create the number of threads in the config file # Pre-launch the number of threads specified # in the config file. # Initialize thread dictionary self._basetracker.setDaemon(True) self._basetracker.start() while self._basetracker.get_status() != 0: sleep(0.1) for x in range(1, self._configobj.maxtrackers): # Back to equality among threads if x % 2 == 0: t = crawler.HarvestManUrlFetcher(x, None) else: t = crawler.HarvestManUrlCrawler(x, None) self.add_tracker(t) t.setDaemon(True) t.start() for t in self._trackers: if t.get_role() == 'fetcher': self._numfetchers += 1 elif t.get_role() == 'crawler': self._numcrawlers += 1 # bug: give the threads some time to start, # otherwise we exit immediately sometimes. sleep(2.0) self.mainloop() # Set flag to 1 to denote that downloading is finished. self._flag = 1 self.stop_threads(noexit=True) else: self._basetracker.action()
def crawl(self): """ Starts crawling for this project """ # Reset flag self._flag = 0 # Clear the event flag # self.exitobj.clear() if os.name=='nt': t1=time.clock() else: t1=time() # Set start time on config object self._configobj.starttime = t1 if not self._configobj.urlserver: self.push(self._baseUrlObj, 'crawler') else: try: # Flush url server of any previous urls by # sending a flush command. send_url("flush", self._configobj.urlhost, self._configobj.urlport) send_url('CRAWLER:' + str(self._baseUrlObj.priority) + '#' + str(self._baseUrlObj.index), self._configobj.urlhost, self._configobj.urlport) except: pass if self._configobj.fastmode: # Start harvestman controller thread import datamgr self._controller = datamgr.HarvestManController() self._controller.start() # Create the number of threads in the config file # Pre-launch the number of threads specified # in the config file. # Initialize thread dictionary self._basetracker.setDaemon(True) self._basetracker.start() while self._basetracker.get_status() != 0: sleep(0.1) for x in range(1, self._configobj.maxtrackers): # Back to equality among threads if x % 2==0: t = crawler.HarvestManUrlFetcher(x, None) else: t = crawler.HarvestManUrlCrawler(x, None) self.add_tracker(t) t.setDaemon(True) t.start() for t in self._trackers: if t.get_role() == 'fetcher': self._numfetchers += 1 elif t.get_role() == 'crawler': self._numcrawlers += 1 # bug: give the threads some time to start, # otherwise we exit immediately sometimes. sleep(2.0) self.mainloop() # Set flag to 1 to denote that downloading is finished. self._flag = 1 self.stop_threads(noexit = True) else: self._basetracker.action()
def upload_all():
    """Flask endpoint: answer a question about an uploaded image.

    Saves the uploaded JPG/PNG under its MD5 hash, classifies the question
    with sentencePredictor, then dispatches to one of five handlers:
    0 image captioning, 1 visual question answering, 2 emotion detection,
    3 face recognition, 4 text reading. A leading "name <person>" question
    registers a new face. Always returns a jsonify()'d payload.
    """
    # BUG FIX: time.clock() was removed in Python 3.8; perf_counter() is
    # the documented replacement for elapsed-time measurement.
    tick = time.perf_counter()
    file = request.files['image']
    if not file:
        return jsonify({'error': 'No file was uploaded.'})
    if not allowed_file(file.filename):
        return jsonify({'error': 'Please upload a JPG or PNG.'})
    if request.form['request'] == "":
        return jsonify({'answer': "Please, ask a question!"})
    # Content-addressed storage: the file keeps its MD5 as the name.
    file_hash = hashlib.md5(file.read()).hexdigest()
    save_path = os.path.join(app.config['UPLOAD_FOLDER'], file_hash + '.jpg')
    file.seek(0)
    file.save(save_path)
    print("file:")
    print(save_path)
    # Capitalise the first word of the question before classification.
    question = request.form['request']
    lst = question.split()
    lst[0] = lst[0][0].upper() + lst[0][1:]
    question = " ".join(lst)
    # Pick the handler index with the highest predicted score.
    result = np.squeeze(sentencePredictor.predict(question))
    maxval = 0
    idx = 0
    for i, score in enumerate(result):
        print(score)
        if score > maxval:
            maxval = score
            idx = i
    print("question:")
    print(question)
    print(str(idx) + "th NETWORK")
    # "name <person>" / "Name <person>" registers a new face first.
    name = question[:question.find(" ")]
    print(name)
    if name == 'name' or name == 'Name':
        print("adding person")
        name = question[question.find(" ") + 1:]
        Image = cv2.imread(save_path)
        answer = recognizer.add(Image, name, file_hash + '.jpg')
        if answer != "success":
            print("could not save person " + name + ", because: " + answer)
            return jsonify({'answer': answer})
    # --- 0: image captioning ---
    if idx == 0:
        answer = im2txt.feed_image(save_path)
        tock = time.perf_counter()
        # (typo "iamge" fixed in this log message)
        print(str(tock - tick) + "time used for image captioning")
        return jsonify({'answer': answer})
    # --- 3: face recognition ---
    if idx == 3:
        # BUG FIX: the original bare `except:` also swallowed SystemExit/
        # KeyboardInterrupt; narrowed to Exception (still best-effort).
        try:
            print("path = " + save_path)
            answer = ""
            Image = cv2.imread(save_path)
            people = recognizer.recognize(Image)
            if len(people) == 0:
                answer = "I don't see anyone here!"
            elif len(people) == 1:
                if people[0] == "I don't know :(":
                    answer = "I don't know :("
                else:
                    answer = "It is " + people[0]
            else:
                known_people = ""
                unknow_people = 0
                for person in people:
                    if person == "I don't know :(":
                        unknow_people += 1
                    else:
                        if known_people == "":
                            known_people = person
                        else:
                            known_people = known_people + ", " + person
                if known_people == "":
                    answer = "There are " + str(unknow_people) + " people. I don't know anyone here."
                else:
                    answer = "There are " + known_people
                    # NOTE(review): indentation was lost in the source; the
                    # unknown-count suffix only makes sense on this branch.
                    if unknow_people > 0:
                        answer = answer + ". There are " + str(unknow_people) + " people, which I don't know."
        except Exception:
            tock = time.perf_counter()
            print(str(tock - tick) + "time used for face")
            return jsonify({'answer': "I don't see anyone here!"})
        tock = time.perf_counter()
        print(str(tock - tick) + "time used for face")
        return jsonify({'answer': answer})
    # --- 2: emotion detection ---
    if idx == 2:
        img = cv2.imread(save_path)
        faces = faceDetector.detect_Face(img)
        if len(faces) == 0:
            answer = "There are no people here!"
            return jsonify({'answer': answer})
        else:
            answer = [emotionDetector.predict(img, face) for face in faces]
            tock = time.perf_counter()
            print(str(tock - tick) + "time used for emotion")
            print(answer)
            return jsonify({'answer': ",".join(answer)})
    # --- 1: visual question answering ---
    if idx == 1:
        feature = questionAnswering.img_handler(save_path)
        if feature is None:
            tock = time.perf_counter()
            print(str(tock - tick) + "time used for qa")
            return jsonify({'error': 'Error reading image.'})
        # BUG FIX: hashlib.md5() requires bytes on Python 3; the original
        # passed the concatenated str and raised TypeError.
        img_ques_hash = hashlib.md5((file_hash + question).encode('utf-8')).hexdigest()
        # Renamed from `json` to avoid shadowing the stdlib module name.
        qa_payload = questionAnswering.get_answers(question, feature, save_path, img_ques_hash, VIZ_FOLDER)
        tock = time.perf_counter()
        print(str(tock - tick) + "time used for qa")
        return jsonify(qa_payload)
    # --- 4: text reading (with fallback error branch) ---
    if idx == 4:
        text_payload = textReader.read(save_path)
        print(type(text_payload))
        if text_payload == '':
            tock = time.perf_counter()
            print(str(tock - tick) + "time used for text")
            return jsonify({'answer': "I don't see the text here!"})
        tock = time.perf_counter()
        print(str(tock - tick) + "time used for text")
        return jsonify({'answer': text_payload})
    else:
        tock = time.perf_counter()
        print(str(tock - tick) + "time used for text")
        return jsonify({'answer': "Error text"})