def xavier(self, threshold=8.0, rms_threshold=84.0, slope_threshold=1.0):
    """Classify each window of self.pork against a running baseline.

    Fixes over the draft version:
      * the inner scan index ``j`` was never initialised (NameError);
      * the outer loop never advanced ``i`` (infinite loop);
      * ``if (slope > ):`` had no threshold (syntax error).
    The three magic numbers are now keyword parameters carrying the
    draft's values, so existing callers are unaffected.

    Args:
        threshold: offset above the baseline marking a window as active.
        rms_threshold: RMS level above which a window is deemed physical.
        slope_threshold: best-fit-line slope above which a non-physical
            window would be classified as emotional (TODO: tune).
    """
    self.dork = self.maximus(self.mylist1)
    self.pork = self.maximus(self.mylist2)
    mars = statistics(self.pork[0])
    venus = statistics(self.pork)
    # Baseline statistics taken from the first window.
    base = mars.mymean(self.pork[0])
    statbase = mars.mymean(self.pork[0])
    for i in range(len(self.pork)):
        # The draft's inner while-loop only kept the window's last sample.
        temp = self.pork[i][-1] if self.pork[i] else 0.0
        if temp > base + threshold:  # threshold may need adjusting
            # High sample: check spread, then RMS, to separate
            # physical events from everything else.
            if venus.standardev(self.pork[i]) > statbase * 2:
                if venus.rootmenasquare(self.pork[i]) > rms_threshold:
                    pass  # physical event -- nothing further to do yet
                else:
                    # Not physical: inspect the slope of the best-fit
                    # line over this and the previous window.
                    slope = self.hitler(i)
                    if slope > slope_threshold:
                        pass  # TODO: classify as emotion
        elif venus.mymean(self.pork[i]) < base:
            # Quiet window: lower the baseline to its mean.
            base = venus.mymean(self.pork[i])
def xavier(self):
    """Precompute the processed lists and their baseline statistics.

    Stores the transformed inputs on ``self.dork``/``self.pork`` and
    derives the baseline mean of the first window (locals only).
    """
    self.dork = self.maximus(self.mylist1)
    self.pork = self.maximus(self.mylist2)
    first_window = statistics(self.pork[0])
    all_windows = statistics(self.pork)
    base = first_window.mymean(self.pork[0])
    statbase = first_window.mymean(self.pork[0])
def calculateDiffs(pcTarget, pcSource, sourceTree, threshold=None):
    """Compare two point clouds via nearest-neighbour distances.

    For every point of ``pcTarget`` the nearest point of ``pcSource`` is
    found through ``sourceTree`` (a KD-tree built over the source cloud)
    and distance statistics are accumulated.

    Args:
        pcTarget: target point cloud (Open3D-style, exposes ``.points``).
        pcSource: source point cloud.
        sourceTree: KD-tree over pcSource with ``search_knn_vector_3d``.
        threshold: distance below which a match counts as correct;
            defaults to the module-level ``thre`` (the draft read that
            global implicitly, which raised NameError when absent).

    Returns:
        A ``statistics`` record with min/max/avg/stddev/numCorrect/n.

    Raises:
        ZeroDivisionError: if pcTarget is empty (as in the draft).
    """
    if threshold is None:
        threshold = thre  # historical module-level global
    # Hoist the array conversions: the draft rebuilt both arrays on
    # every iteration, making the loop quadratic in the cloud size.
    targetPts = np.asarray(pcTarget.points)
    sourcePts = np.asarray(pcSource.points)
    nTotal = len(targetPts)
    mind = 1000
    maxd = 0
    sumd = 0.0
    sumd2 = 0.0
    nCorrect = 0
    for j in range(nTotal):
        [k, idx, _] = sourceTree.search_knn_vector_3d(pcTarget.points[j], 1)
        d = np.linalg.norm(targetPts[j, :] - sourcePts[idx[0], :])
        sumd += d
        sumd2 += d * d
        if mind > d:
            mind = d
        if maxd < d:
            maxd = d
        if threshold > d:
            nCorrect += 1
    stas = statistics()
    stas.min = mind
    stas.max = maxd
    mean = sumd / nTotal
    stas.avg = mean
    # Clamp the variance at zero so float round-off cannot yield NaN.
    stas.stddev = np.sqrt(max(sumd2 / nTotal - mean * mean, 0.0))
    stas.numCorrect = nCorrect
    stas.n = nTotal
    return stas
def make_tree(filename="sample.123.321.dump.gz"): #Print statistics at the end of the program printstat = False stat = statistics(1234) #Create binary tree root root = node(socket.inet_aton("0.0.0.0"), 0, [], None, False, 0) dump = Parser(filename) loop = True #Loop over the input file, reading one line at a time. while loop: try: line = dump.next() except: print "EOF" break #Parsing dumps if line[0] == 'D': for entry in line[3]: fromSet = False time = entry[-1] act = 'A' #Action: A|W revoked = None mask = line[2] prefix = int2ip(line[1]) if act == "W": revoked = 'revoked' else: revoked = None origin = parseASpath(entry, stat) new_node = node(socket.inet_aton(prefix), mask, origin, revoked, fromSet, time) addAction(act, root, new_node, stat) #Parsing updates elif line[0] == 'U': for entry in line[3]: time = entry[-1] fromSet = False act = entry[0] #Action: A|W revoked = None mask = entry[2] prefix = int2ip(entry[1]) if act == "W": revoked = 0 else: revoked = None origin = parseASpath(entry, stat) new_node = node(socket.inet_aton(prefix), mask, origin, revoked, fromSet, time) addAction(act,root, new_node, stat) #Unknown type (neither dump nor update) else: print "Unknown message type " + line[0] return root
def process_books():
    """Download, clean and summarise the two sample books."""
    base = ('https://raw.githubusercontent.com/maigfrga/spark-streaming-book/'
            'master/data/books/')

    def _prepare(path, rdd_name):
        # Identical pipeline for every book: clean, drop stop words,
        # drop the 10 most popular words, then label the RDD.
        rdd = clean_book(create_text_rdd_from_url(base + path))
        rdd = remove_stop_words(rdd)
        rdd = exclude_popular_words(rdd, 10)
        rdd.setName(rdd_name)
        return rdd

    two_cities = _prepare('tale2cities.txt', 'A tale of two cities')
    hamlet = _prepare('hamlet.txt', 'hamlet')
    statistics(two_cities)
    statistics(hamlet)
def hitler(self, k):
    """Best-fit-line reading over adjacent windows k-1 and k.

    Merges the two windows, slices from the position of the minimum
    value to the position of the maximum value (inclusive), and feeds
    that run to ``statistics.lineread``.

    Fixes over the draft: ``statistics(calcifer)`` was constructed
    before ``calcifer`` existed (NameError), and two unused min/max
    locals were computed.

    Args:
        k: index of the right-hand window in ``self.pork``.

    Returns:
        Whatever ``statistics.lineread`` yields for the sliced run.
    """
    merged = self.pork[k - 1] + self.pork[k]
    lo = merged.index(min(merged))
    hi = merged.index(max(merged))
    # Empty when the max occurs before the min, matching the draft loop.
    calcifer = merged[lo:hi + 1]
    jupiter = statistics(calcifer)
    return jupiter.lineread(calcifer)
def post(self, request, *args, **kwargs):
    """Handle a POST by re-syncing statistics for every region.

    Reports success or the caught error to the user via the Django
    messages framework, then renders the usual index context.
    """
    context = super(IndexView, self).get_context_data(**kwargs)
    regions = Region.objects.all()
    try:
        # Fetch fresh statistics region by region; any failure aborts
        # the sync and is surfaced to the user below.
        for region in regions:
            statistics(region).fetch()
        messages.success(request, 'Content synced successfully')
    except Exception as e:
        messages.error(
            request,
            'Some error occurred while syncing data (' + str(e) + ')')
    return super(IndexView, self).render_to_response(context)
list_of_marks.append(mark) name = values[0] list_of_names.append(name) highest = max(list_of_marks) lowest = min(list_of_marks) average = (sum(list_of_marks)) / (len(list_of_marks)) stats = (lowest, highest, average, top_students) return stats def print_results(stats): """Print the statistics given. The parameters 'stats' is a tuple of the form returned by the 'statistics' function above. """ (minimum, maximum, average, top_students) = stats print("Minimum mark is: {:.2f}".format(minimum)) print("Maximum mark is: {:.2f}".format(maximum)) print("Average mark is: {:.2f}".format(average)) if len(top_students) == 1: print("The top student: {}".format(top_students[0])) else: print("The top-equal students:\n {}".format(", ".join(top_students))) data = statistics([('Angus McGurkinshaw', '25\n'), ('Thomas Albert Finkelstein III', '75\n'), ('Myrtle', '50\n')]) print_results(data)
if __name__ == '__main__':
    # Use GBK encoding by default (the input attendance sheets are GBK).
    encoding = 'gbk'
    # Ask for the input file location.
    file_path = get_input_path()
    # Derive the output file locations from the input path.
    statistics_path = get_output_path(file_path, 'statistics.csv')
    absence_path = get_output_path(file_path, 'absence.csv')
    # Parse the report's start/end dates out of the file name.
    month, start, end = get_start_end(file_path)
    # Build the workday list for that period.
    # NOTE(review): the year 2017 is hard-coded here and below -- confirm.
    workdays = work_calendar_for_period(2017, month, start, end)
    # Produce the attendance statistics.
    statistics(file_path, workdays, encoding)
    # Produce the absence records.
    find_absence(statistics_path, workdays, encoding)
    # Drop last Thursday's statistics (presumably to avoid double
    # counting a week that overlaps the previous report -- verify).
    start_date = '2017/%s/%s' % (month, start)
    if end - start == 7 and days_in_week(start_date):
        print('\n%s 记录已删除' % start_date)
        remove_records(statistics_path, start_date)
        remove_records(absence_path, start_date)
# Scan the working directory for psync log files, fold each matching one
# into the aggregate CSV, then sum the collected per-key counts.
f_list = os.listdir("./")
f_list.sort()
for cur_file in f_list:
    ll = 0  # error flag: non-zero when the file could not be opened
    try:
        n_file = open("./" + cur_file, 'r')
    except:
        ll += 1
    if ll == 0:
        name = os.path.basename(n_file.name)
        ns = name.split('-')
        fg = name.split('_')
        # Only plain "psyncLog-..." files (no '_' in the name) count.
        if (ns[0] == "psyncLog" and len(fg) == 1):
            statistics(cur_file, 'final_file.csv')
# NOTE(review): this branch looks inverted -- file_dict[0].append(...)
# runs only when key 0 is MISSING, which raises KeyError unless
# file_dict is a defaultdict; confirm where file_dict/count_dict are
# created (outside this view).
if 0 in file_dict:
    print("it's here though")
else:
    file_dict[0].append(0.0)
if 0 in count_dict:
    print("it's here")
else:
    count_dict[0].append(0.0)
std_ar = []
for key, vt in count_dict.items():
    sum = 0  # NOTE(review): shadows the builtin sum()
    sp = 0
    # Accumulate this key's counts; the loop body may continue past
    # this chunk of the file.
    for k in range(len(count_dict[key])):
        sum = sum + count_dict[key][k]
def main():
    """Run both tableau systems over a fixed argument list and report.

    For every argument, a proof tree is built in system 1 and system 2,
    the two trees are compared, and one summary row per system is
    collected.  The rows are printed with ``tabulate`` and per-argument
    data is plotted.

    Dead code removed from the draft: the initial ``argumentList`` of
    basic connective tests and the first two ``preProgrammedList``
    assignments were all overwritten before use, and the unused
    ``system`` local is gone.

    Returns:
        1 on completion (as in the draft).
    """
    # from pytableaux
    preProgrammedList = [
        [{"name": "p&(q|~q)", "sign": "+"}, {"name": "r", "sign": "-"}],
        [{"name": "p&~p", "sign": "+"}, {"name": "q", "sign": "-"}],
        # problems from the book
        [{"name": "p&q", "sign": "+"}, {"name": "p", "sign": "-"}],
        [{"name": "p", "sign": "+"}, {"name": "p|q", "sign": "-"}],
        [{"name": "p&(q|r)", "sign": "+"},
         {"name": "(p&q)|(p&r)", "sign": "-"}],
        [{"name": "p|(q&r)", "sign": "+"},
         {"name": "(p|q)&(p|r)", "sign": "-"}],
        [{"name": "~~p", "sign": "+"}, {"name": "p", "sign": "-"}],
        [{"name": "(p&q)>r", "sign": "+"},
         {"name": "(p&~r)>q", "sign": "-"}],
        [{"name": "(p&~p)", "sign": "+"}, {"name": "(p|~p)", "sign": "-"}],
        [{"name": "(p&~p)", "sign": "+"}, {"name": "(q|~q)", "sign": "-"}],
        [{"name": "(p|q)", "sign": "+"}, {"name": "(p&q)", "sign": "-"}],
        [{"name": "p", "sign": "+"}, {"name": "~(p&~q)", "sign": "+"},
         {"name": "(q)", "sign": "-"}],
        [{"name": "(p&q)>r", "sign": "+"},
         {"name": "p>(~r|q)", "sign": "-"}],
        [{"name": "p|q", "sign": "+"}, {"name": "~q", "sign": "+"},
         {"name": "p", "sign": "-"}],
        [{"name": "p|q", "sign": "+"}, {"name": "p", "sign": "+"},
         {"name": "q", "sign": "-"}],
        [{"name": "q", "sign": "+"}, {"name": "p|~p", "sign": "-"}],
        [{"name": "p>q", "sign": "+"}, {"name": "~p", "sign": "+"},
         {"name": "q", "sign": "-"}],
        [{"name": "p&~p", "sign": "+"}, {"name": "q", "sign": "+"},
         {"name": "p>p", "sign": "-"}],
        [{"name": "p>q", "sign": "+"}, {"name": "p", "sign": "+"},
         {"name": "q", "sign": "-"}],
        [{"name": "p>q", "sign": "+"}, {"name": "~q", "sign": "+"},
         {"name": "~p", "sign": "-"}],
        [{"name": "~(p|q)", "sign": "+"}, {"name": "~p&~q", "sign": "-"}],
        [{"name": "~(p&q)", "sign": "+"}, {"name": "~p|~q", "sign": "-"}],
        [{"name": "~p&~q", "sign": "+"}, {"name": "~(p|q)", "sign": "-"}],
        [{"name": "~p|~q", "sign": "+"}, {"name": "~(p&q)", "sign": "-"}],
        [{"name": "p>(p>q)", "sign": "+"}, {"name": "p>q", "sign": "-"}],
        [{"name": "(p>(p>q))>p", "sign": "+"},
         {"name": "p>q", "sign": "-"}],
        [{"name": "(p>q)&(r>s)", "sign": "+"},
         {"name": "(p>s)|(r>q)", "sign": "-"}],
        # from advanced logic test set
        [{"name": "(p&q)>r", "sign": "+"},
         {"name": "p>(~q|r)", "sign": "-"}],
        [{"name": "(p&q)>r", "sign": "+"},
         {"name": "(p&~r)>~q", "sign": "-"}],
    ]
    argumentList = preProgrammedList
    T1 = statistics()
    T2 = statistics()
    listOfTObjects = []
    table = []
    l = 1  # table layout: 1 = small table, 0 = full table
    for argument in argumentList:
        T1, tree1 = pros(1, T1, argument)
        T2, tree2 = pros(2, T2, argument)
        comp = comparisonTableaux(tree1, tree2)
        table.append(statContDeal(T1, comp.compare(), l))
        table.append(statContDeal(T2, comp.compare(), l))
        listOfTObjects.append(T1.getListOfAll())
        listOfTObjects.append(T2.getListOfAll())
    if l == 0:
        table = tabulate(table, headers=[
            'logic', 'arguments', 'valid', 'height', 'number\nnodes',
            'number\nbranches', 'rules\napplied', 'ratio\nclosed/open',
            'edit\ndistance', 'runtime', 'countermodel',
            'countermodel \ncheck'
        ], tablefmt='grid')
    if l == 1:
        table = tabulate(table, headers=[
            'logic', 'arguments', 'valid', 'runtime', 'countermodel',
            'countermodel \ncheck'
        ], tablefmt='grid')
    print(table)
    plotting(listOfTObjects)
    print("all prewritten arguments are resolved")
    return 1
def acquire_information():
    """Aggregate region/type/area tables from the annotations in new.json.

    First pass over every annotated image: build an ``nnode`` per region,
    normalise areas, compute containment, and accumulate the type and
    relation dictionaries.  Second pass: use those accumulated
    dictionaries to fill the per-region and per-area tables.

    Returns:
        (typetable, regiontable, areatable,
         itypetable, iregiontable, iareatable)
    """
    my_dict = {}
    my_type = {}
    regiontable = {}
    typee = []
    # "i"-prefixed variants accumulated by deep_acquire_information1/2.
    imy_dict = {}
    imy_type = {}
    iregiontable = {}
    itypee = []
    sum = 0  # NOTE(review): shadows the builtin sum()
    #start = 0
    # Pre-create one sub-dict per bucket (7 buckets -- meaning defined
    # by the statistics() helper, outside this view).
    for i in range(7):
        my_dict[i] = {}
        imy_dict[i] = {}
    num = 1  # image counter
    j1 = json.load(open('new.json'), strict=False)
    content = list(j1.values())
    # ---- First pass: build nodes and accumulate dictionaries ----
    for c in content:
        area = {}
        centroidx = {}
        centroidy = {}
        nodee = {}
        R = {}
        maxarea = 0
        #information from each image
        #if num <= 3:
        xs = [r['shape_attributes']['all_points_x'] for r in c['regions']]
        ys = [r['shape_attributes']['all_points_y'] for r in c['regions']]
        # NOTE(review): `type` shadows the builtin within this function.
        type = [r['region_attributes']['type'] for r in c['regions']]
        name = c['filename']
        #print(c['filename'])
        # One nnode per annotated polygon; track the largest area.
        for i in range(0, len(xs)):
            area[i] = get_area(xs[i], ys[i])
            if area[i] > maxarea:
                maxarea = area[i]
            centroidx[i], centroidy[i] = get_centroid(xs[i], ys[i])
            #print(type[i])
            nodee[i] = nnode(type[i], area[i], centroidx[i], centroidy[i],
                             i, xs[i], ys[i])
            #nodee[i].polytomask(name)
        #print(nodee[3].area)
        # Normalise by the largest area and compute containment links.
        for i in range(0, len(xs)):
            nodee[i].aver(maxarea)
            nodee[i].contain(nodee)
            #nodee[i].maskcontain(nodee)
        typee = get_typee(nodee, typee)
        sum, itypee, imy_type, imy_dict = deep_acquire_information1(
            sum, nodee, itypee, imy_type, imy_dict)
        nodeee = get_container(nodee)
        R = get_relation(nodeee)
        my_type = get_mytype(R, nodeee, my_type)
        #print(R)
        my_dict = statistics(nodeee, R, my_dict)
        #if num == 1:
        #    break
        num += 1
    #print(typee)
    #print("my_type:")
    #print(my_type)
    stypee = {}
    istypee = {}
    # ---- Second pass: rebuild nodes, then fill the tables using the
    # dictionaries accumulated in the first pass ----
    for c in content:
        area = {}
        centroidx = {}
        centroidy = {}
        nodee = {}
        R = {}
        maxarea = 0
        #information from each image
        #if num <= 3:
        xs = [r['shape_attributes']['all_points_x'] for r in c['regions']]
        ys = [r['shape_attributes']['all_points_y'] for r in c['regions']]
        type = [r['region_attributes']['type'] for r in c['regions']]
        for i in range(0, len(xs)):
            area[i] = get_area(xs[i], ys[i])
            if area[i] > maxarea:
                maxarea = area[i]
            centroidx[i], centroidy[i] = get_centroid(xs[i], ys[i])
            #print(type[i])
            nodee[i] = nnode(type[i], area[i], centroidx[i], centroidy[i],
                             i, xs[i], ys[i])
        #print(nodee[3].area)
        for i in range(0, len(xs)):
            nodee[i].aver(maxarea)
            nodee[i].contain(nodee)
        # Debug dump of every node's children types.
        for i in range(0, len(xs)):
            if len(nodee[i].children) != 0:
                print(nodee[i].type)
                for j in range(len(nodee[i].children)):
                    print(nodee[(nodee[i].children)[j]].type)
                print('/s')
        iregiontable, istypee = deep_acquire_information2(
            nodee, iregiontable, istypee, itypee, imy_type, imy_dict)
        nodeee = get_container(nodee)
        R = get_relation(nodeee)
        regiontable = region_table1(R, nodeee, my_type, regiontable)
        stypee = area_table1(nodee, typee, stypee)
        #mytype = get_mytype(R,nodeee,my_type)
        #print(R)
        #my_dict = statistics(nodeee,R,my_dict)
        num += 1
    # ---- Finalise all tables ----
    regiontable = region_table2(my_type, regiontable)
    areatable = area_table2(typee, stypee)
    iregiontable = region_table2(imy_type, iregiontable)
    iareatable = area_table2(itypee, istypee)
    typetable = type_table(my_dict)
    itypetable = type_table(imy_dict)
    #print(nodee[i].area)
    #print(type)
    #print(xs)
    #print(ys)
    #print(area)
    #print(centroidx)
    #print(centroidy)
    #print(nodee[3].area)
    #print(num/2)
    #print(stypee)
    #print(regiontable)
    #print(areatable)
    #print(iregiontable)
    #print(iareatable)
    #print("imy_type:")
    #print(imy_type)
    #print(typetable)
    #print(sum)
    #print(itypetable)
    return typetable, regiontable, areatable, itypetable, iregiontable, iareatable
def statistics():
    '''Calculates and prints out the descriptive statistics about a city
    and time period specified by the user via raw input.

    The draft restarted itself by tail recursion, which grows the call
    stack one frame per restart; an explicit loop is equivalent and
    bounded.  All prompts and output are unchanged.

    Args: none.

    Returns: none.
    '''
    while True:
        # Filter by city (Chicago, New York, Washington)
        city = get_city()
        # Load city
        city_file = []  # Reset each time to avoid running out of memory
        print("\nLoading city (WARNING this could take up to 10 minutes)...")
        start_time = time.time()
        city_file = load_city(city)
        print("{} records loaded, that took {} seconds.".format(
            len(city_file), time.time() - start_time))
        # Filter by time period (month, day, none)
        time_period = get_time_period()
        print('\nCalculating the first statistic...')
        start_time = time.time()
        # Most popular month for start time (only when no month filter)
        if time_period[0] == 'NONE':
            print('Most popular month: {}'.format(
                popular_month(city_file, time_period)))
        print("That took %s seconds." % (time.time() - start_time))
        print("\nCalculating the next statistic...")
        start_time = time.time()
        # Most popular day of week (Monday, Tuesday, etc.) for start time
        if time_period[0] == 'NONE' or time_period[0] == 'MONTH':
            print('Most popular day of week: {}'.format(
                popular_day(city_file, time_period)))
        print("That took %s seconds." % (time.time() - start_time))
        print("\nCalculating the next statistic...")
        start_time = time.time()
        # Most popular hour of day for start time
        print('Most popular hour of day: {}'.format(
            popular_hour(city_file, time_period)))
        print("That took %s seconds." % (time.time() - start_time))
        print("\nCalculating the next statistic...")
        start_time = time.time()
        # Total trip duration and average trip duration
        trip_stats = trip_duration(city_file, time_period)
        if trip_stats is not None:
            print("Average trip duration: {}".format(trip_stats[1]))
            print("Total trip duration: {}".format(trip_stats[0]))
        else:
            print("No trip data found.")
        print("That took %s seconds." % (time.time() - start_time))
        print("\nCalculating the next statistic...")
        start_time = time.time()
        # Most popular start station and most popular end station
        popular_station = popular_stations(city_file, time_period)
        if popular_station is not None:
            print("Most popular start station: \"{}\"".format(popular_station[0]))
            print("Most popular end station: \"{}\"".format(popular_station[1]))
        else:
            print("No station data found.")
        print("That took %s seconds." % (time.time() - start_time))
        print("\nCalculating the next statistic...")
        start_time = time.time()
        # Most popular trip
        pop_trip = popular_trip(city_file, time_period)
        if pop_trip is not None:
            print("Most popular trip: \"{}\" to \"{}\"".format(
                pop_trip[0], pop_trip[1]))
        else:
            print("No trip data found.")
        print("That took %s seconds." % (time.time() - start_time))
        print("\nCalculating the next statistic...")
        start_time = time.time()
        # Counts of each user type
        user_types = users(city_file, time_period)
        for user in user_types:
            print("{}s: {}".format(user, user_types[user]))
        print("That took %s seconds." % (time.time() - start_time))
        print("\nCalculating the next statistic...")
        start_time = time.time()
        # Counts of gender
        gender_types = gender(city_file, time_period)
        if gender_types is None:
            print("Gender info not in data file.")
        else:
            for genders in gender_types:
                print("{}s: {}".format(genders, gender_types[genders]))
        print("That took %s seconds." % (time.time() - start_time))
        print("\nCalculating the next statistic...")
        start_time = time.time()
        # Earliest, most recent, and most popular birth years
        births = birth_years(city_file, time_period)
        if births is None:
            print("Birth year info not in data file.")
        else:
            print("Earliest birth year: {}".format(births[0]))
            print("Most recent birth year: {}".format(births[1]))
            print("Most popular birth year: {}".format(births[2]))
        print("That took %s seconds." % (time.time() - start_time))
        # Display five lines of data at a time if the user asks for it
        display_data(city_file, time_period)
        # Restart?
        restart = input('\nWould you like to restart? Type \'yes\' or \'no\'. ')
        if restart.lower() != 'yes':
            break
print("Gender info not in data file.") else: for genders in gender_types: print("{}s: {}".format(genders, gender_types[genders])) print("That took %s seconds." % (time.time() - start_time)) print("\nCalculating the next statistic...") start_time = time.time() # What are the earliest, most recent, and most popular birth years? births = birth_years(city_file, time_period) if births is None: print("Birth year info not in data file.") else: print("Earliest birth year: {}".format(births[0])) print("Most recent birth year: {}".format(births[1])) print("Most popular birth year: {}".format(births[2])) print("That took %s seconds." % (time.time() - start_time)) # Display five lines of data at a time if user specifies that they would like to display_data(city_file, time_period) # Restart? restart = input('\nWould you like to restart? Type \'yes\' or \'no\'. ') if restart.lower() == 'yes': statistics() if __name__ == "__main__": statistics() #EOF