# Requires: from collections import Counter; from math import ceil
def numPairsDivisibleBy60(self, time) -> int:
    if len(time) == 1:
        return 0
    # Note: Counter behaves like a defaultdict
    ht = Counter(time)
    time.sort()
    # print(time)
    threshold = ceil((time[-2] + time[-1]) / 60) * 60
    # print('threshold', threshold)
    res = 0
    for a in time:
        for targetSum in range(threshold, 0, -60):
            # print('targetSum', targetSum)
            b = targetSum - a
            if a > b:
                continue
            if ht[b] and a != b:
                # print(f"pair: {a} + {b} = {targetSum}")
                res += ht[b]
            elif ht[b] >= 2 and a == b:
                # print(f"pair: {a} + {b} = {targetSum} * {ht[a]-1}")
                res += ht[b] - 1
        ht[a] -= 1
    return res
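# A hedged sketch of the common remainder-counting alternative for the same problem
# (count pairs whose durations sum to a multiple of 60); this is not the author's
# method above, just a compact reference implementation for comparison.
from collections import Counter

def num_pairs_divisible_by_60(time):
    seen = Counter()  # counts of remainders mod 60 seen so far
    pairs = 0
    for t in time:
        r = t % 60
        pairs += seen[(60 - r) % 60]  # earlier songs that complete a multiple of 60
        seen[r] += 1
    return pairs

# num_pairs_divisible_by_60([30, 20, 150, 100, 40]) == 3  # pairs: (30,150), (20,100), (20,40)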
def plot(benchmark):
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    import matplotlib.patches as mpat
    from matplotlib.font_manager import FontProperties

    r_dir = '%s/%s' % (exp_dir, benchmark)
    data, TO = getExpResult(r_dir)
    #data,TO = getExpResult(benchmark)

    # Default reference set to z3, otherwise pick the first one as reference
    if 'z3' in data:
        ref = 'z3'
    else:
        ref = list(data.keys())[0]

    # trim and sort
    # Eliminate the TO/Aborted/Inconsistent cases then sort non-decreasingly
    trim = {}
    for solver in data:
        time = []
        for i in range(len(data[solver]['path'])):
            rsat = data[ref]['ans'][i]
            sat = data[solver]['ans'][i]
            if sat != 't' and sat != 'x':
                if sat == '1' and rsat == '0' or sat == '0' and rsat == '1':
                    continue
                time.append(float(data[solver]['time'][i]))
        time.sort()
        trim[solver] = time

    plotCumTime(benchmark, trim, plt, mpat)
    for solver in data:
        if isreach(solver):
            plotScatter(benchmark, solver, data[solver], plt, mpat)
# Requires: import datetime
def change_classtime2date(course):
    start_time = [(8, 0), (8, 55), (9, 55), (10, 50), (11, 45), (13, 30), (14, 25),
                  (15, 25), (16, 20), (17, 15), (18, 30), (19, 25), (20, 25)]
    end_time = [(8, 45), (9, 40), (10, 40), (11, 35), (12, 30), (14, 15), (15, 10),
                (16, 10), (17, 5), (18, 0), (19, 15), (20, 10), (21, 10)]
    count = len(course)
    current_time = datetime.date.today()
    current_weekday = current_time.weekday()  # weekday index (0 = Monday)
    current_monday = current_time - datetime.timedelta(days=current_weekday)
    # print(current_monday)
    course_list = []
    for key in course.keys():
        times = course[key]
        times = list(set(times))
        for i in range(7):
            day = current_monday + datetime.timedelta(days=i)
            time = list(filter(lambda x: int(x.split("-")[0]) == i, times))
            # print(time)
            if time == []:
                continue
            time.sort()
            # print(time)
            time = [int(t.split("-")[1]) for t in time]
            s = start_time[time[0]]
            e = end_time[time[-1]]
            start = datetime.datetime.combine(day, datetime.time(s[0], s[1])).strftime("%Y-%m-%d %H:%M:%S")
            end = datetime.datetime.combine(day, datetime.time(e[0], e[1])).strftime("%Y-%m-%d %H:%M:%S")
            course_list.append([key, start, end])
    print(course_list)
    return course_list
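# Hedged usage sketch (the dict layout is inferred from the parsing above, not documented
# in the snippet; the course names are illustrative only): each value is a list of
# "weekday-slot" strings, with weekday 0-6 starting on Monday and slot indexing into
# start_time/end_time. Consecutive slots on one day merge into a single start/end block,
# e.g. "0-0" and "0-1" become one Monday 08:00-09:40 entry.
if __name__ == "__main__":
    example_course = {"Linear Algebra": ["0-0", "0-1"], "Physics": ["2-5"]}
    change_classtime2date(example_course)  # prints [[name, start, end], ...] for the current week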
def findMinDifference(self, timePoints: list) -> int:
    ans, n = 720, len(timePoints)
    time = []
    for x in timePoints:
        time.append(self.minute(x))
    time.sort()
    while ans:
        for i in range(len(time) - 1):
            ans = min(ans, time[i + 1] - time[i])
        break
    ans = min(ans, 1440 - time[-1] + time[0])
    return ans
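# The `minute` helper used above is not shown in the snippet; a minimal sketch under the
# assumption that the time points are "HH:MM" strings converted to minutes since midnight:
def minute(self, t: str) -> int:
    h, m = t.split(":")
    return int(h) * 60 + int(m)

# e.g. findMinDifference(["23:59", "00:00"]) -> 1; the wrap-around across midnight is
# covered by the final 1440 - time[-1] + time[0] check.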
# Requires: import numpy as np
def data_deleterepetition(data_origin, time):
    data = []
    sort_time = np.argsort(time, axis=0)
    time.sort()
    for line in range(len(data_origin)):
        data.append(data_origin[sort_time[line]])
    repetition_index = []
    data = np.array(data)
    for line in range(len(data) - 1):
        if (str(data[line].tolist()) == str(data[line + 1].tolist())) \
                and (time[line] == time[line + 1]):
            repetition_index.append(line)
    return np.delete(data, repetition_index, axis=0)
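# Hedged usage sketch (assumes `import numpy as np`): rows are reordered by their
# timestamps, then consecutive rows that are identical AND share a timestamp are dropped.
rows = [[1, 2], [3, 4], [1, 2]]
stamps = [5, 1, 5]
# data_deleterepetition(rows, stamps) keeps two rows: [3, 4] (t=1) and one copy of [1, 2] (t=5)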
def time_func(self, func, shapes=((1, ), (1000, ), (100, 100)),
              iters=50, timeout=2000.0, verbose=False):
    np_time = []
    time = []
    argspec = inspect.getargspec(func)
    for i in range(iters):
        for shape in shapes:
            np_args, args = self.make_args(argspec, shape)
            try:
                np_time.append(func(*np_args))
            except:
                np_time.append(np.inf)
            try:
                time.append(func(*args))
            except Exception as e:
                return -1, -1, -1
            if time[-1] > timeout:
                if verbose:
                    print "{}.{} timed out".format(self.name, func.__name__)
                return 20.0, 20.0, 20.0

    # Get rid of the top and bottom 2
    if iters > 10:
        np_time.sort()
        np_time = np.asarray(np_time[2:-2])
        time.sort()
        time = np.asarray(time[2:-2])

    mean = np.mean(time)
    std = np.std(time)
    np_rel = np.sum(time) / np.sum(np_time)
    if verbose:
        print "{}.{}: {:.3f} +/- {:.2f} ms, {:.2f}x numpy".format(
            self.name, func.__name__, mean, std, np_rel)
    return mean, std, min(np_rel, 20.0)
def plot_noise_spec(specList, cp, dir, dag, qjob, qfile, tftuple):
    fignames = []
    time = list(set(map(operator.itemgetter(0), tftuple)))
    time.sort()
    freq = list(set(map(operator.itemgetter(1), tftuple)))
    freq.sort()
    freq = array(freq, typecode='f')
    Time = array(time, typecode='d') - time[0]
    X, Y = meshgrid(Time, freq)
    start = str(time[0])
    end = str(time[-1])
    flat_specList = []
    tftuple.sort(key=operator.itemgetter(2))
    MIN = tftuple[0][2]
    MINTime = tftuple[0][0]
    tftuple.sort(key=operator.itemgetter(2), reverse=True)
    MAX = tftuple[0][2]
    MAXTime = tftuple[0][0]
    OUTLIER = [1 - MIN, MAX - 1]
    if (1 - MIN) > (MAX - 1):
        qscanTime = MINTime
    else:
        qscanTime = MAXTime
    dag.add_node(
        qscanNode(qjob, qscanTime, qfile, cp.get('pipeline', 'ifo'), dir, OUTLIER))
    figname = str(max(OUTLIER)) + '-' + dir + '-' + start + '-' + end + '.png'
    A = array(specList, typecode='f')
    figure(1)
    pcolor(X, Y, A.transpose(), shading='flat', vmin=0.95, vmax=1.05)
    print "...plotting qscan for " + start
    title('h(t) and h(f) power ratios per freq bin GPS ' + start +
          '\n min = ' + str(MIN) + ' max = ' + str(MAX))
    xlabel('Time')
    ylabel('Frequency')
    colorbar()
    savefig(dir + '/' + figname)
    thumb = 'thumb-' + figname
    savefig(dir + '/' + thumb, dpi=20)
    clf()
    #close()
    return figname, qscanTime
def check_day_conflict(time: list, start: int, end: int) -> bool:
    if end < start:
        return False
    if len(time) == 0:
        return True
    time = time.copy()
    time.append((start, 0))
    time.append((end, 1))
    time.sort(key=lambda t: t[1], reverse=True)
    time.sort(key=lambda t: t[0])
    cur = 0
    for point, op in time:  # loop variable renamed from `time` to avoid shadowing the list
        if op == 0:  # start
            cur += 1
        else:  # op == 1, end
            cur -= 1
        if not (cur == 0 or cur == 1):
            return False
    return cur == 0
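# Hedged usage sketch: `time` holds (timestamp, op) events with op 0 = start and op 1 = end.
# The two sorts put ends before starts at equal timestamps, so back-to-back bookings are
# allowed; any moment with more than one open interval fails the sweep-line counter.
booked = [(9, 0), (10, 1)]                   # an existing 9-10 booking
assert check_day_conflict(booked, 10, 11)    # touching intervals do not conflict
assert not check_day_conflict(booked, 9, 11) # overlap detected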
def gettime(activity_time, driver):
    e = driver.find_elements_by_xpath("//time[@class='timestamp']")
    time = []
    for i in range(len(e)):
        if e[i].get_attribute("datetime") is not None:
            time.append(e[i].get_attribute("datetime"))
    time.sort()
    new_activity_time = time[-1]
    last_activity_time = activity_time[-1]
    compare = last_activity_time == new_activity_time
    if compare == True:
        pass
    else:
        activity_time.append(new_activity_time)
    return compare, activity_time
    # unreachable: dead code after the return
    first_time = time[-2]
    last_time = time[-1]
def time(self, n=100):
    shapes = [(10, ), (1000, ), (100, 100)]
    time = []
    for i in range(n):
        t = 0.0
        for shape in shapes:
            pos = self.rand(shape)
            pos2 = self.rand(shape)
            neg = -self.rand(shape)
            t += time_unary([pos, neg], self.unary_ops)
            t += time_unary([pos, neg], self.unary_ufuncs)
            t += time_binary([pos], [pos2, neg], self.binary_ops)
            t += time_binary([pos], [pos2, neg], self.binary_ufuncs)
        time.append(t)

    # Get rid of the top and bottom 2
    if n > 10:
        time.sort()
        time = np.asarray(time[2:-2])
    print "{:.3f} +/- {:.2f} ms".format(np.mean(time), np.std(time))
    return time
def load_split_traj_file(fname):
    """given a traj file, load it and return lists of data for columns
    format could be old style csv file, or astropy ecsv
    """
    logger = logging.getLogger()
    logger.debug('split_trajectory_file_called, ' + str(fname))
    if not os.path.isfile(fname):
        print('traj_file_not_exist, ' + fname)
        raise FileNotFoundError(fname)  # was a bare `raise`, which fails outside an except block
    if fname.endswith('.ecsv'):
        time, lat, lon, elev, x, y, z, brightness = load_traj_ecsv(fname)
    elif 'MOP' in fname:
        # new py style
        time, lat, lon, elev, x, y, z, brightness = load_traj_MOP(fname)
    else:
        # old idl style
        time, lat, lon, elev, brightness = [], [], [], [], []
        x, y, z = [], [], []
        for item in load_traj_file(fname):
            dumvar = item.split(',')
            if len(dumvar) == 11:  # correct number of fields
                time.append(str(dumvar[0]))    # iso string
                lat.append(float(dumvar[1]))   # in deg
                lon.append(float(dumvar[2]))   # in deg
                elev.append(float(dumvar[3]))  # in m
                x.append(float(dumvar[4]))     # in km
                y.append(float(dumvar[5]))     # in km
                z.append(float(dumvar[6]))     # in km
                brightness.append(float(dumvar[10]))  # float
    logger.debug('split_traj_finished, ' + str(fname))
    # 2 sets of data from 2 cameras here
    # try globally sorting by time
    lat.sort(key=dict(zip(lat, time)).get)
    lon.sort(key=dict(zip(lon, time)).get)
    elev.sort(key=dict(zip(elev, time)).get)
    x.sort(key=dict(zip(x, time)).get)
    y.sort(key=dict(zip(y, time)).get)
    z.sort(key=dict(zip(z, time)).get)
    brightness.sort(key=dict(zip(brightness, time)).get)
    time.sort()  # keep time as str
    return time, lat, lon, elev, x, y, z, brightness
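# Note on the per-column sorts above (the same pattern appears again in load_traj_MOP below):
# dict(zip(col, time)).get collapses duplicate column values, so row correspondence only
# survives when values within each column are unique. A hedged sketch of a more robust
# index-based alternative (the helper name is illustrative, not from the source):
def sort_columns_by_time(time, *columns):
    order = sorted(range(len(time)), key=lambda i: time[i])
    return [time[i] for i in order], [[col[i] for i in order] for col in columns]

# usage: time, (lat, lon, elev) = sort_columns_by_time(time, lat, lon, elev)
# Behaviour differs from the original only when a column contains repeated values.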
def time_func(self, func, shapes=((1,), (1000,), (100, 100)),
              iters=50, timeout=2000.0, verbose=True):
    np_time = []
    time = []
    argspec = inspect.getargspec(func)
    for i in range(iters):
        for shape in shapes:
            np_args, args = self.make_args(argspec, shape)
            try:
                np_time.append(func(*np_args))
            except:
                np_time.append(np.inf)
            try:
                time.append(func(*args))
            except Exception as e:
                return -1, -1, -1
            if time[-1] > timeout:
                if verbose:
                    print "{}.{} timed out".format(self.name, func.__name__)
                return 20.0, 20.0, 20.0

    # Get rid of the top and bottom 2
    if iters > 10:
        np_time.sort()
        np_time = np.asarray(np_time[2:-2])
        time.sort()
        time = np.asarray(time[2:-2])

    mean = np.mean(time)
    std = np.std(time)
    np_rel = np.sum(time) / np.sum(np_time)
    if verbose:
        print "{}.{}: {:.3f} +/- {:.2f} ms, {:.2f}x numpy".format(
            self.name, func.__name__, mean, std, np_rel)
    return mean, std, np_rel
def normalize(attr, old, new):
    d1 = datetime.datetime.combine(datepicker1.value, datetime.time())
    d2 = datetime.datetime.combine(datepicker2.value, datetime.time())
    wd1 = d1.weekday()
    wd2 = d2.weekday()
    d1 = d1 - datetime.timedelta(wd1)
    d2 = d2 - datetime.timedelta(wd2)
    t = list(norm.keys())
    t.sort()
    new = []
    for n in range(len(t)):
        date = datetime.datetime.strptime(t[n], '%Y-%m-%d %H:%M:%S')
        if date >= d1 and date <= d2:
            new.append(t[n])
    num_ticks = [norm[m] for m in new]
    new_y = []
    if checkbox_group.active == [0]:
        for y in source.data['freq']:
            divide = [a / b for a, b in zip(y, num_ticks)]
            new_y.append(divide)
    else:
        for y in source.data['freq']:
            new_y.append(y)
    y_max = 0.1
    for lst in new_y:
        if max(lst) > y_max:
            y_max = max(lst)
    y_max += (y_max * 0.1)
    plot.y_range.end = y_max
    source.data['freq'] = new_y
def highUtiltiy(Table, dsit, tHold, lines):
    ordered = []
    time = []
    factor = []
    price = []
    mean = 0
    for i in Table:
        if Table[i] > (len(lines) * tHold):
            ordered.append([Table[i], i, dsit[i]])
            time.append([dsit[i], i])
            factor.append([Table[i] * dsit[i], i])
    ordered.sort(reverse=True)
    factor.sort(reverse=True)
    time.sort(reverse=True)
    max_utlity = 0
    min_utlity = 10000000000000
    for i in ordered:
        a = i[1].split(',')
        b = [int(j) for j in a]
        pc = 0
        for h in b:
            pc = pc + list[h]  # NOTE: `list` here shadows the builtin; presumably a module-level price list in the original source
        if pc > max_utlity:
            max_utlity = pc
        if pc < min_utlity:
            min_utlity = pc
        price.append(pc)
    for x in factor:
        mean += x[0]
    mean = mean / len(factor)
    draw(ordered, price, 'after apriori')
    tot = 0
    print('itemset before high utility pruning and after applying apriori-')
    print('(X,Y..:frequency)')
    for i in ordered:
        tot += 1
        print(tot, ')', i[1], ':', i[0])
    tot = 0
    print('1.enter range {0} to {1} 2.enter item numbers. 3.AutoPrune'.format(
        min_utlity, max_utlity))
    opt = int(input())
    if (opt == 1):
        loc = []
        print('enter threshold for high utility =')
        thr = int(input())
        print('itemset after pruning-')
        print('(X,Y..:frequency)')
        for i, j in zip(ordered, range(0, len(price))):
            if ((price[j]) > thr):
                loc.append(j)
                tot += 1
                print(tot, ')', i[1], ':', i[0], 'time-', i[2], 'factor-', i[0] * i[2])
                print('item set price->{0}rs and the total sale ->{1}'.format(
                    price[j], price[j] * i[0]))
        #print(b)
        temp1 = [ordered[a] for a in loc]
        temp2 = [price[a] for a in loc]
        #label=[j[1] for j in temp1]
        fer_var1 = [j[0] for j in temp1]
        tim_var2 = [j[2] for j in temp1]
        temp3 = [i[0] * j / 10000 for i, j in zip(temp1, temp2)]
        #factor_new=[i*j for i,j in zip(fer_var1,tim_var2)]
        x, y, c, s = rand(4, len(fer_var1))
        draw(temp1, temp2, 'new')
        index = []

        def onpick3(event):
            ind = event.ind
            index.append(ind[0])
            print('onpick3 scatter:', ind, np.take(fer_var1, ind), np.take(tim_var2, ind))

        fig, ax = plt.subplots()
        col = ax.scatter(fer_var1, tim_var2, temp3, c, picker=True)
        ax.set_title('Time vs Frequency vs Profit')
        #fig.savefig('pscoll.eps')
        fig.canvas.mpl_connect('pick_event', onpick3)
        #plt.set_title('Time vs Frequency vs Profit')
        plt.show()
        print('The selected itemsets are-')
        print([temp1[j][1] for j in index])
    elif (opt == 2):
        print('enter item numbers (Eg-x,y,z,..)')
        opt = input()
        temp = opt.split(',')
        list_hui = [int(j) for j in temp]
        print(list_hui)
        for i in ordered:
            a = i[1].split(',')
            b = [int(j) for j in a]
            for x in list_hui:
                if (x in b):
                    tot += 1
                    print(tot, ')', i[1], ':', i[0])
                    break
    else:
        tot = 0
        x = [(a[0]) for a in time]
        y = [a[0] for a in ordered]
        plt.scatter(x, y, marker='^')
        plt.show()
        for i in factor:
            if (i[0] > mean):
                tot += 1
                print(tot, ')', i[1], ':factor-', i[0])
time = filter(regex.search, content)
# time = [1.0 / float(line.split()[-7][1:]) for line in time]
print len(time)
# print time[0]
rec.append(len(time))
# if len(time) < 50:
#     continue
time = [line.split()[1] for line in time]
for i in range(len(time)):
    if 'ms' in time[i]:
        time[i] = float(time[i][:-2]) / 1e3
    elif 'us' in time[i]:
        time[i] = float(time[i][:-2]) / 1e6
    elif 'ns' in time[i]:
        time[i] = float(time[i][:-2]) / 1e9
    else:
        time[i] = float(time[i][:-1])
time.sort()
# time = time[len(time)/3:len(time)*2/3]
aver_time = np.mean(time)
std_time = np.std(time)
rec.append(str(aver_time))
rec.append(str(std_time))
rec.append("%f" % (std_time / aver_time))
print rec
csvWriter.writerow(rec)
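# The ms/us/ns branches above normalize timing tokens such as "1.5ms" to seconds; a hedged
# stand-alone sketch of the same conversion (the helper name is illustrative only, and the
# fallback mirrors the original's assumption of a trailing "s"):
def to_seconds(tok):
    scale = {'ms': 1e-3, 'us': 1e-6, 'ns': 1e-9}
    for suffix, factor in scale.items():
        if tok.endswith(suffix):
            return float(tok[:-2]) * factor
    return float(tok[:-1])  # bare seconds, e.g. "2.0s"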
def load_traj_MOP(fname):
    """load an MOP trajectory file, called by load_split
    its got 2 tables each with header names and unordered columns"""
    time, lat, lon, elev, brightness = [], [], [], [], []
    x, y, z = [], [], []
    # load all the comments and find the event names
    tab1 = []
    tab2 = []
    with open(fname, 'rt') as f:
        ef1 = f.readline()
        ef2 = f.readline()
        loc1 = f.readline()
        header1 = f.readline().split(',')
        while True:
            dat = f.readline()
            if dat.startswith('#'):
                break
            tab1.append(dat)
        loc2 = dat
        header2 = f.readline().split(',')
        while True:
            dat = f.readline()
            if not dat or dat.startswith('#'):
                break
            tab2.append(dat)
    # extract out named columns
    header1[0] = header1[0].lstrip('#')
    header2[0] = header2[0].lstrip('#')
    header1 = [a.strip() for a in header1]
    header2 = [a.strip() for a in header2]
    tabd1 = []
    for row in tab1:
        dd = {}
        dat = row.split(',')
        dat = [a.strip() for a in dat]
        for b in range(len(dat)):
            dd[header1[b]] = dat[b]
        tabd1.append(dd)
    tabd2 = []
    for row in tab2:
        dd = {}
        dat = row.split(',')
        dat = [a.strip() for a in dat]
        for b in range(len(dat)):
            dd[header2[b]] = dat[b]
        tabd2.append(dd)
    # tabd = list of dicts
    for tab in (tabd1, tabd2):
        for row in tab:
            # print(row)
            time.append(row['datetime'])       # iso string
            lat.append(row['latitude'])        # in deg
            lon.append(row['longitude'])       # in deg
            elev.append(float(row['height']))  # in m
            x.append(float(row['X_geo']))      # in km
            y.append(float(row['Y_geo']))      # in km
            z.append(float(row['Z_geo']))      # in km
            if 'brightness' in row:
                brightness.append(row['brightness'])  # float
            else:
                brightness.append(255.0)  # float
    logger.debug('split_traj_finished, ' + str(fname))
    # 2 sets of data from 2 cameras here
    # try globally sorting by time
    lat.sort(key=dict(zip(lat, time)).get)
    lon.sort(key=dict(zip(lon, time)).get)
    elev.sort(key=dict(zip(elev, time)).get)
    x.sort(key=dict(zip(x, time)).get)
    y.sort(key=dict(zip(y, time)).get)
    z.sort(key=dict(zip(z, time)).get)
    brightness.sort(key=dict(zip(brightness, time)).get)
    time.sort()  # keep time as str
    return time, lat, lon, elev, x, y, z, brightness
def update_data(attrname, old, new):
    d1 = datetime.datetime.combine(datepicker1.value, datetime.time())
    d2 = datetime.datetime.combine(datepicker2.value, datetime.time())
    wd1 = d1.weekday()
    wd2 = d2.weekday()
    d1 = d1 - datetime.timedelta(wd1)
    d2 = d2 - datetime.timedelta(wd2)
    p = phrase.value
    p = p.split(",")
    x_list = []
    y_list = []
    group_list = []
    color_list = []
    colors = ['red', 'blue', 'green', 'purple', 'black', 'pink', 'orange', 'brown']
    indices = [0, 0]
    done = 0
    tt = {}
    for d in top_ten:
        if str(d1) not in top_ten and str(d2) not in top_ten:
            date = str(d2)
            while date not in top_ten:
                date = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
                date = date - datetime.timedelta(7)
                date = str(date)
            count_freq = Counter(top_ten[date])
            top_10 = count_freq.most_common(10)
        elif str(d1) not in top_ten:
            count_freq = Counter(top_ten[str(d2)])
            top_10 = count_freq.most_common(10)
        elif str(d2) not in top_ten:
            date = str(d2)
            while date not in top_ten:
                date = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
                date = date - datetime.timedelta(7)
                date = str(date)
            tt = dict(Counter(top_ten[date]) - Counter(top_ten[str(d1)]))
            count_freq = Counter(tt)
            top_10 = count_freq.most_common(10)
        else:
            tt = dict(Counter(top_ten[str(d2)]) - Counter(top_ten[str(d1)]))
            count_freq = Counter(tt)
            top_10 = count_freq.most_common(10)
    df = pd.DataFrame(top_10, columns=["ngram", "frequency"])
    table_source.data = {'ngram': df.ngram, 'frequency': df.frequency}
    for e in range(len(p)):
        p[e] = p[e].strip()
        for w in weeks:
            if w not in data[p[e]]:
                data[p[e]][w] = 0
        t = list(data[p[e]].keys())
        t.sort()
        new = []
        x = []
        for n in range(len(t)):
            date = datetime.datetime.strptime(t[n], '%Y-%m-%d %H:%M:%S')
            if date >= d1 and date <= d2:
                new.append(t[n])
                x.append(date)
        y = [data[p[e]][m] for m in new]
        x_list.append(x)
        y_list.append(y)
        group_list.append(p[e])
        col = e % 8
        color_list.append(colors[col])
    y_max = 1
    for lst in y_list:
        if max(lst) > y_max:
            y_max = max(lst)
    y_max += (y_max * 0.1)
    plot.y_range.end = y_max
    source.data = dict(date=x_list, freq=y_list, group=group_list, color=color_list)