def vel_verlet_step(pos_list, vel_list, sp):
    """The velocity Verlet algorithm, returning position and velocity matrices"""
    with timing('force_list'):
        if sp.use_numba:
            F = force_list_numba(pos_list, sp.L, sp.eps, sp.sigma, sp.rc)
        elif sp.use_cython:
            F = ljc.force_list(pos_list, sp)
        elif sp.use_fortran:
            F = ljf.force_list(pos_list, sp.L, sp.eps, sp.sigma, sp.rc,
                               np.linalg.inv)
        elif sp.use_cfortran:
            F = ljcf.force_list(pos_list, sp)
        else:
            F = force_list(pos_list, sp)
    pos_list2 = pos_list + vel_list * sp.dt + F * sp.dt**2 / 2
    with timing('force_list'):
        if sp.use_numba:
            F2 = force_list_numba(pos_list2, sp.L, sp.eps, sp.sigma, sp.rc)
        elif sp.use_cython:
            F2 = ljc.force_list(pos_list2, sp)
        elif sp.use_fortran:
            F2 = ljf.force_list(pos_list2, sp.L, sp.eps, sp.sigma, sp.rc,
                                np.linalg.inv)
        elif sp.use_cfortran:
            F2 = ljcf.force_list(pos_list2, sp)
        else:
            F2 = force_list(pos_list2, sp)
    vel_list2 = vel_list + (F + F2) * sp.dt / 2
    # Count the coordinates that left the box, then wrap them back in
    Npasses = np.sum(pos_list2 - pos_list2 % sp.L != 0, axis=1)
    pos_list2 = pos_list2 % sp.L
    return pos_list2, vel_list2, Npasses
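# The `timing` context manager used above is project-specific and not
# shown in this section. A minimal sketch of what it is assumed to look
# like, a label plus a module-level accumulator of elapsed wall-clock
# time; the real helper may differ:

from contextlib import contextmanager
from time import perf_counter

timers = {}  # hypothetical accumulator: label -> total seconds


@contextmanager
def timing(label):
    """Accumulate the wall-clock time spent inside the block under `label`."""
    start = perf_counter()
    try:
        yield
    finally:
        timers[label] = timers.get(label, 0.0) + perf_counter() - start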
def integrate(pos_list, vel_list, sp):
    """
    Verlet integration for Nt steps.
    Save each thermo-multiple step into xyz_frames.
    Mass set to 1.0.
    """
    # N = pos_list.shape[0]
    # Nframes = int(sp.Nt // sp.thermo)
    n_fr = 1
    # xyz_frames = np.zeros((N, 3, Nframes))
    E = np.zeros(sp.Nt)
    T = np.zeros(sp.Nt)

    # 1st Verlet step
    with timing('force_list'):
        if sp.use_numba:
            F = force_list_numba(pos_list, sp.L, sp.eps, sp.sigma, sp.rc)
        elif sp.use_cython:
            F = ljc.force_list(pos_list, sp)
        else:
            F = force_list(pos_list, sp)
    pos_list = pos_list + vel_list * sp.dt + F * sp.dt**2 / 2
    with timing('tot_PE'):
        if sp.use_numba:
            E[0] = tot_KE(vel_list) + tot_PE_numba(pos_list, sp.eps, sp.sigma,
                                                   sp.rc)
        elif sp.use_cython:
            E[0] = tot_KE(vel_list) + ljc.tot_PE(pos_list, sp)
        else:
            E[0] = tot_KE(vel_list) + tot_PE(pos_list, sp)
    T[0] = temperature(vel_list)

    # Other steps
    for i in range(1, sp.Nt):
        pos_list, vel_list, Npasses = vel_verlet_step(pos_list, vel_list, sp)
        with timing('tot_PE'):
            if sp.use_numba:
                E[i] = tot_KE(vel_list) + tot_PE_numba(pos_list, sp.eps,
                                                       sp.sigma, sp.rc)
            elif sp.use_cython:
                E[i] = tot_KE(vel_list) + ljc.tot_PE(pos_list, sp)
            else:
                E[i] = tot_KE(vel_list) + tot_PE(pos_list, sp)
        T[i] = temperature(vel_list)
        if i % sp.thermo == 0:
            # xyz_frames[:, :, n_fr] = pos_list
            if sp.dump:
                fname = "Dump/dump_" + str(i * sp.thermo) + ".xyz"
                save_xyzmatrix(fname, pos_list)
            print("Step: %i, Temperature: %f" % (i, T[i]))
            n_fr += 1
    # return xyz_frames, E
    return E
def from_classifier_no_cv(cls, classifier, X, y, pos_label=1):
    """
    Create BaseValidationOutput object from a classifier fit on the full
    data set, without cross validation
    """
    X = X if type(X) is pd.core.frame.DataFrame else pd.DataFrame(X)
    y = y if type(y) is pd.core.series.Series else pd.Series(y)
    df = pd.DataFrame(columns=cls.output_columns)
    feature_importances = []

    logger.debug('Fitting model')
    with timing(logger, 'model fit'):
        classifier.fit(X, y)

    true_col = [int(x) for x in pd.Series(y.tolist()) == pos_label]
    pred_col = classifier.predict_proba(X)[:, pos_label].tolist()
    fold_col = [0] * len(true_col)
    data = np.matrix([true_col, pred_col, fold_col])
    df = df.append(pd.DataFrame(data.T, columns=cls.output_columns),
                   ignore_index=True)
    feature_importances.append(
        BaseValidationOutput.get_feature_importances(classifier))

    df[cls.true_col] = df[cls.true_col].astype(int)
    df[cls.fold_col] = df[cls.fold_col].astype(int)
    feature_importances = pd.DataFrame(feature_importances, columns=X.columns)
    return cls(df, feature_importances)
def updatestate(self):
    self.erate = self.size / 200
    self.fat /= 1.01
    tmvx = self.velx
    tmvy = self.vely
    self.px += self.velx
    self.py += self.vely
    self.velx = self.stamina * (self.velx + self.ax)
    self.vely = self.stamina * (self.vely + self.ay)
    self.cspd = math.sqrt(self.velx * self.velx + self.vely * self.vely)
    if self.fat < 0.1:
        self.dtime = self.dtime - t.timing(ticks=10, days=0, eons=0)
    if self.cspd > self.speed:
        self.velx = self.velx / self.cspd * self.speed
        self.vely = self.vely / self.cspd * self.speed
    if self.cspd > 1:
        self.stamina /= 1.001
    if self.cspd <= 1:
        self.stamina *= 1.001
    if self.stamina > self.stamina_cap * 5:
        self.stamina = self.stamina_cap * 5
    if self.ax == 0:
        self.velx = tmvx
    if self.ay == 0:
        self.vely = tmvy
    self.counter += 1
    if self.counter == self.action_time:
        self.counter = 0
def __init__(self, x, y, btime, parent=None):
    self.btime = btime
    if parent is None:
        self.size = np.random.uniform(low=5.0, high=50)
        self.size_norm = self.size / 50.0
        self.size_cap = 50
        self.speed = np.random.uniform(low=0.0, high=10.0)
        self.speed_norm = self.speed / 10.0
        self.attack = np.random.uniform(low=0.0, high=1.0)
        self.stamina_cap = np.random.uniform(low=0.0, high=1.0)
        self.aggressiveness = np.random.uniform(low=0.0, high=1.0)
        self.metabolism = np.random.uniform(low=0.0, high=1.0)
        self.sight = np.random.uniform(low=0.0, high=200.0)
        self.sight_norm = self.sight / 200.0
        self.grate = np.random.uniform(low=0.0, high=1.0)
        self.hunger = np.random.uniform(low=0.0, high=1.0)
        self.brate = np.random.uniform(low=0.0, high=1.0)
        self.drate = np.random.uniform(low=0.0, high=1.0)
    else:
        # Inherit traits from the parent with small Gaussian mutations
        self.size = parent.size + np.random.normal(0.0, 0.1)
        self.size_norm = self.size / 50.0
        self.size_cap = 50
        self.speed = parent.speed + np.random.normal(0.0, 0.1)
        self.speed_norm = self.speed / 10.0
        self.attack = parent.attack + np.random.normal(0.0, 0.1)
        self.stamina_cap = parent.stamina_cap + np.random.normal(0.0, 0.1)
        self.aggressiveness = parent.aggressiveness + np.random.normal(
            0.0, 0.1)
        self.metabolism = parent.metabolism + np.random.normal(0.0, 0.1)
        self.sight = parent.sight + np.random.normal(0.0, 0.1)
        self.sight_norm = self.sight / 200.0
        self.grate = parent.grate + np.random.normal(0.0, 0.1)
        self.hunger = parent.hunger + np.random.normal(0.0, 0.1)
        self.brate = parent.brate + np.random.normal(0.0, 0.1)
        self.drate = parent.drate + np.random.normal(0.0, 0.1)
    self.erate = self.size / 200
    self.stamina = self.stamina_cap * 5
    self.dtime = int(self.drate / self.metabolism * 1000)
    self.action_time = int(50 * self.stamina / self.metabolism)
    self.fat = 0.5
    if self.action_time < 50:
        self.action_time = 50
    a = t.timing(self.dtime, 0, 0)
    self.dtime = self.btime + a
    self.counter = 0
    # print(self.dtime.eons, self.dtime.days, self.dtime.ticks)
    self.normalize()
    self.px = x
    self.py = y
    self.velx = 0
    self.vely = 0
    self.ax = 0
    self.ay = 0
    self.wheel = r.roulette(self)
def test(self, X, y, pos_label=1):
    """
    Instantiate the wrapper's holdout validation output using the
    trained model
    """
    X_copy = X
    if type(X) is pd.core.frame.DataFrame:
        ohe_cols_base = []
        make_dummy = []
        for idx, dtyp in X.dtypes.iteritems():
            if str(dtyp) == 'object':
                # Mirror the train-time encoding: string columns that
                # were one-hot encoded during training are encoded
                # again; all others are replaced by a constant dummy
                if idx in self.ohe_cols_base:
                    ohe_cols_base.append(idx)
                else:
                    make_dummy.append(idx)
            elif str(dtyp) == 'bool':
                X_copy[idx] = X_copy[idx].astype(float)
            elif str(dtyp) == 'datetime64[ns]':
                X_copy[idx] = X_copy[idx].dt.dayofyear.astype(float)
        for dummy in make_dummy:
            X_copy[dummy] = 1.0
        ohe_prefixes = [
            'OHE_{0:02d}_{1}'.format(i, col)
            for i, col in enumerate(ohe_cols_base)
        ]
        X_ohe = pd.get_dummies(
            X_copy,
            columns=ohe_cols_base,
            prefix=ohe_prefixes,
            drop_first=True,
        )
        # Add training-time OHE columns missing from the holdout set...
        for col in self.all_ohe_cols:
            if col not in X_ohe.columns:
                X_ohe[col] = 0
        # ...and drop holdout columns unseen at training time
        droplist = []
        for col in X_ohe.columns:
            if col not in self.all_ohe_cols:
                droplist.append(col)
        X_ohe = X_ohe.drop(droplist, axis=1, inplace=False)
    else:
        X_ohe = X_copy
    if not self.trained:
        raise ValueError('Classifier not fit on train set')
    logger.debug('Predicting on holdout set')
    with timing(logger, 'Prediction complete'):
        self._holdout_validation_output = \
            HoldoutValidationOutput.from_classifier(
                self.classifier, X_ohe, y, pos_label)
def build_cluster_tree(docs, weight_matrix, document_abs, norm_termfreq,
                       termfreq, vocabular):
    with timing("Preparing distances"):
        distances = []
        n_rows = len(docs)
        # Build the bottom of the cluster tree
        clusterLeafs = [ClusterLeaf(doc) for doc in docs]
        sorted_mapping = {voc: idx for idx, voc in enumerate(vocabular)}
        for i in range(n_rows):
            doc = docs[i]
            doc.distances[doc.name] = 1.0
            doc.set_norm_freq(termfreq[i], norm_termfreq[i], sorted_mapping)
            for j in range(i + 1, n_rows):
                dist = calc.distance(weight_matrix[i], weight_matrix[j],
                                     document_abs[i], document_abs[j])
                distances.append(
                    Distance(dist, clusterLeafs[i], clusterLeafs[j]))
                doc.distances[docs[j].name] = dist
                docs[j].distances[doc.name] = dist
    with timing("Calling maketree"):
        return maketree(distances, clusterLeafs)
def init_pos(N, sp):
    np.random.seed(sp.seed)
    E_cut = 1e5
    E = E_cut * 2
    count = 0
    # Keep drawing random configurations until the potential energy falls
    # below the cutoff, i.e. no two particles strongly overlap
    while E > E_cut:
        pos_list = np.random.rand(N, 3) * sp.L
        with timing('tot_PE'):
            if sp.use_numba:
                E = tot_PE_numba(pos_list, sp.eps, sp.sigma, sp.rc)
            elif sp.use_cython:
                E = ljc.tot_PE(pos_list, sp)
            else:
                E = tot_PE(pos_list, sp)
        count += 1
    return pos_list, count, E
def from_classifier(cls, classifier, X, y, n_folds=5, pos_label=1):
    """
    Create BaseValidationOutput object from cross validation of classifier
    """
    X = X if type(X) is pd.core.frame.DataFrame else pd.DataFrame(X)
    y = y if type(y) is pd.core.series.Series else pd.Series(y)
    df = pd.DataFrame(columns=cls.output_columns)
    skf = StratifiedKFold(n_splits=n_folds, shuffle=False)
    feature_importances = []
    for i, (train, test) in enumerate(skf.split(X, y)):
        fold_label = i + 1
        logger.debug('Fitting fold %i' % fold_label)
        with timing(logger, 'Fold %i fit' % fold_label):
            classifier.fit(X.iloc[train], y.iloc[train])
        true_col = [
            int(x) for x in pd.Series(y.iloc[test].tolist()) == pos_label
        ]
        pred_col = classifier.predict_proba(
            X.iloc[test])[:, pos_label].tolist()
        fold_col = [i] * len(test)
        data = np.matrix([true_col, pred_col, fold_col])
        df = df.append(pd.DataFrame(data.T, columns=cls.output_columns),
                       ignore_index=True)
        feature_importances.append(
            BaseValidationOutput.get_feature_importances(classifier))
    df[cls.true_col] = df[cls.true_col].astype(int)
    df[cls.fold_col] = df[cls.fold_col].astype(int)
    feature_importances = pd.DataFrame(feature_importances, columns=X.columns)
    return cls(df, feature_importances)
import random

from timing import timing

array = random.sample(range(1, 1000), 999)


def merge_sort(array):
    """
    Time Complexity: O(n log n)
    Space Complexity: O(n)
    """
    if len(array) > 1:
        mid = len(array) // 2
        left = array[:mid]
        right = array[mid:]

        # Sort both halves recursively
        merge_sort(left)
        merge_sort(right)

        i = j = k = 0
        # Merge the sorted halves back into array
        while i < len(left) and j < len(right):
            if left[i] < right[j]:
                array[k] = left[i]
                i += 1
            else:
                array[k] = right[j]
                j += 1
            k += 1
        # Checking if any element was left
        while i < len(left):
            array[k] = left[i]
            i += 1
            k += 1
        while j < len(right):
            array[k] = right[j]
            j += 1
            k += 1
    return array


timing(merge_sort)(array)
import random

from timing import timing

array = random.sample(range(1, 1000), 999)


def bubble_sort(array):
    """
    Time Complexity: O(n²)
    Space Complexity: O(1)
    """
    n = len(array)
    # Traverse through all array elements
    for i in range(n):
        # Last i elements are already in place:
        # traverse the array from 0 to n - i - 1
        for j in range(0, n - i - 1):
            if array[j] > array[j + 1]:
                # Swap
                array[j], array[j + 1] = array[j + 1], array[j]
    return array


timing(bubble_sort)(array)
#!/usr/bin/env python
# coding=utf-8

from timing import timing

if __name__ == '__main__':
    @timing(1)
    def fib1(n):
        x, y = 0, 1
        while n:
            x, y, n = y, x + y, n - 1
        return x

    fib2 = lambda n: 1 if n <= 2 else fib2(n - 1) + fib2(n - 2)
    fib3 = lambda n, x=0, y=1: x if not n else fib3(n - 1, y, x + y)

    fib1(30)
    timing(1)(fib2)(30)
    timing(1)(fib3)(30)
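# The `timing` module imported above is not shown in this section. From
# its usage, `@timing(1)` as a decorator and `timing(1)(fib2)(30)` as a
# direct call, it is assumed to be a decorator factory that runs the
# wrapped function `times` times and reports the elapsed time. A minimal
# sketch under that assumption (the real module may differ):

import time
from functools import wraps


def timing(times):
    """Return a decorator that calls `fn` `times` times and prints the
    total wall-clock time."""
    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            start = time.perf_counter()
            for _ in range(times):
                result = fn(*args, **kwargs)
            print('%s: %.6f s' % (fn.__name__, time.perf_counter() - start))
            return result
        return wrapper
    return decorator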
def run_game():
    worldtime = t.timing(ticks=0, days=0, eons=0)
    population = 10
    vegetation = 10
    xlist = np.random.uniform(0, 1920, population)
    ylist = np.random.uniform(0, 1080, population)
    xplist = np.random.uniform(0, 1920, vegetation)
    yplist = np.random.uniform(0, 1080, vegetation)
    animals = []
    plants = []
    for i in range(population):
        animals.append(a.animal(int(xlist[i]), int(ylist[i]), worldtime))
    for i in range(vegetation):
        plants.append(p.plant(int(xplist[i]), int(yplist[i]), 0.01))
    clock = pg.time.Clock()
    pg.init()
    screen = pg.display.set_mode((1920, 1080))  # comment out for fast sim
    pg.display.set_caption("first screen")  # comment out for fast sim
    while True:
        for event in pg.event.get():
            if event.type == pg.QUIT:
                sys.exit()
        screen.fill((0, 0, 0))  # comment out for fast sim
        color = (255, 100, 0)
        for i in range(vegetation):
            plants[i].grow()
            for j in range(population):
                dist = np.sqrt((plants[i].x - animals[j].px)**2 +
                               (plants[i].y - animals[j].py)**2)
                if dist <= (plants[i].size + animals[j].size):
                    plants[i].eaten(animals[j].erate)
                    animals[j].grow()
            pg.draw.circle(screen, (100, 255, 0),
                           [int(round(plants[i].x)), int(round(plants[i].y))],
                           int(plants[i].size), 0)  # comment out for fast sim
        kill_plant_list = []
        kill_animal_list = []
        for i in range(vegetation):
            if plants[i].size < 1:
                kill_plant_list.append(i)
        # Pop from the highest index down so that earlier pops do not
        # shift the indices still waiting to be removed
        for plnt in reversed(kill_plant_list):
            plants.pop(plnt)
        newanimlist = []
        for i in range(population):
            x = animals[i].px
            y = animals[i].py
            # Bounce off the screen edges, losing half the speed
            if x > 1920 - animals[i].size or x < animals[i].size:
                animals[i].velx = -int(0.5 * animals[i].velx)
                if x > 1920 - animals[i].size:
                    animals[i].px = 1920 - animals[i].size
                else:
                    animals[i].px = animals[i].size
            if y > 1080 - animals[i].size or y < animals[i].size:
                animals[i].vely = -int(0.5 * animals[i].vely)
                if y > 1080 - animals[i].size:
                    animals[i].py = 1080 - animals[i].size
                else:
                    animals[i].py = animals[i].size
            # Animals do something here; for now, random motion
            newanim = animals[i].animal_action(worldtime)
            if newanim is not None:
                newanimlist.append(newanim)
            animals[i].impulse(np.random.normal(0.0, 0.1),
                               np.random.normal(0.0, 0.1))
            if worldtime > animals[i].dtime:
                kill_animal_list.append(i)
            pg.draw.circle(
                screen, color,
                [int(round(animals[i].px)), int(round(animals[i].py))],
                int(animals[i].size), 0)  # comment out for fast sim
        animals = animals + newanimlist
        for anml in reversed(kill_animal_list):
            animals.pop(anml)
        vegetation = len(plants)
        population = len(animals)
        pg.display.flip()  # comment out for fast sim
        worldtime.next()
        clock.tick(30)
    while len(trip) > 1:
        edges.add((heappop(trip)[1], trip[0][1]))
    return edges


if __name__ == "__main__":
    # Get data path
    """The path to the data files can be set using a script argument.
    For example, if you execute Python from the workspace root, you can
    enter: `python src/gtfs.py ./data/`. Or, if you execute Python from
    the `src/` directory: `python gtfs.py ../data/`.
    """
    DATAPATH = argv[1] if len(argv) > 1 else "../data/"

    # Import data
    (stops, id_map), exetime = timing(import_stops)(join(DATAPATH,
                                                         "stops.txt"))
    print("Imported {0} stops in {1}ms".format(len(stops), exetime * 1e3))
    edges, exetime = timing(import_edges)(join(DATAPATH, "stop_times.txt"),
                                          id_map)
    print("Imported {0} edges in {1}ms".format(len(edges), exetime * 1e3))

    # Construct graph
    starttime = perf_counter()
    GRAPH = Graph(stops,
                  compute_weight=lambda u, v: sqrt(
                      (v.position[0] - u.position[0])**2 +
                      (v.position[1] - u.position[1])**2))
    for start, end in edges:
        GRAPH.add_edge(start, end)
    print("Constructed graph in {0}ms".format(
        (perf_counter() - starttime) * 1e3))

    # Construct pathfinders
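# In the script above, `timing(f)` wraps `f` so that the call returns a
# `(result, elapsed_seconds)` pair. A minimal sketch consistent with that
# usage (the project's actual helper may differ):

from time import perf_counter


def timing(fn):
    """Wrap `fn` so that calls return (result, elapsed_seconds)."""
    def wrapper(*args, **kwargs):
        start = perf_counter()
        result = fn(*args, **kwargs)
        return result, perf_counter() - start
    return wrapper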
def binary_search(sorted_arr, search):
    """O(log n)"""
    n = len(sorted_arr)
    # Split the array at the midpoint and recurse into one half
    if n >= 1:
        mid = n // 2
        if sorted_arr[mid] == search:
            return True
        elif sorted_arr[mid] < search:
            return binary_search(sorted_arr[mid + 1:], search)
        else:
            return binary_search(sorted_arr[:mid], search)
    else:
        return False


def linear_search(sorted_arr, search):
    """O(n)"""
    for element in sorted_arr:
        if element == search:
            return True
    return False


sorted_arr = range(0, 1000)
search = 999

print(timing(linear_search)(sorted_arr, search))
print(timing(binary_search)(sorted_arr, search))
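# Note: when `sorted_arr` is a list, each recursive call above slices it,
# which copies elements, so the search does O(n) copying on top of its
# O(log n) comparisons. An index-based variant avoids the copies; this
# sketch is an addition, not part of the original file:


def binary_search_iterative(sorted_arr, search):
    """O(log n) comparisons and no slice copies: track lo/hi indices."""
    lo, hi = 0, len(sorted_arr) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if sorted_arr[mid] == search:
            return True
        elif sorted_arr[mid] < search:
            lo = mid + 1
        else:
            hi = mid - 1
    return False


print(timing(binary_search_iterative)(sorted_arr, search))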
def train(self,
          X,
          y,
          n_folds=5,
          pos_label=1,
          roll_up_feature_importances=True):
    """
    Instantiate the wrapper's cross validation output and train the model
    on the full feature data set. If run with n_folds=0 it skips the CV
    part.
    """
    # This section deals with non-numeric data types (one-hot encoding,
    # casting)
    ohe_bool = False
    X_copy = deepcopy(X)
    if type(X) is pd.core.frame.DataFrame:
        # One-hot encode strings if there are 16 or fewer unique values
        # TODO: 16 should not be hard-coded
        ohe_cols_base = []
        for idx, dtyp in X.dtypes.iteritems():
            if str(dtyp) == 'object':
                if X[idx].nunique() > 16:
                    X_copy[idx] = 1.0
                else:
                    ohe_cols_base.append(idx)
            elif str(dtyp) == 'bool':
                X_copy[idx] = X_copy[idx].astype(float)
            elif str(dtyp) == 'datetime64[ns]':
                X_copy[idx] = X_copy[idx].dt.dayofyear.astype(float)
        ohe_bool = len(ohe_cols_base) != 0
        # TODO: # of digits to use in naming scheme shouldn't be hard-coded
        ohe_prefixes = [
            'OHE_{0:02d}_{1}'.format(i, col)
            for i, col in enumerate(ohe_cols_base)
        ]
        X_ohe = pd.get_dummies(
            X_copy,
            columns=ohe_cols_base,
            prefix=ohe_prefixes,
            drop_first=True,
        )
        self.ohe_cols_base = ohe_cols_base
        self.all_ohe_cols = X_ohe.columns
        # self.cat_dict = dict()
        # for col in ohe_cols_base:
        #     self.cat_dict[col] = list(X[col].unique())
    else:
        # Fall back to the (possibly converted) copy when X is not a
        # DataFrame
        X_ohe = X_copy

    if n_folds == 0:
        logger.debug('Skipping cross-validation')
        temp_cross_validation_output = \
            CrossValidationOutput.from_classifier_no_cv(
                self.classifier, X_ohe, y, pos_label)
    else:
        logger.debug('Running cross-validation')
        with timing(logger, 'Cross-validation complete'):
            temp_cross_validation_output = \
                CrossValidationOutput.from_classifier(
                    self.classifier, X_ohe, y, n_folds, pos_label)

    if ohe_bool and roll_up_feature_importances:
        # Roll the per-dummy importances back up to the original
        # categorical columns, keeping the strongest dummy per column
        feature_importances_full = \
            temp_cross_validation_output.feature_importances
        feature_importances = pd.DataFrame(
            feature_importances_full.mean()).transpose()
        f_i_cols = feature_importances.columns
        ohe_cols = []
        non_ohe_cols = []
        for col in f_i_cols:
            if re.match(r"^OHE_\d\d_", col) is not None:
                ohe_cols.append(col)
            else:
                non_ohe_cols.append(col)
        ohe_feat_imp = feature_importances[ohe_cols]\
            .transpose()\
            .rename(columns={0: 'FEAT_IMP'})\
            .reset_index()
        top_ohe_index = ohe_feat_imp.groupby(
            ohe_feat_imp['index'].str.slice(0, 6))['FEAT_IMP'].idxmax().values
        top_ohe_col_names = list(
            ohe_feat_imp.loc[top_ohe_index, :]['index'].values)
        ohe_decode = {}
        for col in ohe_prefixes:
            ohe_decode[col[:6]] = col[7:]
        full_cols_with_OHE = non_ohe_cols + top_ohe_col_names
        full_cols_no_OHE = non_ohe_cols \
            + [ohe_decode[col[:6]] for col in ohe_prefixes]
        feature_importances = feature_importances_full[full_cols_with_OHE]
        feature_importances.columns = full_cols_no_OHE
        feature_importances = feature_importances.reindex(columns=X.columns)
        new_cross_val_output = CrossValidationOutput(
            temp_cross_validation_output.predictions,
            fi=feature_importances)
    else:
        new_cross_val_output = temp_cross_validation_output

    self.cross_validation_output = new_cross_val_output

    logger.debug('Fitting classifier on full training set')
    with timing(logger, 'Fitting complete'):
        self.classifier.fit(X_ohe, y)
    self._trained = True
import random

from timing import timing

array = random.sample(range(1, 1000), 999)


def selection_sort(array):
    """
    Time Complexity: O(n²)
    Space Complexity: O(1)
    """
    n = len(array)
    # Traverse through all array elements
    for i in range(n):
        # Find the minimum element in the remaining unsorted array
        min_idx = i
        for j in range(i + 1, n):
            if array[min_idx] > array[j]:
                min_idx = j
        # Swap
        array[i], array[min_idx] = array[min_idx], array[i]
    return array


timing(selection_sort)(array)
import random
from string import ascii_letters

import matplotlib.pyplot as plt

# `reverse`, the function under test, and the `timing` helper are assumed
# to be provided elsewhere in the project
from timing import timing


def random_strings(length, n):
    """Generate `n` random strings of the given `length`."""
    result = []
    for _ in range(n):
        result.append("".join(
            random.choice(ascii_letters) for _ in range(length)))
    return result


def make_test_strings(mini, maxi, n):
    """Generate `n` test strings for every length from `mini` to `maxi`."""
    tests = []
    for i in range(mini, maxi + 1):
        tests.append(random_strings(i, n))
    return tests


if __name__ == "__main__":
    mini = 1
    maxi = 20
    rep = 10
    tests = make_test_strings(mini, maxi, rep)
    times = []
    for t in tests:
        times.append(timing(reverse, t) / rep)
    fig, ax = plt.subplots()
    ax.plot(list(range(mini, maxi + 1)), times)
    ax.set_xlabel("Sequence size", fontsize=14)
    ax.set_ylabel("Time (ms)", fontsize=14)
    ax.grid(linestyle=":")
    plt.show()
from random import random

from timing import timing

data = [random() for _ in range(10**7)]

initial = '''
from heapq import nsmallest
from __main__ import data
'''

setup = '''
nums = data[:]
'''

top10_by_sort = '''
nums.sort()
print(nums[:10])
'''

top10_by_heapq = '''
print(nsmallest(10, nums))
'''

if __name__ == '__main__':
    timing(initial, setup, top10_by_sort, times=1)
    timing(initial, setup, top10_by_heapq, times=1)
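# The `timing(initial, setup, stmt, times=...)` call above mirrors the
# standard library's timeit API, with `initial + setup` as the setup code
# and the snippet string as the timed statement. Assuming that reading,
# an equivalent measurement using timeit directly would be:

import timeit

elapsed = timeit.timeit(stmt=top10_by_heapq, setup=initial + setup, number=1)
print('heapq version: %.3f s' % elapsed)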
from timing import timing


def fibonacci_recursive(n):
    """This function calculates the 'n'th term in the fibonacci series
    using plain recursion"""
    # Base case
    if n == 0:
        return 0
    elif n < 2:
        return 1
    # Recursion
    return fibonacci_recursive(n - 1) + fibonacci_recursive(n - 2)


def fibonacci_dynamic_programming(n):
    """This function calculates the 'n'th term in the fibonacci series
    using a bottom-up table"""
    aux = []
    for i in range(n + 1):
        if i < 2:
            aux.append(1)
        else:
            aux.append(aux[i - 1] + aux[i - 2])
    return aux[i]


def fibonacci_memoization(n, _cache={}):
    """This function calculates the 'n'th term in the fibonacci series
    using memoization"""
    # Base case
    if n == 0:
        return 0
    elif n < 2:
        return 1
    # Recursion
    if n in _cache:
        return _cache[n]
    _cache[n] = (fibonacci_memoization(n - 1, _cache) +
                 fibonacci_memoization(n - 2, _cache))
    return _cache[n]


n = 30
print(timing(fibonacci_recursive)(n))
print(timing(fibonacci_dynamic_programming)(n))
print(timing(fibonacci_memoization)(n))
sp = mydict(eps=eps,
            sigma=sigma,
            rc=rc,
            N=N,
            L=L,
            dt=dt,
            Nt=Nt,
            thermo=thermo,
            seed=seed,
            dump=args["--dump"],
            use_numba=args["--numba"],
            use_cython=args['--cython'])  # system params

print(" =========== \n LJ clusters \n ===========")
print("Particles: %i | Temp: %f | Steps: %i | dt: %f | thermo: %i" %
      (N, T, Nt, dt, thermo))
if args["--dump"]:
    dumpdir = "Dump"
    if not os.path.exists(dumpdir):
        os.makedirs(dumpdir)

# Init system
print("Initialising the system...")
with timing('init'):
    pos_list, count, E = init_pos(N, sp)
    vel_list = init_vel(N, T)
print("Number of trials: %i" % count)
# How to equilibrate?

# Run system
print("Starting integration...")
# xyz_frames, E = integrate(pos_list, vel_list, sp)
with timing('integrate'):
    E = integrate(pos_list, vel_list, sp)

# Print into file
# Nf = xyz_frames.shape[-1]
# for i in range(Nf):