def main():
    """Plot a random 2-D gas of atoms plus its 8 periodic images and
    highlight the neighbours of one randomly chosen atom.

    Atoms closer than ``max_distance`` to the chosen atom are drawn blue,
    all others black, and the chosen atom itself red (drawn last so it
    stays visible on top).
    """
    # Offsets of the 8 cells surrounding the unit cell.
    shifts = [[-1, 1], [0, 1], [1, 1],
              [-1, 0],         [1, 0],
              [-1, -1], [0, -1], [1, -1]]
    num_atoms = 100
    num_dims = 2  # dimensions
    coords = pl.random((num_atoms, num_dims))
    chosen = pl.random_integers(num_atoms)  # from 1 to num_atoms
    chosen -= 1                             # from 0 to num_atoms - 1
    # Tile the original atoms into the 8 surrounding cells.
    for shift in shifts:
        coords = pl.vstack((coords, coords[:num_atoms] + shift))
    num_atoms *= 9  # original cell + 8 shifted copies
    max_distance = 0.9
    # Vectorised distances from the chosen atom to every atom at once,
    # replacing the original per-atom loop (and its O(n) plot calls).
    dx = coords[:, 0] - coords[chosen, 0]
    dy = coords[:, 1] - coords[chosen, 1]
    distance = pl.sqrt(dx * dx + dy * dy)
    near = distance < max_distance
    near[chosen] = False  # the chosen atom is plotted separately in red
    far = ~near
    far[chosen] = False
    # Two plot calls instead of one per atom — same picture, far fewer artists.
    pl.plot(coords[near, 0], coords[near, 1], "bo")
    pl.plot(coords[far, 0], coords[far, 1], "ko")
    # plot last for visibility
    pl.plot([coords[chosen, 0]], [coords[chosen, 1]], "ro")
    pl.grid(True)
    pl.show()
def main():
    """Draw a random 2-D atom configuration together with its 8 periodic
    images; colour every atom by its distance to one randomly picked atom
    (blue if within ``max_distance``, black otherwise, the pick itself red).
    """
    neighbour_cells = [
        [-1, 1], [0, 1], [1, 1],
        [-1, 0],         [1, 0],
        [-1, -1], [0, -1], [1, -1],
    ]
    num_atoms = 100
    num_dims = 2  # dimensions
    coords = pl.random((num_atoms, num_dims))
    # random_integers yields [1, num_atoms]; shift down to a 0-based index.
    chosen = pl.random_integers(num_atoms)
    chosen -= 1
    # Replicate the original cell into each neighbouring cell.
    for cell in neighbour_cells:
        coords = pl.vstack((coords, coords[:num_atoms] + cell))
    num_atoms *= 9  # after 8 shifts added
    max_distance = 0.9
    for i in range(num_atoms):
        if i == chosen:
            continue  # the chosen atom gets its own red marker below
        dx = coords[chosen, 0] - coords[i, 0]
        dy = coords[chosen, 1] - coords[i, 1]
        marker = "bo" if pl.sqrt(dx * dx + dy * dy) < max_distance else "ko"
        pl.plot([coords[i, 0]], [coords[i, 1]], marker)
    # plot last for visibility
    pl.plot([coords[chosen, 0]], [coords[chosen, 1]], "ro")
    pl.grid(True)
    pl.show()
def main():
    """Relaxation loop: propose a random move for a random atom and accept
    it only when it lowers the energy, redrawing the plot periodically.

    Relies on helpers defined elsewhere in the file: ``plot_atoms`` (one
    plot artist per atom), ``new_xy`` (proposed position) and ``energies``
    (energy before/after the move) — TODO confirm their exact contracts.
    """
    num_atoms = 64
    num_dims = 2  # dimensions
    coords = pl.random((num_atoms, num_dims))
    axis_limits = [0.0, 1.0]
    # One artist per atom so individual points can be moved via set_data.
    points = plot_atoms(coords, num_atoms, axis_limits)
    update_limit = 16  # redraw only after this many accepted moves
    update_count = 0
    while True:  # NOTE(review): runs forever — presumably stopped interactively; confirm
        chosen = pl.random_integers(num_atoms) - 1  # [0, num_atoms - 1]
        new_x, new_y = new_xy(coords, chosen, axis_limits)
        energy_old, energy_new = energies(coords, chosen, num_atoms, new_x, new_y)
        if energy_new < energy_old:
            # Accept the move: update coordinates and the plotted point.
            coords[chosen, 0] = new_x
            coords[chosen, 1] = new_y
            points[chosen].set_data([new_x, new_y])
            update_count += 1
            if not update_count < update_limit:
                pl.draw()
                update_count = 0
    # NOTE(review): dangling triple quote below never closes in this chunk —
    # likely the start of a commented-out block truncated by the paste; confirm.
    """
def make_scatter():
    """Plot accelerometer correlations around 5 randomly chosen glitch times.

    Loads glitch timestamps from ``accel_glitches.pkl``, picks 5 indices at
    random, and calls ``acc_corr_plots`` on a +/- 10-minute window around each
    one, reporting (but not re-raising) any RuntimeError.
    """
    # SECURITY NOTE: pickle.load executes arbitrary code from the file —
    # only load pickles from trusted sources.
    # Fixes: context manager guarantees the handle is closed, and pickle
    # files must be opened in binary mode.
    with open('accel_glitches.pkl', 'rb') as f:
        times = pickle.load(f)
    dt = timedelta(minutes=10)
    fig = pl.figure()
    # 5 random indices into the glitch list (random_integers is endpoint-inclusive).
    which_times = pl.random_integers(0, len(times) - 1, 5)
    for i in which_times:
        t = times[i]
        start = sptz.localize(t - dt)  # sptz: project timezone object — defined elsewhere
        end = sptz.localize(t + dt)
        try:
            fig = acc_corr_plots(start, end, fig)
        except RuntimeError as err:  # `as` form works on Python 2.6+ and 3
            print('Fail! ' + str(err))
def fasta_random_sample(a):
    """Write a random sample of records from a FASTA file.

    Parameters (attributes of ``a``, e.g. an argparse namespace):
        a.ifile: input FASTA path.
        a.num:   number of records to sample, as a string; defaults to '100'.
        a.ofile: output path; defaults to '<input-stem>_random<num>.fa'.

    NOTE: indices are drawn with replacement, so duplicates collapse and
    fewer than ``num`` records may be written.
    """
    num = a.num if a.num else '100'
    out = a.ofile if a.ofile else a.ifile.split('.')[0] + '_random' + num + '.fa'
    fa = list(parse(a.ifile, 'fasta'))
    # BUG FIX: random_integers is inclusive of its upper bound, so the old
    # high value of len(fa) could draw an index that matches no record and
    # silently shrink the sample. A set also makes the per-record membership
    # test O(1) instead of scanning an ndarray for every record.
    rand = set(random_integers(0, len(fa) - 1, int(num)))
    with open(out, 'w') as f:
        for n, rec in enumerate(fa):
            if n in rand:
                f.write('>' + rec.id + '\n')
                f.write(str(rec.seq) + '\n')
# Counts from column 2 of the data table, as floats
# (presumably neurons per paper; `data` and `t` are defined earlier in the file).
n = double(data[:, 2])

# Only keep first M papers to record >=N neurons
M = 10
idx = []
for i in range(0, len(t)):
    # Keep entry i only if fewer than M entries with at least as large a
    # count (n >= n[i]) have an earlier time than t[i].
    if sum(t[i] > t[n >= n[i]]) < M:
        idx.append(i)
idx = asarray(idx)

# Exponential fit with bootstrap standard error (could use statsmodels here)
(ar, br) = polyfit(t[idx], log(n[idx]), 1)
arb = zeros(1000)
brb = zeros(1000)
for i in range(0, 1000):
    # Resample the kept indices with replacement and refit.
    sidx = idx[random_integers(0, len(idx) - 1, len(idx))]
    (arb[i], brb[i]) = polyfit(t[sidx], log(n[sidx]), 1)

# Evaluate the fitted line on a slightly extended time axis.
t0 = linspace(min(t) - 1, max(t) + 1, 100)
nhat = polyval([ar, br], t0)

# Confidence band for the regression line (standard OLS formula).
x = t[idx]
y = log(n[idx])
yhat = br + x * ar
# BUG FIX: under Python 2 (this file uses py2 syntax elsewhere), 1 / len(y)
# is integer division and evaluates to 0, silently dropping the 1/n term.
# 1.0 / len(y) is correct on both Python 2 and 3.
ci = (
    stats.t.isf(0.05 / 2, len(x) - 2)
    * sqrt(sum(pow(y - yhat, 2)) / (len(y) - 2))
    * sqrt(1.0 / len(y) + pow(t0 - mean(x), 2) / sum(pow(x - mean(x), 2)))
)

sns.set_style("ticks")
sns.set_palette(sns.color_palette("deep"))