def test_reshape5(self):
  a = expr.arange((35511, ))
  b = expr.reshape(a, (133, 267))
  c = expr.reshape(b, (267, 133))
  d = expr.reshape(c, (1, 35511))
  e = expr.arange((1, 35511))
  Assert.all_eq(d.glom(), e.glom())
def test_reshape3(self):
  a = expr.arange((100, 100))
  b = expr.reshape(a, (10000, ))
  c = expr.reshape(b, (10000, 1))
  d = expr.reshape(c, (1, 10000))
  e = expr.arange((1, 10000))
  Assert.all_eq(d.glom(), e.glom())
def test_2d_2d(self):
  # Not exactly dot with a vector; just make sure the new feature
  # hasn't broken anything.
  # Test with row > col
  av = expr.arange((132, 100))
  bv = expr.arange((100, 77))
  na = np.arange(13200).reshape(132, 100)
  nb = np.arange(7700).reshape(100, 77)
  Assert.all_eq(expr.dot(av, bv).glom(), np.dot(na, nb))

  # Test with row < col
  av = expr.arange((67, 100))
  bv = expr.arange((100, 77))
  na = np.arange(6700).reshape(67, 100)
  nb = np.arange(7700).reshape(100, 77)
  Assert.all_eq(expr.dot(av, bv).glom(), np.dot(na, nb))

  # Dot with a numpy object
  cv = expr.arange((77, 100))
  dv = np.arange(8800).reshape(100, 88)
  nc = np.arange(7700).reshape(77, 100)
  nd = np.arange(8800).reshape(100, 88)
  Assert.all_eq(expr.dot(cv, dv).glom(), np.dot(nc, nd))
def test_vec_vec(self):
  av = expr.arange(stop=100)
  bv = expr.arange(stop=100)
  na = np.arange(100)
  nb = np.arange(100)
  Assert.all_eq(expr.dot(av, bv).glom(), np.dot(na, nb))
def test_reshape6(self):
  a = expr.arange((12319, ))
  b = expr.reshape(a, (127, 97))
  c = expr.reshape(b, (97, 127))
  d = expr.reshape(c, (1, 12319))
  e = expr.arange((1, 12319))
  Assert.all_eq(d.glom(), e.glom())
def test_2d_vec(self):
  av = expr.arange((77, 100))
  bv = expr.arange(stop=100)
  na = np.arange(7700).reshape(77, 100)
  nb = np.arange(100)
  Assert.all_eq(expr.dot(av, bv).glom(), np.dot(na, nb))
def test_reshape4(self):
  a = expr.arange((10000, ))
  b = expr.reshape(a, (10, 1000))
  c = expr.reshape(b, (1000, 10))
  d = expr.reshape(c, (20, 500))
  e = expr.reshape(d, (500, 20))
  f = expr.reshape(e, (1, 10000))
  g = expr.arange((1, 10000))
  Assert.all_eq(f.glom(), g.glom())
def test_matmul(self):
  x = expr.arange(XDIM, dtype=np.int).astype(np.float64)
  y = expr.arange(YDIM, dtype=np.int).astype(np.float64)
  z = expr.dot(x, y)

  nx = np.arange(np.prod(XDIM), dtype=np.int).reshape(XDIM).astype(np.float64)
  ny = np.arange(np.prod(YDIM), dtype=np.int).reshape(YDIM).astype(np.float64)
  nz = np.dot(nx, ny)

  Assert.all_eq(z.glom(), nz)
def test_reshape7(self):
  t1 = expr.arange((23, 120, 100)).glom()
  t2 = expr.arange((12, 230, 100)).glom()
  t3 = expr.arange((276000, 1)).glom()
  t4 = expr.arange((1, 276000)).glom()

  a = expr.arange((100, 23, 120))
  b = expr.arange((12, 23, 1000))
  c = expr.arange((1, 276000))
  d = expr.arange((276000, 1))
  e = expr.arange((276000, ))

  Assert.all_eq(expr.reshape(a, (23, 120, 100)).glom(), t1)
  Assert.all_eq(expr.reshape(a, (12, 230, 100)).glom(), t2)
  Assert.all_eq(expr.reshape(a, (276000, 1)).glom(), t3)
  Assert.all_eq(expr.reshape(a, (1, 276000)).glom(), t4)
  Assert.all_eq(expr.reshape(b, (23, 120, 100)).glom(), t1)
  Assert.all_eq(expr.reshape(b, (12, 230, 100)).glom(), t2)
  Assert.all_eq(expr.reshape(b, (276000, 1)).glom(), t3)
  Assert.all_eq(expr.reshape(b, (1, 276000)).glom(), t4)
  Assert.all_eq(expr.reshape(c, (23, 120, 100)).glom(), t1)
  Assert.all_eq(expr.reshape(c, (12, 230, 100)).glom(), t2)
  Assert.all_eq(expr.reshape(c, (276000, 1)).glom(), t3)
  Assert.all_eq(expr.reshape(c, (1, 276000)).glom(), t4)
  Assert.all_eq(expr.reshape(d, (23, 120, 100)).glom(), t1)
  Assert.all_eq(expr.reshape(d, (12, 230, 100)).glom(), t2)
  Assert.all_eq(expr.reshape(d, (276000, 1)).glom(), t3)
  Assert.all_eq(expr.reshape(d, (1, 276000)).glom(), t4)
  Assert.all_eq(expr.reshape(e, (23, 120, 100)).glom(), t1)
  Assert.all_eq(expr.reshape(e, (12, 230, 100)).glom(), t2)
  Assert.all_eq(expr.reshape(e, (276000, 1)).glom(), t3)
  Assert.all_eq(expr.reshape(e, (1, 276000)).glom(), t4)
def test_2d_vec(self):
  # Test with row > col
  av = expr.arange((100, 77))
  bv = expr.arange(stop=77)
  na = np.arange(7700).reshape(100, 77)
  nb = np.arange(77)
  Assert.all_eq(expr.dot(av, bv).glom(), np.dot(na, nb))

  # Test with col > row
  av = expr.arange((77, 100))
  bv = expr.arange(stop=100)
  na = np.arange(7700).reshape(77, 100)
  nb = np.arange(100)
  Assert.all_eq(expr.dot(av, bv).glom(), np.dot(na, nb))
def test_numpy_2d_vec(self):
  av = expr.arange((77, 100))
  bv = np.arange(100)
  na = np.arange(7700).reshape(77, 100)
  nb = np.arange(100)
  Assert.all_eq(expr.dot(av, bv).glom(), np.dot(na, nb))
def test_sum_2d(self):
  x = expr.arange((TEST_SIZE, TEST_SIZE), dtype=np.int)
  nx = np.arange(TEST_SIZE * TEST_SIZE, dtype=np.int).reshape((TEST_SIZE, TEST_SIZE))
  for axis in [None, 0, 1]:
    y = x.sum(axis)
    val = y.glom()
    Assert.all_eq(val, nx.sum(axis))
def test_argmax_2d(self):
  for axis in [1]:  # [None, 0, 1]:
    x = expr.arange((TEST_SIZE, TEST_SIZE), dtype=np.int)
    nx = np.arange(TEST_SIZE * TEST_SIZE, dtype=np.int).reshape((TEST_SIZE, TEST_SIZE))
    y = x.argmax(axis=axis)
    val = expr.glom(y)
    Assert.all_eq(val, nx.argmax(axis=axis))
def test_slice_reduce(self):
  x = expr.arange((TEST_SIZE, TEST_SIZE, TEST_SIZE), dtype=np.int)
  nx = np.arange(TEST_SIZE * TEST_SIZE * TEST_SIZE, dtype=np.int).reshape((TEST_SIZE, TEST_SIZE, TEST_SIZE))
  y = x[:, :, 0].sum()
  val = y.glom()
  Assert.all_eq(val, nx[:, :, 0].sum())
def test_argmax_3d(self):
  x = expr.arange((TEST_SIZE, TEST_SIZE, TEST_SIZE), dtype=np.int64)
  nx = np.arange(TEST_SIZE * TEST_SIZE * TEST_SIZE, dtype=np.int64).reshape((TEST_SIZE, TEST_SIZE, TEST_SIZE))
  for axis in [None, 0, 1, 2]:
    y = x.argmax(axis)
    val = y.glom()
    Assert.all_eq(val, nx.argmax(axis))
def test_numpy_vec_2d(self):
  av = expr.arange(stop=100)
  bv = np.arange(7700).reshape(100, 77)
  na = np.arange(100)
  nb = np.arange(7700).reshape(100, 77)
  Assert.all_eq(expr.dot(av, bv).glom(), np.dot(na, nb))
def test_slice_map(self):
  x = expr.arange((TEST_SIZE, TEST_SIZE))
  z = x[5:8, 5:8]
  z = expr.map(z, add_one_tile)
  print z
  nx = np.arange(TEST_SIZE * TEST_SIZE).reshape(TEST_SIZE, TEST_SIZE)
  Assert.all_eq(z.glom(), nx[5:8, 5:8] + 1)
def test_slice_shuffle(self):
  x = expr.arange((TEST_SIZE, TEST_SIZE))
  z = x[5:8, 5:8]
  z = expr.shuffle(z, add_one_extent)
  val = z.force()
  nx = np.arange(TEST_SIZE * TEST_SIZE).reshape(TEST_SIZE, TEST_SIZE)
  Assert.all_eq(val.glom(), nx[5:8, 5:8] + 1)
def test_slice_map2(self):
  x = expr.arange((10, 10, 10), dtype=np.int)
  nx = np.arange(10 * 10 * 10, dtype=np.int).reshape((10, 10, 10))
  y = x[:, :, 0]
  z = expr.map(y, lambda tile: tile + 13)
  val = z.glom()
  Assert.all_eq(val.reshape(10, 10), nx[:, :, 0] + 13)
def test_slice_sub(self):
  a = expr.arange((TEST_SIZE,), dtype=np.int)
  v = (a[1:] - a[:-1])
  print optimize.optimize(v)
  v = v.glom()
  print v

  na = np.arange(TEST_SIZE, dtype=np.int)
  nv = na[1:] - na[:-1]
  Assert.all_eq(v, nv)
def test_index(self):
  a = expr.arange((TEST_SIZE, TEST_SIZE))
  b = expr.ones((10,), dtype=np.int)
  z = a[b]
  val = expr.evaluate(z)

  nx = np.arange(TEST_SIZE * TEST_SIZE).reshape(TEST_SIZE, TEST_SIZE)
  ny = np.ones((10,), dtype=np.int)
  Assert.all_eq(val.glom(), nx[ny])
def test_fio_dense(self):
  self.create_path()
  t1 = expr.arange((100, 100)).evaluate()

  Assert.eq(expr.save(t1, "fiotest1", self.test_dir, False), True)
  Assert.all_eq(t1.glom(), expr.load("fiotest1", self.test_dir, False).glom())
  Assert.eq(expr.save(t1, "fiotest1", self.test_dir, True), True)
  Assert.all_eq(t1.glom(), expr.load("fiotest1", self.test_dir, True).glom())

  Assert.eq(expr.pickle(t1, "fiotest2", self.test_dir, False), True)
  Assert.all_eq(t1.glom(), expr.unpickle("fiotest2", self.test_dir, False).glom())
  Assert.eq(expr.pickle(t1, "fiotest2", self.test_dir, True), True)
  Assert.all_eq(t1.glom(), expr.unpickle("fiotest2", self.test_dir, True).glom())
def jacobi_init(size):
  """Input array constructor.

  Parameters
  ----------
  size : int
    Size of one dimension of the array.

  Returns
  -------
  A : spartan nd array
    Formatted input array for the computation.
  A[:, -1:].reshape((size, )) : Expr
    RHS vector extracted from the input array.
  """
  av = expr.arange(start=2, stop=size + 2)
  bv = expr.arange(start=4, stop=size + 4).reshape((size, 1))
  A = av * bv
  return A, A[:, -1:].reshape((size, ))
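# A minimal NumPy sketch of how the outputs of jacobi_init could feed a Jacobi
# sweep.  This local reference loop is an illustration only, not part of the
# spartan code above; it assumes A is diagonally dominant enough for the
# iteration to converge, and all names below are hypothetical.
import numpy as np

def jacobi_solve_numpy(A, b, n_iter=100):
  # x_{k+1} = D^{-1} (b - (A - D) x_k), where D is the diagonal of A.
  D = np.diag(A)
  R = A - np.diag(D)
  x = np.zeros_like(b, dtype=np.float64)
  for _ in range(n_iter):
    x = (b - R.dot(x)) / D
  return x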
def fit(self, X, centers=None):
  """Compute k-means clustering.

  Parameters
  ----------
  X : spartan matrix, shape=(n_samples, n_features). It should be tiled by rows.
  centers : numpy.ndarray. The initial centers. If None, it will be randomly generated.
  """
  num_dim = X.shape[1]
  num_points = X.shape[0]

  labels = expr.zeros((num_points, 1), dtype=np.int)

  if centers is None:
    centers = expr.from_numpy(np.random.rand(self.n_clusters, num_dim))

  for i in range(self.n_iter):
    X_broadcast = expr.reshape(X, (X.shape[0], 1, X.shape[1]))
    centers_broadcast = expr.reshape(centers, (1, centers.shape[0], centers.shape[1]))
    distances = expr.sum(expr.square(X_broadcast - centers_broadcast), axis=2)
    labels = expr.argmin(distances, axis=1)

    center_idx = expr.arange((1, centers.shape[0]))
    matches = expr.reshape(labels, (labels.shape[0], 1)) == center_idx
    matches = matches.astype(np.int64)
    counts = expr.sum(matches, axis=0)
    centers = expr.sum(X_broadcast * expr.reshape(matches, (matches.shape[0], matches.shape[1], 1)),
                       axis=0)

    counts = counts.optimized().glom()
    centers = centers.optimized().glom()

    # If any centroids don't have any points assigned to them.
    zcount_indices = (counts == 0).reshape(self.n_clusters)
    if np.any(zcount_indices):
      # One or more centroids may not have any points assigned to them, which
      # results in their position being the zero-vector.  We reseed these
      # centroids with new random values.
      n_points = np.count_nonzero(zcount_indices)
      # In order to get rid of dividing by zero.
      counts[zcount_indices] = 1
      centers[zcount_indices, :] = np.random.randn(n_points, num_dim)

    centers = centers / counts.reshape(centers.shape[0], 1)
    centers = expr.from_numpy(centers)

  return centers, labels
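# A small NumPy sketch of the same broadcast trick used in fit() above:
# expand X to (n, 1, d) and the centers to (1, k, d), reduce over the feature
# axis to get an (n, k) distance matrix, then rebuild the centers from the
# one-hot label matrix.  This is a local reference for a single iteration,
# not spartan code; the names are illustrative.
import numpy as np

def kmeans_step_numpy(X, centers):
  distances = np.square(X[:, None, :] - centers[None, :, :]).sum(axis=2)
  labels = distances.argmin(axis=1)                                  # (n,)
  matches = (labels[:, None] == np.arange(centers.shape[0])).astype(np.int64)
  counts = matches.sum(axis=0)                                       # points per centroid
  new_centers = (X[:, None, :] * matches[:, :, None]).sum(axis=0)    # (k, d)
  counts = np.maximum(counts, 1)                                     # avoid divide-by-zero
  return new_centers / counts[:, None], labels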
def profile1(self):
  self.create_path()
  t1 = expr.arange((1000, 1000)).evaluate()

  time_a, a = util.timeit(lambda: expr.save(t1, "fiotest3", self.test_dir, False))
  util.log_info('Save a %s dense array in %s without zip', t1.shape, time_a)
  time_a, a = util.timeit(lambda: expr.load("fiotest3", self.test_dir, False).evaluate())
  util.log_info('Load a %s dense array in %s without zip', t1.shape, time_a)

  time_a, a = util.timeit(lambda: expr.save(t1, "fiotest3", self.test_dir, True))
  util.log_info('Save a %s dense array in %s with zip', t1.shape, time_a)
  time_a, a = util.timeit(lambda: expr.load("fiotest3", self.test_dir, True).evaluate())
  util.log_info('Load a %s dense array in %s with zip', t1.shape, time_a)

  time_a, a = util.timeit(lambda: expr.pickle(t1, "fiotest4", self.test_dir, False))
  util.log_info('Pickle a %s dense array in %s without zip', t1.shape, time_a)
  time_a, a = util.timeit(lambda: expr.unpickle("fiotest4", self.test_dir, False).evaluate())
  util.log_info('Unpickle a %s dense array in %s without zip', t1.shape, time_a)

  time_a, a = util.timeit(lambda: expr.pickle(t1, "fiotest4", self.test_dir, True))
  util.log_info('Pickle a %s dense array in %s with zip', t1.shape, time_a)
  time_a, a = util.timeit(lambda: expr.unpickle("fiotest4", self.test_dir, True).evaluate())
  util.log_info('Unpickle a %s dense array in %s with zip', t1.shape, time_a)
def test_optimization_shape(self):
  shape = (200, 800)
  na = np.arange(np.prod(shape), dtype=np.int).reshape(shape)
  nb = np.random.randint(1, 1000, (1000, 1000))
  nc = np.random.randint(1, 1000, (1000, 1000))

  a = expr.arange(shape, dtype=np.int)
  b = expr.from_numpy(nb)
  c = expr.from_numpy(nc)

  d = b + c
  e = b + d
  f = d[200:900, 200:900]
  g = e[200:900, 200:900]
  h = f + g
  i = f + h
  j = h[100:500, 100:500]
  k = i[100:300, 100:300]
  l = expr.reshape(expr.ravel(j), (800, 200))
  m = expr.dot(a, l)
  n = m + k
  o = n + m
  q = o[100:200, 100:200]

  nd = nb + nc
  ne = nb + nd
  nf = nd[200:900, 200:900]
  ng = ne[200:900, 200:900]
  nh = nf + ng
  ni = nf + nh
  nj = nh[100:500, 100:500]
  nk = ni[100:300, 100:300]
  nl = np.reshape(np.ravel(nj), (800, 200))
  nm = np.dot(na, nl)
  nn = nm + nk
  no = nn + nm
  nq = no[100:200, 100:200]

  Assert.all_eq(nq, q.optimized().glom(), tolerance=1e-10)
def test_reshape2(self):
  a = expr.arange((1000, ), tile_hint=[100])
  b = expr.reshape(a, (10, 100)).force()
  c = expr.reshape(b, (1000, )).force()
def test_transpose1(self):
  t1 = expr.arange((3721, 1347))
  t2 = np.transpose(np.reshape(np.arange(3721 * 1347), (3721, 1347)))
  Assert.all_eq(expr.transpose(t1).glom(), t2)
def test_reshape1(self):
  a = expr.arange((10, 10))
  b = expr.reshape(a, (100,))
  c = expr.arange((100,))
  Assert.all_eq(b.glom(), c.glom())
def test_transpose2(self):
  t1 = expr.arange((101, 102, 103))
  t2 = np.transpose(np.reshape(np.arange(101 * 102 * 103), (101, 102, 103)))
  Assert.all_eq(expr.transpose(t1).glom(), t2)
def fit(self, X, centers=None, implementation='map2'):
  """Compute k-means clustering.

  Parameters
  ----------
  X : spartan matrix, shape=(n_samples, n_features). It should be tiled by rows.
  centers : numpy.ndarray. The initial centers. If None, it will be randomly generated.
  """
  num_dim = X.shape[1]
  num_points = X.shape[0]

  labels = expr.zeros((num_points, 1), dtype=np.int)

  if implementation == 'map2':
    if centers is None:
      centers = np.random.rand(self.n_clusters, num_dim)

    for i in range(self.n_iter):
      labels = expr.map2(X, 0, fn=kmeans_map2_dist_mapper,
                         fn_kw={"centers": centers},
                         shape=(X.shape[0], ))

      counts = expr.map2(labels, 0, fn=kmeans_count_mapper,
                         fn_kw={'centers_count': self.n_clusters},
                         shape=(centers.shape[0], ))
      new_centers = expr.map2((X, labels), (0, 0), fn=kmeans_center_mapper,
                              fn_kw={'centers_count': self.n_clusters},
                              shape=(centers.shape[0], centers.shape[1]))

      counts = counts.optimized().glom()
      centers = new_centers.optimized().glom()

      # If any centroids don't have any points assigned to them.
      zcount_indices = (counts == 0).reshape(self.n_clusters)
      if np.any(zcount_indices):
        # One or more centroids may not have any points assigned to them, which
        # results in their position being the zero-vector.  We reseed these
        # centroids with new random values.
        n_points = np.count_nonzero(zcount_indices)
        # In order to get rid of dividing by zero.
        counts[zcount_indices] = 1
        centers[zcount_indices, :] = np.random.randn(n_points, num_dim)

      centers = centers / counts.reshape(centers.shape[0], 1)
    return centers, labels

  elif implementation == 'outer':
    if centers is None:
      centers = expr.rand(self.n_clusters, num_dim)

    for i in range(self.n_iter):
      labels = expr.outer((X, centers), (0, None), fn=kmeans_outer_dist_mapper,
                          shape=(X.shape[0], ))
      #labels = expr.argmin(distances, axis=1)
      counts = expr.map2(labels, 0, fn=kmeans_count_mapper,
                         fn_kw={'centers_count': self.n_clusters},
                         shape=(centers.shape[0], ))
      new_centers = expr.map2((X, labels), (0, 0), fn=kmeans_center_mapper,
                              fn_kw={'centers_count': self.n_clusters},
                              shape=(centers.shape[0], centers.shape[1]))

      counts = counts.optimized().glom()
      centers = new_centers.optimized().glom()

      # If any centroids don't have any points assigned to them.
      zcount_indices = (counts == 0).reshape(self.n_clusters)
      if np.any(zcount_indices):
        # One or more centroids may not have any points assigned to them, which
        # results in their position being the zero-vector.  We reseed these
        # centroids with new random values.
        n_points = np.count_nonzero(zcount_indices)
        # In order to get rid of dividing by zero.
        counts[zcount_indices] = 1
        centers[zcount_indices, :] = np.random.randn(n_points, num_dim)

      centers = centers / counts.reshape(centers.shape[0], 1)
      centers = expr.from_numpy(centers)
    return centers, labels

  elif implementation == 'broadcast':
    if centers is None:
      centers = expr.rand(self.n_clusters, num_dim)

    for i in range(self.n_iter):
      util.log_warn("k_means_ %d %d", i, time.time())
      X_broadcast = expr.reshape(X, (X.shape[0], 1, X.shape[1]))
      centers_broadcast = expr.reshape(centers, (1, centers.shape[0], centers.shape[1]))
      distances = expr.sum(expr.square(X_broadcast - centers_broadcast), axis=2)
      labels = expr.argmin(distances, axis=1)

      center_idx = expr.arange((1, centers.shape[0]))
      matches = expr.reshape(labels, (labels.shape[0], 1)) == center_idx
      matches = matches.astype(np.int64)
      counts = expr.sum(matches, axis=0)
      centers = expr.sum(X_broadcast * expr.reshape(matches, (matches.shape[0], matches.shape[1], 1)),
                         axis=0)

      counts = counts.optimized().glom()
      centers = centers.optimized().glom()

      # If any centroids don't have any points assigned to them.
      zcount_indices = (counts == 0).reshape(self.n_clusters)
      if np.any(zcount_indices):
        # One or more centroids may not have any points assigned to them, which
        # results in their position being the zero-vector.  We reseed these
        # centroids with new random values.
        n_points = np.count_nonzero(zcount_indices)
        # In order to get rid of dividing by zero.
        counts[zcount_indices] = 1
        centers[zcount_indices, :] = np.random.randn(n_points, num_dim)

      centers = centers / counts.reshape(centers.shape[0], 1)
      centers = expr.from_numpy(centers)
    return centers, labels

  elif implementation == 'shuffle':
    if centers is None:
      centers = np.random.rand(self.n_clusters, num_dim)

    for i in range(self.n_iter):
      # Reset them to zero.
      new_centers = expr.ndarray((self.n_clusters, num_dim),
                                 reduce_fn=lambda a, b: a + b)
      new_counts = expr.ndarray((self.n_clusters, 1), dtype=np.int,
                                reduce_fn=lambda a, b: a + b)

      _ = expr.shuffle(X,
                       _find_cluster_mapper,
                       kw={'d_pts': X,
                           'old_centers': centers,
                           'new_centers': new_centers,
                           'new_counts': new_counts,
                           'labels': labels},
                       shape_hint=(1, ),
                       cost_hint={hash(labels): {'00': 0, '01': np.prod(labels.shape)}})
      _.force()

      new_counts = new_counts.glom()
      new_centers = new_centers.glom()

      # If any centroids don't have any points assigned to them.
      zcount_indices = (new_counts == 0).reshape(self.n_clusters)
      if np.any(zcount_indices):
        # One or more centroids may not have any points assigned to them, which
        # results in their position being the zero-vector.  We reseed these
        # centroids with new random values.
        n_points = np.count_nonzero(zcount_indices)
        # In order to get rid of dividing by zero.
        new_counts[zcount_indices] = 1
        new_centers[zcount_indices, :] = np.random.randn(n_points, num_dim)

      new_centers = new_centers / new_counts
      centers = new_centers

    return centers, labels
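# Hypothetical usage of the multi-implementation fit() above.  The KMeans
# constructor signature is an assumption (the method itself only relies on
# self.n_clusters and self.n_iter), and the input data here is illustrative.
import numpy as np

def run_kmeans_example():
  pts = expr.from_numpy(np.random.rand(1000, 16))   # (n_samples, n_features), tiled by rows
  model = KMeans(n_clusters=10, n_iter=20)          # assumed constructor
  for impl in ('map2', 'outer', 'broadcast', 'shuffle'):
    centers, labels = model.fit(pts, implementation=impl)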
def test_argmax_1d(self):
  x = expr.arange((TEST_SIZE, ), dtype=np.int)
  nx = np.arange(TEST_SIZE, dtype=np.int)
  y = x.argmax()
  val = y.glom()
  Assert.all_eq(val, nx.argmax())
def simulate(ts_all, te_all, lamb_all, num_paths):
  '''Range over a number of independent products.

  :param ts_all: DistArray
    Start dates for a series of swaptions.
  :param te_all: DistArray
    End dates for a series of swaptions.
  :param lamb_all: DistArray
    Parameter values for a series of swaptions.
  :param num_paths: Int
    Number of paths used in the random walk.

  :rtype: DistArray
  '''
  swaptions = []
  i = 0
  for ts_a, te, lamb in zip(ts_all, te_all, lamb_all):
    for ts in ts_a:
      #start = time()
      print i
      time_structure = arange(None, 0, ts + DELTA, DELTA)
      maturity_structure = arange(None, 0, te, DELTA)

      ############# MODEL ###############
      # Variance reduction technique - Antithetic Variates.
      eps_tmp = randn(time_structure.shape[0] - 1, num_paths)
      eps = concatenate(eps_tmp, -eps_tmp, 1)

      # Forward LIBOR rates for the construction of the spot measure.
      f_kk = zeros((time_structure.shape[0], 2*num_paths))
      f_kk = assign(f_kk, np.s_[0, :], F_0)

      # Plane kxN of simulated LIBOR rates.
      f_kn = ones((maturity_structure.shape[0], 2*num_paths))*F_0

      # Simulations of the plane f_kn for each time step.
      for t in xrange(1, time_structure.shape[0]):
        f_kn_new = f_kn[1:, :]*exp(lamb*mu(f_kn, lamb)*DELTA - 0.5*lamb*lamb*DELTA +
                                   lamb*eps[t - 1, :]*sqrt(DELTA))
        f_kk = assign(f_kk, np.s_[t, :], f_kn_new[0])
        f_kn = f_kn_new

      ############## PRODUCT ###############
      # Value of zero coupon bonds.
      zcb = ones((int((te-ts)/DELTA)+1, 2*num_paths))
      f_kn_modified = 1 + DELTA*f_kn
      for j in xrange(zcb.shape[0] - 1):
        zcb = assign(zcb, np.s_[j + 1], zcb[j] / f_kn_modified[j])

      # Swaption price at maturity.
      last_row = zcb[zcb.shape[0] - 1, :].reshape((20, ))
      swap_ts = maximum(1 - last_row - THETA*DELTA*expr.sum(zcb[1:], 0), 0)

      # Spot measure used for discounting.
      b_ts = ones((2*num_paths, ))
      tmp = 1 + DELTA * f_kk
      for j in xrange(int(ts/DELTA)):
        b_ts *= tmp[j].reshape((20, ))

      # Swaption price at time 0.
      swaption = swap_ts/b_ts

      # Save expected value in bps and std.
      me = mean((swaption[0:num_paths] + swaption[num_paths:])/2) * 10000
      st = std((swaption[0:num_paths] + swaption[num_paths:])/2)/sqrt(num_paths)*10000

      swaptions.append([me.optimized().force(), st.optimized().force()])
      #print time() - start
      i += 1

  return swaptions
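# A compact NumPy illustration of the antithetic-variates trick used in
# simulate() above: each Gaussian draw eps is paired with -eps, so the
# 2*num_paths columns come in negatively correlated pairs and the variance of
# the Monte Carlo estimator drops.  The step and path counts here are
# arbitrary; this is a local reference, not spartan code.
import numpy as np

steps, num_paths = 10, 4
eps_tmp = np.random.randn(steps, num_paths)
eps = np.concatenate((eps_tmp, -eps_tmp), axis=1)   # shape (steps, 2*num_paths)
assert np.allclose(eps[:, :num_paths] + eps[:, num_paths:], 0.0)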
def test_slice_get(self):
  x = expr.arange((TEST_SIZE, TEST_SIZE))
  z = x[5:8, 5:8]
  val = expr.force(z)
  nx = np.arange(TEST_SIZE * TEST_SIZE).reshape(TEST_SIZE, TEST_SIZE)
  Assert.all_eq(val.glom(), nx[5:8, 5:8])