def test_rectangular(self):
    lons = numpy.array(range(100)).reshape((10, 10))
    lats = numpy.negative(lons)
    mesh = RectangularMesh(lons, lats, depths=None)
    bounding_mesh = mesh._get_bounding_mesh()
    expected_lons = numpy.array([
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 19, 29, 39, 49, 59, 69, 79, 89,
        99, 98, 97, 96, 95, 94, 93, 92, 91, 90, 80, 70, 60, 50, 40, 30, 20, 10
    ])
    expected_lats = numpy.negative(expected_lons)
    self.assertTrue((bounding_mesh.lons == expected_lons).all())
    self.assertTrue((bounding_mesh.lats == expected_lats).all())
    self.assertIsNone(bounding_mesh.depths)

    depths = lons + 10
    mesh = RectangularMesh(lons, lats, depths)
    expected_depths = expected_lons + 10
    bounding_mesh = mesh._get_bounding_mesh()
    self.assertIsNotNone(bounding_mesh.depths)
    self.assertTrue((bounding_mesh.depths == expected_depths.flatten()).all())

    bounding_mesh = mesh._get_bounding_mesh(with_depths=False)
    self.assertIsNone(bounding_mesh.depths)
def unmask_temperature(self, signal, order='nested', seed=None):
    """
    Given the harmonic sphere map ``signal`` as the underlying signal,
    provide a map where the mask has been removed and replaced with the
    contents of signal. Noise consistent with the noise properties of the
    observation (without the mask) will be added.
    """
    Nside, lmin, lmax = self.Nside, signal.lmin, signal.lmax
    random_state = as_random_state(seed)
    temperature = self.load_temperature_mutable(order)
    inverse_mask = (self.properties.load_mask_mutable(order) == 1).view(np.ndarray)
    np.negative(inverse_mask, inverse_mask)  # invert the mask in-place

    # First, smooth the signal with the beam and pixel window
    smoothed_signal = self.properties.load_beam_transfer_matrix(lmin, lmax) * signal
    pixwin = load_temperature_pixel_window_matrix(Nside, lmin, lmax)
    smoothed_signal = pixwin * smoothed_signal

    # Create map from signal, and replace unmasked values in temperature map
    signal_map = smoothed_signal.to_pixel(self.Nside)
    signal_map.change_order_inplace(order)
    temperature[inverse_mask] = signal_map[inverse_mask]

    # Finally, add RMS to unmasked area
    rms_in_mask = self.properties.load_rms(order)[inverse_mask]
    temperature[inverse_mask] += random_state.normal(scale=rms_in_mask)
    return temperature
def test_ufunc_out(self):
    from numpy import array, negative, zeros, sin
    from math import sin as msin
    a = array([[1, 2], [3, 4]])
    c = zeros((2, 2, 2))
    b = negative(a + a, out=c[1])
    # test for view, and also test that forcing out also forces b
    assert (c[:, :, 1] == [[0, 0], [-4, -8]]).all()
    assert (b == [[-2, -4], [-6, -8]]).all()
    # Test broadcast, type promotion
    b = negative(3, out=a)
    assert (a == -3).all()
    c = zeros((2, 2), dtype=float)
    b = negative(3, out=c)
    assert b.dtype.kind == c.dtype.kind
    assert b.shape == c.shape
    a = array([1, 2])
    b = sin(a, out=c)
    assert (c == [[msin(1), msin(2)]] * 2).all()
    b = sin(a, out=c + c)
    assert (c == b).all()
    # Test shape agreement
    a = zeros((3, 4))
    b = zeros((3, 5))
    raises(ValueError, 'negative(a, out=b)')
    b = zeros((1, 4))
    raises(ValueError, 'negative(a, out=b)')
def logpdf(x, nu, s2=1):
    """Log of the scaled inverse chi-squared probability density function.

    Parameters
    ----------
    x : array_like
        quantiles
    nu : array_like
        degrees of freedom
    s2 : array_like, optional
        scale (default 1)

    Returns
    -------
    logpdf : ndarray
        Log of the probability density function evaluated at `x`.

    """
    x = np.asarray(x)
    nu = np.asarray(nu)
    s2 = np.asarray(s2)
    nu_2 = nu / 2
    y = np.log(x)
    y *= (nu_2 + 1)
    np.negative(y, out=y)
    y -= (nu_2 * s2) / x
    y += np.log(s2) * nu_2
    y -= gammaln(nu_2)
    y += np.log(nu_2) * nu_2
    return y
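# A hedged usage sketch of logpdf above (not from the original source): the
# scaled inverse chi-squared distribution with parameters (nu, s2) equals an
# inverse-gamma distribution with shape nu/2 and scale nu*s2/2, which gives
# an independent cross-check. (logpdf itself assumes
# `from scipy.special import gammaln` is in scope.)
import numpy as np
from scipy.stats import invgamma

x = np.array([0.5, 1.0, 2.0])
nu, s2 = 4.0, 1.5
reference = invgamma.logpdf(x, a=nu / 2, scale=nu * s2 / 2)
assert np.allclose(logpdf(x, nu, s2), reference)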
def forward_cpu(self, inputs):
    self.retain_inputs((0, 1))
    x, gy = inputs
    gx = utils.force_array(numpy.sin(x))
    numpy.negative(gx, out=gx)
    gx *= gy
    return gx,
def testInitializerFunction(self):
    value = [[-42], [133.7]]
    shape = [2, 1]
    with self.test_session():
        initializer = lambda: tf.constant(value)
        with self.assertRaises(ValueError):
            # Checks that dtype must be specified.
            tf.Variable(initializer)

        v1 = tf.Variable(initializer, dtype=tf.float32)
        self.assertEqual(shape, v1.get_shape())
        self.assertAllClose(value, v1.initial_value.eval())
        with self.assertRaises(tf.errors.FailedPreconditionError):
            v1.eval()

        v2 = tf.Variable(tf.neg(v1.initialized_value()), dtype=tf.float32)
        self.assertEqual(v1.get_shape(), v2.get_shape())
        self.assertAllClose(np.negative(value), v2.initial_value.eval())

        # Once v2.initial_value.eval() has been called, v1 has effectively
        # been initialized.
        self.assertAllClose(value, v1.eval())

        with self.assertRaises(tf.errors.FailedPreconditionError):
            v2.eval()
        tf.initialize_all_variables().run()
        self.assertAllClose(np.negative(value), v2.eval())
def testInitializerFunction(self):
    value = [[-42], [133.7]]
    shape = [2, 1]
    with self.test_session():
        initializer = lambda: constant_op.constant(value)

        v1 = variables.Variable(initializer, dtype=dtypes.float32)
        self.assertEqual(shape, v1.get_shape())
        self.assertEqual(shape, v1.shape)
        self.assertAllClose(value, v1.initial_value.eval())
        with self.assertRaises(errors_impl.FailedPreconditionError):
            v1.eval()

        v2 = variables.Variable(
            math_ops.negative(v1.initialized_value()), dtype=dtypes.float32)
        self.assertEqual(v1.get_shape(), v2.get_shape())
        self.assertEqual(v1.shape, v2.shape)
        self.assertAllClose(np.negative(value), v2.initial_value.eval())

        # Once v2.initial_value.eval() has been called, v1 has effectively
        # been initialized.
        self.assertAllClose(value, v1.eval())

        with self.assertRaises(errors_impl.FailedPreconditionError):
            v2.eval()
        variables.global_variables_initializer().run()
        self.assertAllClose(np.negative(value), v2.eval())
def test_negative(self):
    from numpy import array, negative
    a = array([-5.0, 0.0, 1.0])
    b = negative(a)
    for i in range(3):
        assert b[i] == -a[i]

    a = array([-5.0, 1.0])
    b = negative(a)
    a[0] = 5.0
    assert b[0] == 5.0
    a = array(range(30))
    assert negative(a + a)[3] == -6

    a = array([[1, 2], [3, 4]])
    b = negative(a + a)
    assert (b == [[-2, -4], [-6, -8]]).all()

    class Obj(object):
        def __neg__(self):
            return "neg"

    x = Obj()
    assert type(negative(x)) is str
def neg(target):
    a = pyext.Buffer(target)
    # in place transformation (see Python array ufuncs)
    N.negative(a[:], a[:])
    # must mark buffer content as dirty to update graph
    # (no explicit assignment occurred)
    a.dirty()
def construct_uvn_frame(n, u, b=None, flip_to_match_image=True):
    """ Returns an orthonormal 3x3 frame from a normal and one in-plane vector """
    n = normalized(n)
    u = normalized(np.array(u) - np.dot(n, u) * n)
    v = normalized_cross(n, u)

    # flip to match image orientation
    if flip_to_match_image:
        if abs(u[1]) > abs(v[1]):
            u, v = v, u
        if u[0] < 0:
            u = np.negative(u)
        if v[1] < 0:
            v = np.negative(v)
        if b is None:
            if n[2] < 0:
                n = np.negative(n)
        else:
            if np.dot(n, b) > 0:
                n = np.negative(n)

    # return uvn matrix, column major
    return np.matrix([
        [u[0], v[0], n[0]],
        [u[1], v[1], n[1]],
        [u[2], v[2], n[2]],
    ])
def negateVal():
    """negate a boolean, change the sign of a float inplace"""
    Z = np.random.randint(0, 2, 100)
    np.logical_not(Z, out=Z)
    print(Z)

    W = np.random.uniform(-1.0, 1.0, 100)
    np.negative(W, out=W)  # flip the sign of each float in place
    print(W)
def backward_cpu(self, x, gy):
    gx = utils.force_array(numpy.square(x[0]))
    numpy.negative(gx, out=gx)
    gx += 1
    numpy.sqrt(gx, out=gx)
    numpy.reciprocal(gx, out=gx)
    gx *= gy[0]
    return gx,
def _quaternion_from_matrix(matrix, isprecise=False):
    """Return a quaternion from a rotation matrix.

    Args:
        matrix (array_like): 4x4 transformation matrix
        isprecise (bool, optional): if True, assume ``matrix`` is a precise
            rotation matrix and use a faster algorithm

    Returns:
        tuple: the scalar part w and the vector part (x, y, z)
    """
    M = np.array(matrix, dtype=np.float64, copy=False)[:4, :4]
    if isprecise:
        q = np.empty((4, ))
        t = np.trace(M)
        if t > M[3, 3]:
            q[0] = t
            q[3] = M[1, 0] - M[0, 1]
            q[2] = M[0, 2] - M[2, 0]
            q[1] = M[2, 1] - M[1, 2]
        else:
            i, j, k = 1, 2, 3
            if M[1, 1] > M[0, 0]:
                i, j, k = 2, 3, 1
            if M[2, 2] > M[i, i]:
                i, j, k = 3, 1, 2
            t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
            q[i] = t
            q[j] = M[i, j] + M[j, i]
            q[k] = M[k, i] + M[i, k]
            q[3] = M[k, j] - M[j, k]
        q *= 0.5 / math.sqrt(t * M[3, 3])
        # still needs normalization
    else:
        m00 = M[0, 0]
        m01 = M[0, 1]
        m02 = M[0, 2]
        m10 = M[1, 0]
        m11 = M[1, 1]
        m12 = M[1, 2]
        m20 = M[2, 0]
        m21 = M[2, 1]
        m22 = M[2, 2]
        # symmetric matrix K
        K = np.array([[m00 - m11 - m22, 0.0, 0.0, 0.0],
                      [m01 + m10, m11 - m00 - m22, 0.0, 0.0],
                      [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0],
                      [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22]])
        K /= 3.0
        # quaternion is eigenvector of K that corresponds to largest eigenvalue
        w, V = np.linalg.eigh(K)
        q = V[[3, 0, 1, 2], np.argmax(w)]
    if q[0] < 0.0:
        np.negative(q, q)
    n = np.linalg.norm(q)
    if n > 1.0:
        q = q / n
    # taken from transformations.py, so w comes first
    return (q[0], q[1:])
def backProp_epoch(I, T, W_IH, W_HO, A_O,
                   DeltaW_IH, DeltaW_HO,
                   net_H=None, net_O=None, A_H=None,
                   Delta_H=None, Delta_O=None,
                   sigma_H=(afs.sigmoid, afs.sigmoid_prime),
                   sigma_O=(afs.sigmoid, afs.sigmoid_prime),
                   errorF=(efs.sumSquaredError, efs.sumSquaredError_prime)):
    """
    net: node input function result
    A: node activation
    sigma: activation function
    errorF: (error function(target, output), error function derivative(target, output))
    """
    (M_H, M_I) = W_IH.shape
    M_I -= 1
    (M_O, blah) = W_HO.shape
    (M, N) = I.shape
    if net_H is None:
        net_H = np.empty((M_H, 1))
    if net_O is None:
        net_O = np.empty((M_O, 1))
    if A_H is None:
        A_H = np.empty_like(net_H)
    if Delta_H is None:
        Delta_H = np.empty_like(net_H)
    if Delta_O is None:
        Delta_O = np.empty_like(net_O)

    # compute hidden layer inputs
    np.dot(W_IH[:, :-1], I, net_H)  # net_H is M_H x N
    np.add(net_H, np.dot(W_IH[:, -1:], np.ones((1, N))), net_H)  # bias
    # compute hidden layer activations
    sigma_H[0](net_H, A_H)  # A_H is M_H x N
    # compute output layer inputs
    np.dot(W_HO[:, :-1], A_H, net_O)
    np.add(net_O, np.dot(W_HO[:, -1:], np.ones((1, N))), net_O)  # bias
    # compute output layer activations
    sigma_O[0](net_O, A_O)

    # compute output error
    errorVal = errorF[0](A_O, T)
    # compute output error gradient
    errorF[1](T, A_O, Delta_O)  # Delta_O holds tmp value
    np.negative(Delta_O, Delta_O)
    sigma_O[1](A_O, net_O)  # reusing net_O matrix as tmp storage
    np.multiply(Delta_O, net_O, Delta_O)
    # compute output weight update
    tmpA_H = np.append(A_H, np.ones((1, N)), axis=0)  # add bias inputs
    np.dot(Delta_O, tmpA_H.T, DeltaW_HO)  # TODO: compute using transpose for speed-up?
    # compute hidden error gradient
    sigma_H[1](A_H, Delta_H)  # Delta_H holds tmp value
    np.multiply(Delta_H,
                np.dot(W_HO[:, :-1].T, Delta_O),  # TODO: W^T*Delta_O too wasteful
                Delta_H)
    # compute hidden weight update
    tmpI = np.append(I, np.ones((1, N)), axis=0)  # add bias inputs
    np.dot(Delta_H, tmpI.T, DeltaW_IH)  # TODO: compute using transpose for speed-up?
    # np.multiply(DeltaW_IH, alpha, DeltaW_IH)  # apply learning rate
    # TODO: force garbage collection at key areas where temporaries are created
    return errorVal
def __neg__(self):
    """ return negated """
    self.A = numpy.negative(self.A)
    self.bX = numpy.negative(self.bX)
    self.bY = numpy.negative(self.bY)
    self.bZ = numpy.negative(self.bZ)
    return self
def generate_unit_phase_shifts(shape, float_type=float):
    """
    Computes the complex phase shift's angle due to a unit spatial shift.

    This is meant to be a helper function for ``register_mean_offsets``. It
    does this by computing a table of the angle of the phase of a unit shift
    in each dimension (with a factor of :math:`2\pi`). This allows arbitrary
    phase shifts to be made in each dimension by multiplying these angles by
    the size of the shift and adding them to the existing angle to induce the
    proper phase shift in Fourier space, which is equivalent to the spatial
    translation.

    Args:
        shape(tuple of ints):    shape of the data to be shifted.
        float_type(real type):   phase type (default numpy.float64)

    Returns:
        (numpy.ndarray):         an array containing the angle of the complex
                                 phase shift to use for each dimension.

    Examples:
        >>> generate_unit_phase_shifts((2,4))
        array([[[-0.        , -0.        , -0.        , -0.        ],
                [-3.14159265, -3.14159265, -3.14159265, -3.14159265]],
        <BLANKLINE>
               [[-0.        , -1.57079633, -3.14159265, -4.71238898],
                [-0.        , -1.57079633, -3.14159265, -4.71238898]]])
    """
    # Convert to `numpy`-based type if not done already.
    float_type = numpy.dtype(float_type).type

    # Must be of type float.
    assert issubclass(float_type, numpy.floating)
    assert numpy.dtype(float_type).itemsize >= 4

    # Get the negative wave vector
    negative_wave_vector = numpy.asarray(shape, dtype=float_type)
    numpy.reciprocal(negative_wave_vector, out=negative_wave_vector)
    negative_wave_vector *= 2 * numpy.pi
    numpy.negative(negative_wave_vector, out=negative_wave_vector)

    # Get the indices for each point in the selected space.
    indices = xnumpy.cartesian_product([numpy.arange(_) for _ in shape])

    # Determine the phase offset for each point in space.
    complex_angle_unit_shift = indices * negative_wave_vector
    complex_angle_unit_shift = complex_angle_unit_shift.T.copy()
    complex_angle_unit_shift = complex_angle_unit_shift.reshape(
        (len(shape),) + shape
    )

    return complex_angle_unit_shift
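# A self-contained illustration of the Fourier-shift idea the helper above
# tabulates (a sketch using plain numpy, since xnumpy is project-specific):
# multiplying the FFT by exp(-1j * wave_vector * shift) translates the
# signal in space, exactly matching an integer roll.
import numpy as np

signal = np.zeros(8)
signal[2] = 1.0
shift = 3
wave_vector = 2 * np.pi * np.fft.fftfreq(signal.size)  # 2*pi*k/N per bin
shifted = np.fft.ifft(np.fft.fft(signal) * np.exp(-1j * wave_vector * shift)).real
assert np.allclose(shifted, np.roll(signal, shift))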
def getMountainWeights(bgDiff, mountainCenter, halfLife=0.1):
    mountainWeights = bgDiff - mountainCenter
    k = np.log(2) / halfLife
    mountainWeights *= k
    np.abs(mountainWeights, out=mountainWeights)
    np.negative(mountainWeights, out=mountainWeights)
    np.exp(mountainWeights, out=mountainWeights)
    return mountainWeights
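# A small worked check of getMountainWeights (an illustration, not from the
# original source): the returned weight is exp(-k * |bgDiff - mountainCenter|)
# with k = ln(2) / halfLife, so it halves for every halfLife of distance.
import numpy as np

d = np.array([0.0, 0.1, 0.2, 0.3])
w = getMountainWeights(d + 5.0, mountainCenter=5.0, halfLife=0.1)
assert np.allclose(w, [1.0, 0.5, 0.25, 0.125])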
def forward_cpu(self, inputs):
    self.retain_inputs((0, 1))
    x, gy = inputs
    gx = utils.force_array(numpy.square(x))
    numpy.negative(gx, out=gx)
    gx += 1
    numpy.sqrt(gx, out=gx)
    numpy.reciprocal(gx, out=gx)
    gx *= gy
    return gx,
def __neg__(self):
    """ return negated """
    if __sparse__:
        self.A = -self.A
    else:
        self.A = numpy.negative(self.A)
    self.b = numpy.negative(self.b)
    return self
def test_lower_align(self):
    # check data that is not aligned to element size
    # i.e. doubles are aligned to 4 bytes on i386
    d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
    assert_equal(np.abs(d), d)
    assert_equal(np.negative(d), -d)
    np.negative(d, out=d)
    np.negative(np.ones_like(d), out=d)
    np.abs(d, out=d)
    np.abs(np.ones_like(d), out=d)
def decompose(self):
    M = numpy.array(self._data, dtype=numpy.float64, copy=True).T
    if abs(M[3, 3]) < self._EPS:
        raise ValueError("M[3, 3] is zero")
    M /= M[3, 3]
    P = M.copy()
    P[:, 3] = 0.0, 0.0, 0.0, 1.0
    if not numpy.linalg.det(P):
        raise ValueError("matrix is singular")

    scale = numpy.zeros((3, ))
    shear = [0.0, 0.0, 0.0]
    angles = [0.0, 0.0, 0.0]
    mirror = [1, 1, 1]

    translate = M[3, :3].copy()
    M[3, :3] = 0.0

    row = M[:3, :3].copy()
    scale[0] = math.sqrt(numpy.dot(row[0], row[0]))
    row[0] /= scale[0]
    shear[0] = numpy.dot(row[0], row[1])
    row[1] -= row[0] * shear[0]
    scale[1] = math.sqrt(numpy.dot(row[1], row[1]))
    row[1] /= scale[1]
    shear[0] /= scale[1]
    shear[1] = numpy.dot(row[0], row[2])
    row[2] -= row[0] * shear[1]
    shear[2] = numpy.dot(row[1], row[2])
    row[2] -= row[1] * shear[2]
    scale[2] = math.sqrt(numpy.dot(row[2], row[2]))
    row[2] /= scale[2]
    shear[1:] /= scale[2]

    if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
        numpy.negative(scale, scale)
        numpy.negative(row, row)

    # If the scale was negative, we give back a separate mirror vector to indicate this.
    if M[0, 0] < 0:
        mirror[0] = -1
    if M[1, 1] < 0:
        mirror[1] = -1
    if M[2, 2] < 0:
        mirror[2] = -1

    angles[1] = math.asin(-row[0, 2])
    if math.cos(angles[1]):
        angles[0] = math.atan2(row[1, 2], row[2, 2])
        angles[2] = math.atan2(row[0, 1], row[0, 0])
    else:
        angles[0] = math.atan2(-row[2, 1], row[1, 1])
        angles[2] = 0.0

    return (Vector(data=scale), Vector(data=shear), Vector(data=angles),
            Vector(data=translate), Vector(data=mirror))
def computeDeltaW(tmpDeltaW, A_O, y, DeltaW, errorF):
    # init values and allocate memory
    M_O = A_O.shape[0]  # num vector elements
    errors = np.zeros_like(y)
    # compute errors
    errorF[1](y, A_O, errors)
    np.negative(errors, errors)
    # compute Delta_W
    for i in range(M_O):
        tmpDeltaW[i, :, :] *= errors[i]
        np.add(DeltaW, tmpDeltaW[i, :, :], DeltaW)
def __neg__(self):
    """Operator for inverting the phase of the spectrogram with ``-spectrogram``.

    :returns: a :class:`sumpf.Spectrogram` instance
    """
    channels = sumpf_internal.allocate_array(shape=self.shape(), dtype=numpy.complex128)
    numpy.negative(self._channels, out=channels)
    return Spectrogram(channels=channels,
                       resolution=self.__resolution,
                       sampling_rate=self.__sampling_rate,
                       offset=self.__offset,
                       labels=self._labels)
def quaternion_conjugate(quaternion):
    """Return conjugate of quaternion.

    >>> q0 = random_quaternion()
    >>> q1 = quaternion_conjugate(q0)
    >>> q1[0] == q0[0] and all(q1[1:] == -q0[1:])
    True

    """
    q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    numpy.negative(q[1:], q[1:])
    return q
def quaternion_inverse(quaternion):
    """Return inverse of quaternion.

    >>> q0 = random_quaternion()
    >>> q1 = quaternion_inverse(q0)
    >>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0])
    True

    """
    q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    numpy.negative(q[1:], q[1:])
    return q / numpy.dot(q, q)
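# A quick property check tying the two helpers above together (a sketch, not
# from the original source): for a unit quaternion the conjugate and the
# inverse coincide, since q**-1 = conjugate(q) / |q|**2.
import numpy

q = numpy.array([0.5, 0.5, 0.5, 0.5])  # unit quaternion, w first
assert numpy.allclose(quaternion_conjugate(q), quaternion_inverse(q))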
def test_negative(self):
    from numpy import array, negative
    a = array([-5.0, 0.0, 1.0])
    b = negative(a)
    for i in range(3):
        assert b[i] == -a[i]

    a = array([-5.0, 1.0])
    b = negative(a)
    a[0] = 5.0
    assert b[0] == 5.0
def plot_hists(nus=[143, 353], map1_name=None, map2_name=None,
               maskname='wmap_temperature_kq85_analysis_mask_r10_9yr_v5.fits',
               nside=2048, fwhm=0.0, bins=100, normed=True, atol=1e-6,
               ymin=0.01, ymax=None, xmin=-0.001, xmax=0.005):
    if map1_name is None:
        map1_name = 'HFI_SkyMap_{}_2048_R2.02_full.fits'.format(nus[0])
    label1 = '{} GHz'.format(nus[0])
    if map2_name is None:
        map2_name = 'HFI_SkyMap_{}_2048_R2.02_full.fits'.format(nus[1])
    label2 = '{} GHz'.format(nus[1])

    map1 = prepare_map(map1_name, field=0, maskname=maskname,
                       nside_out=nside, fwhm=fwhm)
    map2 = prepare_map(map2_name, field=0, maskname=maskname,
                       nside_out=nside, fwhm=fwhm)

    y1, x1 = pl.histogram(map1[np.where(np.negative(np.isclose(map1, 0., atol=atol)))],
                          bins=bins, normed=normed)
    bin1 = (x1[:-1] + x1[1:]) / 2.
    y2, x2 = pl.histogram(map2[np.where(np.negative(np.isclose(map2, 0., atol=atol)))],
                          bins=bins, normed=normed)
    bin2 = (x2[:-1] + x2[1:]) / 2.
    # return bin1, y1, bin2, y2

    fig = plt.figure()
    ax = plt.gca()
    ax.semilogy(bin1, y1, lw=3, label=label1, color='red')
    ax.semilogy(bin2, y2, lw=3, label=label2, color='gray')
    ax.set_xlim(xmin=xmin, xmax=xmax)
    ax.set_ylim(ymin=ymin, ymax=ymax)
    # ax.set_yscale('log')
    ax.set_xlabel('$\mu K$', fontsize=20)
    ax.set_yticks([])
    plt.draw()
    plt.legend(frameon=False, fontsize=20)
    plt.savefig('pdfs_{}GHz_{}GHz_fwhm{:.3}rad.pdf'.format(nus[0], nus[1], fwhm))
def from_matrix(matrix, isprecise=False):
    """Return quaternion from rotation matrix.

    If isprecise is True, the input matrix is assumed to be a precise
    rotation matrix and a faster algorithm is used.
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
    if isprecise:
        q = numpy.empty((4, ))
        t = numpy.trace(M)
        if t > M[3, 3]:
            q[0] = t
            q[3] = M[1, 0] - M[0, 1]
            q[2] = M[0, 2] - M[2, 0]
            q[1] = M[2, 1] - M[1, 2]
        else:
            i, j, k = 1, 2, 3
            if M[1, 1] > M[0, 0]:
                i, j, k = 2, 3, 1
            if M[2, 2] > M[i, i]:
                i, j, k = 3, 1, 2
            t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
            q[i] = t
            q[j] = M[i, j] + M[j, i]
            q[k] = M[k, i] + M[i, k]
            q[3] = M[k, j] - M[j, k]
        q *= 0.5 / math.sqrt(t * M[3, 3])
    else:
        m00 = M[0, 0]
        m01 = M[0, 1]
        m02 = M[0, 2]
        m10 = M[1, 0]
        m11 = M[1, 1]
        m12 = M[1, 2]
        m20 = M[2, 0]
        m21 = M[2, 1]
        m22 = M[2, 2]
        # symmetric matrix K
        K = numpy.array([[m00 - m11 - m22, 0.0, 0.0, 0.0],
                         [m01 + m10, m11 - m00 - m22, 0.0, 0.0],
                         [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0],
                         [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22]])
        K /= 3.0
        # quaternion is eigenvector of K that corresponds to largest eigenvalue
        w, V = numpy.linalg.eigh(K)
        q = V[[3, 0, 1, 2], numpy.argmax(w)]
    if q[0] < 0.0:
        numpy.negative(q, q)
    # return q.tolist()
    return [q[1], q[2], q[3], q[0]]  # sofa order
def track_unavoided_crossings(overlaps: Tensor3D, nHOMO: int) -> Tuple:
    """
    Track the index of the states if there is a crossing using the algorithm
    described at: J. Chem. Phys. 137, 014512 (2012); doi: 10.1063/1.4732536.
    """
    # 3D array containing the costs
    # Notice that the cost is computed on half of the overlap matrices
    # corresponding to Sji_t, the other half corresponds to Sij_t
    nOverlaps, nOrbitals, _ = overlaps.shape

    # Indexes taking into account the crossing
    # There are 2 overlap matrices at each time t
    indexes = np.empty((nOverlaps + 1, nOrbitals), dtype=np.int)
    indexes[0] = np.arange(nOrbitals, dtype=np.int)

    # Track the crossing using the overlap matrices
    for k in range(nOverlaps):
        # Cost matrix to track the crossings
        logger.info("Tracking crossings at time: {}".format(k))
        cost_mtx_homos = np.negative(overlaps[k, :nHOMO, :nHOMO] ** 2)
        cost_mtx_lumos = np.negative(overlaps[k, nHOMO:, nHOMO:] ** 2)

        # Compute the swap at time t + dt using two sets of orbitals:
        # HOMOs and LUMOs
        swaps_homos = linear_sum_assignment(cost_mtx_homos)[1]
        swaps_lumos = linear_sum_assignment(cost_mtx_lumos)[1]
        total_swaps = np.concatenate((swaps_homos, swaps_lumos + nHOMO))
        indexes[k + 1] = total_swaps

        # update the overlaps at times > t with the previous swaps
        if k != (nOverlaps - 1):  # last element
            k1 = k + 1
            # Update the matrix Sji at time t
            overlaps[k] = swap_columns(overlaps[k], total_swaps)
            # Update all the matrices Sji at time > t
            overlaps[k1:] = swap_forward(overlaps[k1:], total_swaps)

    # Accumulate the swaps
    acc = indexes[0]
    arr = np.empty(indexes.shape, dtype=np.int)
    arr[0] = acc

    # Fold accumulating the crossings
    for i in range(nOverlaps):
        acc = acc[indexes[i + 1]]
        arr[i + 1] = acc

    return overlaps, arr
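# A minimal sketch of the matching step used above (an illustration, not
# from the original source): scipy's linear_sum_assignment minimises total
# cost, so negating the squared overlaps makes it return the permutation
# that maximises the total squared overlap between consecutive states.
import numpy as np
from scipy.optimize import linear_sum_assignment

overlap = np.array([[0.1, 0.9],
                    [0.8, 0.2]])  # states 0 and 1 have swapped character
cost = np.negative(overlap ** 2)
_, swaps = linear_sum_assignment(cost)
assert (swaps == [1, 0]).all()  # the crossing is detected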
def exceedance_graph(self, ax):
    ax.set_title('Wave Height Distribution (m)')
    lims = [0, 1]
    for x in range(0, self.n):
        yh = [self.y[x], self.y[x]]
        ax.plot(self.xh, yh, color='grey', alpha=0.5)
        ax.text(self.x[0] - 2 * self.dx, self.y[x], self.poe[x])
        lims[1] = self.y[x]
    for x in range(0, self.m):
        xv = [self.x[x], self.x[x]]
        ax.plot(xv, self.yv, color='grey', alpha=0.5)
        ax.text(self.x[x] - self.dx / 4, self.y[0] - self.dy, self.x[x])
        lims[0] = self.x[x]

    zi = np.sqrt(np.negative(np.log(self.yi)))
    ax.plot(self.xi, zi, 'o', label='Observed Waves')
    coef = np.polyfit(self.xi, zi, 1)
    xa = list([0.5 * np.min(self.xi)])
    xb = list([1.5 * np.max(self.xi)])
    xx = np.concatenate((np.array(xa), self.xi, np.array(xb)))
    yy = np.polyval(coef, xx)
    ax.plot(xx, yy, label='Rayleigh Fit')

    H1 = self.get_bg_value(self.Htr / self.Hrms, 1)
    H2 = self.get_bg_value(self.Htr / self.Hrms, 2)
    xxx = self.array_utility(self.x[0], self.x[self.m - 1])
    lxxx = len(xxx)
    yyy = list()
    for x in range(0, lxxx):
        if xxx[x] < self.Htr:
            yyy.append(1 - np.exp(np.negative(np.power(xxx[x] / (H1 * self.Hrms), 2.0))))
        else:
            yyy.append(1 - np.exp(np.negative(np.power(xxx[x] / (H2 * self.Hrms), 3.6))))
    zzz = np.sqrt(np.negative(np.log(np.subtract(1, yyy))))
    ax.plot(xxx, zzz, 'r', label='BG Fit')

    ax.legend()
    plt.setp(ax.get_yticklabels(), visible=False)
    plt.setp(ax.get_yticklines(), visible=False)
    plt.setp(ax.get_xticklabels(), visible=False)
    plt.setp(ax.get_xticklines(), visible=False)
    plt.ylim(0, lims[1])
    plt.xlim(0, lims[0])
def negIP(self):
    """Take the negation of F:  F.negIP() => F(x) <- (-F(x))  (in-place)"""
    np.negative(self.t, out=self.t)
    return self
def strassen_matrix_multiplication(A, B):
    n = len(A)
    if n == 1:
        return [[A[0][0] * B[0][0]]]
    l = math.floor(n / 2)
    Ak = array(A)
    Bk = array(B)
    A11 = Ak[0:l, 0:l]
    A12 = Ak[0:l, l:n]
    A21 = Ak[l:n, 0:l]
    A22 = Ak[l:n, l:n]
    B11 = Bk[0:l, 0:l]
    B12 = Bk[0:l, l:n]
    B21 = Bk[l:n, 0:l]
    B22 = Bk[l:n, l:n]

    # M1 = (A11 + A22)(B11 + B22)
    # M2 = (A21 + A22) B11
    # M3 = A11 (B12 - B22)
    # M4 = A22 (B21 - B11)
    # M5 = (A11 + A12) B22
    # M6 = (A21 - A11)(B11 + B12)
    # M7 = (A12 - A22)(B21 + B22)
    K1 = A11 + A22
    K2 = B11 + B22
    K3 = A21 + A22
    K4 = B12 - B22
    K5 = B21 - B11
    K6 = A11 + A12
    K7 = A21 - A11
    K8 = B11 + B12
    K9 = A12 - A22
    K10 = B21 + B22

    P1 = numpy.array(strassen_matrix_multiplication(K1, K2))
    P2 = numpy.array(strassen_matrix_multiplication(K3, B11))
    P3 = numpy.array(strassen_matrix_multiplication(A11, K4))
    P4 = numpy.array(strassen_matrix_multiplication(A22, K5))
    P5 = numpy.array(strassen_matrix_multiplication(K6, B22))
    P6 = numpy.array(strassen_matrix_multiplication(K7, K8))
    P7 = numpy.array(strassen_matrix_multiplication(K9, K10))

    # C11 = M1 + M4 - M5 + M7
    # C12 = M3 + M5
    # C21 = M2 + M4
    # C22 = M1 - M2 + M3 + M6
    C11 = (P1 + P4) + (P7 + numpy.negative(P5))
    C12 = P3 + P5
    C21 = P2 + P4
    C22 = (P1 + numpy.negative(P2)) + (P3 + P6)

    C11 = C11.tolist()
    C12 = C12.tolist()
    C21 = C21.tolist()
    C22 = C22.tolist()

    # stitch the four quadrants back together
    C1 = [C11[i][:] + C12[i][:] for i in range(l)]
    C2 = [C21[i][:] + C22[i][:] for i in range(l)]
    return C1 + C2
def np_sigmoid(x):
    return np.divide(1, np.add(1, np.exp(np.negative(x))))
def test_half_ufuncs(self):
    """Test the various ufuncs"""
    a = np.array([0, 1, 2, 4, 2], dtype=float16)
    b = np.array([-2, 5, 1, 4, 3], dtype=float16)
    c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)

    assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
    assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
    assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
    assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])

    assert_equal(np.equal(a, b), [False, False, False, True, False])
    assert_equal(np.not_equal(a, b), [True, True, True, False, True])
    assert_equal(np.less(a, b), [False, True, False, False, True])
    assert_equal(np.less_equal(a, b), [False, True, False, True, True])
    assert_equal(np.greater(a, b), [True, False, True, False, False])
    assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
    assert_equal(np.logical_and(a, b), [False, True, True, True, True])
    assert_equal(np.logical_or(a, b), [True, True, True, True, True])
    assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
    assert_equal(np.logical_not(a), [True, False, False, False, False])

    assert_equal(np.isnan(c), [False, False, False, True, False])
    assert_equal(np.isinf(c), [False, False, True, False, False])
    assert_equal(np.isfinite(c), [True, True, False, False, True])
    assert_equal(np.signbit(b), [True, False, False, False, False])

    assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])

    assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
    with suppress_warnings() as sup:
        sup.record(RuntimeWarning)
        x = np.maximum(b, c)
        assert_(np.isnan(x[3]))
        assert_equal(len(sup.log), 1)
    x[3] = 0
    assert_equal(x, [0, 5, 1, 0, 6])

    assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
    with suppress_warnings() as sup:
        sup.record(RuntimeWarning)
        x = np.minimum(b, c)
        assert_(np.isnan(x[3]))
        assert_equal(len(sup.log), 1)
    x[3] = 0
    assert_equal(x, [-2, -1, -np.inf, 0, 3])

    assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
    assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
    assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
    assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])

    assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
    assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
    assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
    assert_equal(np.square(b), [4, 25, 1, 16, 9])
    assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
    assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
    assert_equal(np.conjugate(b), b)
    assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
    assert_equal(np.negative(b), [2, -5, -1, -4, -3])
    assert_equal(np.positive(b), b)
    assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
    assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
    assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
    assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
def create_psych_curve_plot(sessions):
    data_mean = []
    data_errorbar = []
    data_fit = []

    for session in sessions.fetch('KEY'):
        contrasts, prob_right, prob_left, \
            threshold, bias, lapse_low, lapse_high, \
            n_trials, n_trials_right = \
            (sessions & session).fetch1(
                'signed_contrasts', 'prob_choose_right', 'prob_left',
                'threshold', 'bias', 'lapse_low', 'lapse_high',
                'n_trials_stim', 'n_trials_stim_right')
        pars = [bias, threshold, lapse_low, lapse_high]
        contrasts = contrasts * 100
        contrasts_fit = np.arange(-100, 100)
        prob_right_fit = psy.erf_psycho_2gammas(pars, contrasts_fit)
        ci = smp.proportion_confint(
            n_trials_right, n_trials, alpha=0.032, method='normal') - prob_right

        curve_color, error_color = get_color(prob_left, 0.3)

        behavior_data = go.Scatter(
            x=contrasts.tolist(),
            y=prob_right.tolist(),
            marker=dict(size=6,
                        color=curve_color,
                        line=dict(color='white', width=1)),
            mode='markers',
            name=f'p_left = {prob_left}, data with 68% CI')

        behavior_errorbar = go.Scatter(
            x=contrasts.tolist(),
            y=prob_right.tolist(),
            error_y=dict(type='data',
                         array=ci[0].tolist(),
                         arrayminus=np.negative(ci[1]).tolist(),
                         visible=True,
                         color=error_color),
            marker=dict(size=6),
            mode='none',
            showlegend=False)

        behavior_fit = go.Scatter(
            x=contrasts_fit.tolist(),
            y=prob_right_fit.tolist(),
            name=f'p_left = {prob_left} model fits',
            marker=dict(color=curve_color))

        data_mean.append(behavior_data)
        data_errorbar.append(behavior_errorbar)
        data_fit.append(behavior_fit)

    layout = go.Layout(
        width=630,
        height=350,
        title=dict(text='Psychometric Curve', x=0.25, y=0.85),
        xaxis=dict(title='Contrast (%)'),
        yaxis=dict(title='Probability choosing right', range=[-0.05, 1.05]),
        template=dict(layout=dict(plot_bgcolor="white")))

    data = data_errorbar
    for element in data_fit:
        data.append(element)
    for element in data_mean:
        data.append(element)

    return go.Figure(data=data, layout=layout)
def neg(x):
    return np.negative(x)
def __neg__(self):
    neg_n_body_tensors = dict()
    for key in self.n_body_tensors:
        neg_n_body_tensors[key] = numpy.negative(self.n_body_tensors[key])
    return PolynomialTensor(neg_n_body_tensors)
    if r != 0:  # A: singular
        return False
    for i in range(k):
        if out[i] <= 0:
            return False
    val = out[-1]
    if k == m:
        return True
    own_supp_flags = np.zeros(m, np.bool_)
    own_supp_flags[own_supp] = True
    for i in range(m):
        if not own_supp_flags[i]:
            payoff = 0
            for j in range(k):
                payoff += payoff_matrix[i, opp_supp[j]] * out[j]
            if payoff > val:
                return False
    return True


playerA = np.array([[0, -1, 1], [1, 0, -1], [-1, 1, 0]])
playerB = np.negative(playerA)
rps = NormalFormGame(playerA)  # ([[playerA, playerB]])
print(rps)
support_enumeration(rps)
def plot_summary(all_true_states, all_mean_belief, all_variance_belief,
                 sample_period, all_kt=None):
    time_steps = list(range(len(all_true_states)))
    time_steps_in_seconds = [t * sample_period for t in time_steps]
    all_true_states = np.array(all_true_states)
    all_mean_belief = np.array(all_mean_belief)
    all_variance_belief = np.array(all_variance_belief)

    true_x = all_true_states[:, 0, 0]
    true_y = all_true_states[:, 1, 0]
    true_theta = all_true_states[:, 2, 0]
    mean_beliefs_about_x = all_mean_belief[:, 0, 0]
    mean_beliefs_about_y = all_mean_belief[:, 1, 0]
    mean_beliefs_about_theta = all_mean_belief[:, 2, 0]
    var_beliefs_about_x = all_variance_belief[:, 0, 0]
    var_beliefs_about_y = all_variance_belief[:, 1, 0]
    var_beliefs_about_theta = all_variance_belief[:, 2, 0]

    # Add static plots
    _, axes = plt.subplots(3, 2, figsize=(15, 15))
    ax1 = axes[0, 0]
    ax2 = axes[1, 0]
    ax3 = axes[1, 1]
    ax4 = axes[0, 1]
    ax5 = axes[2, 0]
    ax6 = axes[2, 1]

    ax1.plot(time_steps_in_seconds, true_x)
    ax1.plot(time_steps_in_seconds, mean_beliefs_about_x, '--')
    ax1.plot(time_steps_in_seconds, true_y)
    ax1.plot(time_steps_in_seconds, mean_beliefs_about_y, '--')
    ax1.plot(time_steps_in_seconds, true_theta)
    ax1.plot(time_steps_in_seconds, mean_beliefs_about_theta, '--')
    ax1.set_title("State vs Mean belief about State")
    ax1.set_xlabel("Time (s)")
    ax1.legend(["Actual X", "Mean X Belief", "Actual Y", "Mean Y Belief",
                "Actual Theta", "Mean Theta Belief"])

    x_error = [(xt - mean_beliefs_about_x[i]) for i, xt in enumerate(true_x)]
    ax2.plot(time_steps_in_seconds, x_error)
    ax2.plot(time_steps_in_seconds, np.sqrt(var_beliefs_about_x) * 2, 'b--')
    ax2.plot(time_steps_in_seconds,
             np.negative(np.sqrt(np.abs(var_beliefs_about_x)) * 2), 'b--')
    ax2.legend(["X Error", "X Variance"])
    ax2.set_title("Error from X and mean belief")
    ax2.set_xlabel("Time (s)")
    ax2.set_ylabel("X (m)")
    ax2.set_ylim(-0.5, 0.5)

    y_error = [(vt - mean_beliefs_about_y[i]) for i, vt in enumerate(true_y)]
    ax3.plot(time_steps_in_seconds, y_error)
    ax3.plot(time_steps_in_seconds, np.sqrt(np.abs(var_beliefs_about_y)) * 2, 'y--')
    ax3.plot(time_steps_in_seconds,
             np.negative(np.sqrt(np.abs(var_beliefs_about_y)) * 2), 'y--')
    ax3.legend(["Y Error", "Y Variance"])
    ax3.set_title("Error from Y and mean belief")
    ax3.set_xlabel("Time (s)")
    ax3.set_ylabel("Y (m)")
    ax3.set_ylim(-0.5, 0.5)

    theta_error = [(vt - mean_beliefs_about_theta[i])
                   for i, vt in enumerate(true_theta)]
    ax4.plot(time_steps_in_seconds, theta_error)
    ax4.plot(time_steps_in_seconds, np.sqrt(np.abs(var_beliefs_about_theta)) * 2, 'y--')
    ax4.plot(time_steps_in_seconds,
             np.negative(np.sqrt(np.abs(var_beliefs_about_theta)) * 2), 'y--')
    ax4.legend(["Theta Error", "Theta Variance"])
    ax4.set_title("Error from theta and mean belief")
    ax4.set_xlabel("Time (s)")
    ax4.set_ylabel("Theta (radians)")
    ax4.set_ylim(-0.174, 0.174)

    if all_kt is not None:
        ax5.plot(time_steps_in_seconds, np.array(all_kt)[:, 0, 0])
        ax5.plot(time_steps_in_seconds, np.array(all_kt)[:, 1, 0])
        ax5.plot(time_steps_in_seconds, np.array(all_kt)[:, 2, 0])
        ax5.plot(time_steps_in_seconds, np.array(all_kt)[:, 0, 1])
        ax5.plot(time_steps_in_seconds, np.array(all_kt)[:, 1, 1])
        ax5.plot(time_steps_in_seconds, np.array(all_kt)[:, 2, 1])
        ax5.set_title("Kalman filter gain for position")
        ax5.legend(["X kalman gain range", "Y kalman gain range",
                    "Theta Kalman Gain range", "X kalman gain bearing",
                    "Y kalman gain bearing", "Theta Kalman Gain bearing"])

    sc = plt.imshow(all_variance_belief[-1], cmap='Blues',
                    interpolation='nearest', origin='lower')
    plt.colorbar(sc)
    plt.show()
    plt.pause(200)
def gpdfitnew(x, sort=True, sort_in_place=False, return_quadrature=False):
    """Estimate the parameters for the Generalized Pareto Distribution (GPD)

    Returns empirical Bayes estimate for the parameters of the two-parameter
    generalized Pareto distribution given the data.

    Parameters
    ----------
    x : ndarray
        One dimensional data array

    sort : bool or ndarray, optional
        If known in advance, one can provide an array of indices that would
        sort the input array `x`. If the input array is already sorted,
        provide False. If True (default behaviour), the array is sorted
        internally.

    sort_in_place : bool, optional
        If `sort` is True and `sort_in_place` is True, the array is sorted
        in-place (False by default).

    return_quadrature : bool, optional
        If True, quadrature points and weights `ks` and `w` of the marginal
        posterior distribution of k are also calculated and returned. False
        by default.

    Returns
    -------
    k, sigma : float
        estimated parameter values

    ks, w : ndarray
        Quadrature points and weights of the marginal posterior distribution
        of `k`. Returned only if `return_quadrature` is True.

    Notes
    -----
    This function returns a negative of Zhang and Stephens's k, because it is
    more common parameterisation.

    """
    if x.ndim != 1 or len(x) <= 1:
        raise ValueError("Invalid input array.")

    # check if x should be sorted
    if sort is True:
        if sort_in_place:
            x.sort()
            xsorted = True
        else:
            sort = np.argsort(x)
            xsorted = False
    elif sort is False:
        xsorted = True
    else:
        xsorted = False

    n = len(x)
    PRIOR = 3
    m = 30 + int(np.sqrt(n))

    bs = np.arange(1, m + 1, dtype=float)
    bs -= 0.5
    np.divide(m, bs, out=bs)
    np.sqrt(bs, out=bs)
    np.subtract(1, bs, out=bs)
    if xsorted:
        bs /= PRIOR * x[int(n / 4 + 0.5) - 1]
        bs += 1 / x[-1]
    else:
        bs /= PRIOR * x[sort[int(n / 4 + 0.5) - 1]]
        bs += 1 / x[sort[-1]]

    ks = np.negative(bs)
    temp = ks[:, None] * x
    np.log1p(temp, out=temp)
    np.mean(temp, axis=1, out=ks)

    L = bs / ks
    np.negative(L, out=L)
    np.log(L, out=L)
    L -= ks
    L -= 1
    L *= n

    temp = L - L[:, None]
    np.exp(temp, out=temp)
    w = np.sum(temp, axis=1)
    np.divide(1, w, out=w)

    # remove negligible weights
    dii = w >= 10 * np.finfo(float).eps
    if not np.all(dii):
        w = w[dii]
        bs = bs[dii]
    # normalise w
    w /= w.sum()

    # posterior mean for b
    b = np.sum(bs * w)
    # Estimate for k, note that we return a negative of Zhang and
    # Stephens's k, because it is more common parameterisation.
    temp = (-b) * x
    np.log1p(temp, out=temp)
    k = np.mean(temp)
    if return_quadrature:
        np.negative(x, out=temp)
        temp = bs[:, None] * temp
        np.log1p(temp, out=temp)
        ks = np.mean(temp, axis=1)
    # estimate for sigma
    sigma = -k / b * n / (n - 0)
    # weakly informative prior for k
    a = 10
    k = k * n / (n + a) + a * 0.5 / (n + a)
    if return_quadrature:
        ks *= n / (n + a)
        ks += a * 0.5 / (n + a)

    if return_quadrature:
        return k, sigma, ks, w
    else:
        return k, sigma
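# A hedged usage sketch of gpdfitnew with synthetic data (not from the
# original source): numpy's pareto(a) draws from a generalized Pareto with
# shape k = 1/a, so the estimate should land near 0.5 here.
import numpy as np

rng = np.random.RandomState(0)
x = rng.pareto(2.0, size=10000)
k, sigma = gpdfitnew(x)
assert 0.3 < k < 0.7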
def iterateMatrix(matrix, goTerms, goEnrichment, background):
    """ return a dictionary

    Keyword arguments:
    matrix -- numerical matrix of semantic similarities
    goTerms -- list of goTerms
    goEnrichment -- GO enrichment result
    background -- flattened background: lists of genes and GO terms

    iterates through semantic similarity matrix in decreasing ss order
    """
    numGenes = len(list(dict.fromkeys(background[0])))
    max = goEnrichment.max().max()
    min = goEnrichment.min().min()
    # p-values are considered similar if they have a maximum difference of 5%
    maxDiff = (max - min) * 0.05
    # frequencies of GO terms
    frequencies = dict(Counter(background[1]))
    frequencies = {k: v / numGenes for k, v in frequencies.items()}
    avgs = dict()

    # calculate averages for each term for uniqueness value
    for index, term in enumerate(goTerms):
        col = matrix[:, index]
        row = matrix[index, :]
        col = col[col != -1]
        row = row[row != -1]
        avgs[term] = (col.sum() + row.sum()) / (len(goTerms) - 1)

    # stores tree structure
    tree = dict()
    # stores additional data for each GO term
    goList = dict()
    while len(goTerms) > 0:
        maxValue = np.amax(np.ravel(matrix))
        # get most similar pair of GO terms
        indices = np.where(matrix == maxValue)
        indices = list(zip(indices[0], indices[1]))[0]
        termA = goTerms[indices[0]]
        termB = goTerms[indices[1]]

        # calculate which GO term is rejected
        delete = testGoTerms(termA, termB, goEnrichment, background,
                             frequencies, maxDiff)
        toDelete = delete["term"]
        if toDelete == termA:
            deleteIndex = indices[0]
            toKeep = termB
        else:
            deleteIndex = indices[1]
            toKeep = termA

        # add GO terms to current tree dict
        if toKeep != toDelete:
            if toDelete in tree:
                if toKeep in tree:
                    # if both terms are in the tree dict, it means that they
                    # form non-connected subtrees. The tree dict of the
                    # rejected term is placed as the child of the kept term
                    tree[toKeep][toDelete] = tree[toDelete]
                else:
                    # if the kept term is not in the tree dict but the
                    # rejected is, the kept term will be placed as the
                    # parent of the rejected term
                    tree[toKeep] = {}
                    tree[toKeep][toDelete] = tree[toDelete]
            else:
                if toKeep in tree:
                    # if the kept term is in the tree dict, but the rejected
                    # is not, the rejected term is added as a child of the
                    # kept term
                    tree[toKeep][toDelete] = toDelete
                else:
                    # if none of the terms are in the tree dict, a new entry
                    # is created
                    tree[toKeep] = {toDelete: toDelete}
            # not connected trees that have the rejected term as a parent
            # are deleted, as the rejected term is now incorporated in the
            # final tree dict structure
            tree.pop(toDelete, None)
        else:
            maxValue = 0
        goList[toDelete] = {
            "termID": toDelete,
            "description": godag[toDelete].name,
            "frequency": calculateFrequency(toDelete, frequencies),
            "rejection": delete["rejection"],
            "uniqueness": 1 - avgs[toDelete],
            "dispensability": maxValue,
            "pvalues": np.negative(
                2 * np.log(goEnrichment.loc[toDelete, :].values)).tolist()
        }
        # delete rejected term from list of GO terms and from ss matrix
        goTerms = np.delete(goTerms, deleteIndex)
        matrix = np.delete(matrix, deleteIndex, 0)
        matrix = np.delete(matrix, deleteIndex, 1)
    return {"tree": tree, "data": goList}
def logistic_func(X, bayta1, bayta2, bayta3, bayta4):
    logisticPart = 1 + np.exp(np.negative(np.divide(X - bayta3, np.abs(bayta4))))
    yhat = bayta2 + np.divide(bayta1 - bayta2, logisticPart)
    return yhat
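# A hedged usage sketch (not from the original source): logistic_func is the
# four-parameter logistic typically fitted with scipy.optimize.curve_fit;
# here it recovers the curve from noise-free synthetic data.
import numpy as np
from scipy.optimize import curve_fit

X = np.linspace(-5, 5, 100)
y = logistic_func(X, 1.0, 0.0, 0.5, 1.0)
popt, _ = curve_fit(logistic_func, X, y, p0=[1.0, 0.0, 0.0, 1.0])
assert np.allclose(logistic_func(X, *popt), y, atol=1e-4)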
rightXYZ = [0, 0, 0]
print("leftId : ", leftId)
print("rightId : ", rightId)
print("noseId : ", noseId)

pld.GetPoints().GetPoint(noseId, noseXYZ)
pld.GetPoints().GetPoint(leftId, leftXYZ)
pld.GetPoints().GetPoint(rightId, rightXYZ)

print("leftCoord : ", leftXYZ)
print("rightCoord : ", rightXYZ)
print("noseCoord : ", noseXYZ)

center = np.add(rightXYZ, leftXYZ) / 2
ex = np.add(rightXYZ, np.negative(leftXYZ))
ex = ex / np.linalg.norm(ex)
ey = np.add(center, np.negative(noseXYZ))
ey = ey / np.linalg.norm(ey)
# cross product to calculate a normal vector to the plane of ex and ey
ez = np.cross(ex, ey)
ez = ez / np.linalg.norm(ez)

rotM = vtk.vtkMatrix4x4()
rotM.Identity()
for i in range(0, 3):
    rotM.SetElement(0, i, ex[i])
    rotM.SetElement(1, i, ey[i])
    rotM.SetElement(2, i, ez[i])
def called_member(self, a):
    return np.negative(a)
def quaternion_from_transformation_matrix(matrix, isprecise=False):
    """Return quaternion from rotation matrix.

    If isprecise is True, the input matrix is assumed to be a precise
    rotation matrix and a faster algorithm is used.

    >>> q = quaternion_from_matrix(numpy.identity(4), True)
    >>> numpy.allclose(q, [1, 0, 0, 0])
    True
    >>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1]))
    >>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0])
    True
    >>> R = rotation_matrix(0.123, (1, 2, 3))
    >>> q = quaternion_from_matrix(R, True)
    >>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786])
    True
    >>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],
    ...      [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]
    >>> q = quaternion_from_matrix(R)
    >>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611])
    True
    >>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],
    ...      [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]
    >>> q = quaternion_from_matrix(R)
    >>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603])
    True
    >>> R = random_rotation_matrix()
    >>> q = quaternion_from_matrix(R)
    >>> is_same_transform(R, quaternion_matrix(q))
    True
    >>> is_same_quaternion(quaternion_from_matrix(R, isprecise=False),
    ...                    quaternion_from_matrix(R, isprecise=True))
    True
    >>> R = euler_matrix(0.0, 0.0, numpy.pi/2.0)
    >>> is_same_quaternion(quaternion_from_matrix(R, isprecise=False),
    ...                    quaternion_from_matrix(R, isprecise=True))
    True

    """
    M = np.array(matrix, dtype=np.float64, copy=False)[:4, :4]
    if isprecise:
        q = np.empty((4, ))
        t = np.trace(M)
        if t > M[3, 3]:
            q[0] = t
            q[3] = M[1, 0] - M[0, 1]
            q[2] = M[0, 2] - M[2, 0]
            q[1] = M[2, 1] - M[1, 2]
        else:
            i, j, k = 0, 1, 2
            if M[1, 1] > M[0, 0]:
                i, j, k = 1, 2, 0
            if M[2, 2] > M[i, i]:
                i, j, k = 2, 0, 1
            t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
            q[i] = t
            q[j] = M[i, j] + M[j, i]
            q[k] = M[k, i] + M[i, k]
            q[3] = M[k, j] - M[j, k]
            q = q[[3, 0, 1, 2]]
        q *= 0.5 / math.sqrt(t * M[3, 3])
    else:
        m00 = M[0, 0]
        m01 = M[0, 1]
        m02 = M[0, 2]
        m10 = M[1, 0]
        m11 = M[1, 1]
        m12 = M[1, 2]
        m20 = M[2, 0]
        m21 = M[2, 1]
        m22 = M[2, 2]
        # symmetric matrix K
        K = np.array([[m00 - m11 - m22, 0.0, 0.0, 0.0],
                      [m01 + m10, m11 - m00 - m22, 0.0, 0.0],
                      [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0],
                      [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22]])
        K /= 3.0
        # quaternion is eigenvector of K that corresponds to largest eigenvalue
        w, V = np.linalg.eigh(K)
        q = V[[3, 0, 1, 2], np.argmax(w)]
    if q[0] < 0.0:
        np.negative(q, q)
    return q
def __neg__(self):
    """Return the negative of F:  G = -F  =>  G(x) = -F(x) for all x"""
    return Factor().__build(self.v.copy(), np.negative(self.t))
arr_i = buildArray('i.quadratic')
arr_u = buildArray('u.quadratic')
allMovieMetadata = getMetadata()
dotProds = []
watchedMovies = []
for i in range(0, 10000):
    x = Weight((arr_u[i].id, arr_i[i].id),
               np.dot(arr_u[i].weights, arr_i[i].weights))
    dotProds.append(x)
    watchedMovies.append((arr_u[i].id, arr_i[i].id))

# build -1/1 matrix (extremely sparse)
seenMovies = np.ones((totalUsers, totalMovies))
seenMovies = np.negative(seenMovies)
for watch in watchedMovies:
    # print(str(watch[0]) + "\t" + str(watch[1]))
    seenMovies[watch[0] - 1][watch[1] - 1] = 1

print(len(allMovieMetadata))

# assign metadata for logistic regression, this model is 30 GB!!
dfs = []
with open('data.vw', 'w') as f:
    for i in range(totalUsers):
        print("processing user: "******" ...")
        for j in range(totalMovies):
            # print(i, j)
            m = allMovieMetadata[j - 1].metadata
def _multiply_no_nan(x, y, name=None):  # pylint: disable=unused-argument
    dtype = np.result_type(x, y)
    # TODO(b/146385087): The gradient should be
    # `lambda dz: [multiply_no_nan(dz, y), multiply_no_nan(x, dz)]`.
    return np.where(np.equal(y, 0.), np.zeros((), dtype=dtype), np.multiply(x, y))


multiply_no_nan = utils.copy_docstring('tf.math.multiply_no_nan',
                                       _multiply_no_nan)

ndtri = utils.copy_docstring('tf.math.ndtri',
                             lambda x, name=None: scipy_special.ndtri(x))

negative = utils.copy_docstring('tf.math.negative',
                                lambda x, name=None: np.negative(x))

nextafter = utils.copy_docstring(
    'tf.math.nextafter', lambda x1, x2, name=None: np.nextafter(x1, x2))

not_equal = utils.copy_docstring('tf.math.not_equal',
                                 lambda x, y, name=None: np.not_equal(x, y))

polygamma = utils.copy_docstring(
    'tf.math.polygamma',
    lambda a, x, name=None: scipy_special.polygamma(np.int32(a), x).astype(  # pylint: disable=unused-argument,g-long-lambda
        utils.common_dtype([a, x], dtype_hint=np.float32)))

polyval = utils.copy_docstring(
    'tf.math.polyval', lambda coeffs, x, name=None: np.polyval(coeffs, x))
def generate(self, serial_number_list, original_img_list, generated_img_list,
             defect_category_list, bbox_list, dataset_name,
             is_difference_img_preprocessed, is_removed=True, properties=None):
    ok_data_save_dir_path, ng_data_save_dir_path = self._create_save_directory(
        directory_name=dataset_name, is_removed=is_removed)
    serial_number_list = list(
        map(lambda serial_number: serial_number.split("_")[0],
            serial_number_list))
    original_img_path_list = list()
    for i, (serial_number, original_img, generated_img, defect_category,
            bbox) in enumerate(
                zip(serial_number_list, original_img_list, generated_img_list,
                    defect_category_list, bbox_list)):
        difference_img = self.get_difference_img(
            original_img=original_img,
            generated_img=generated_img,
            is_preprocessed=is_difference_img_preprocessed,
            is_boundary_mask=False)
        save_img = None
        # Generate label.csv
        # Controller with save category
        if dataset_name == "difference_img":
            pos_difference_img = np.where(difference_img >= 0,
                                          difference_img, 0)
            neg_difference_img = np.negative(
                np.where(difference_img <= 0, difference_img, 0))
            abs_difference_img = np.abs(difference_img)

            # Save
            if defect_category[0] == 1:
                # Original image
                original_save_path = os.path.join(
                    ok_data_save_dir_path, f"{serial_number}_{i}_{0}_o.npy")
                np.save(original_save_path, original_img)
                # Generated image
                generated_save_path = os.path.join(
                    ok_data_save_dir_path, f"{serial_number}_{i}_{0}_g.npy")
                np.save(generated_save_path, generated_img)
                # Positive image
                pos_save_path = os.path.join(
                    ok_data_save_dir_path, f"{serial_number}_{i}_{0}_p.npy")
                np.save(pos_save_path, pos_difference_img)
                # Negative image
                neg_save_path = os.path.join(
                    ok_data_save_dir_path, f"{serial_number}_{i}_{0}_n.npy")
                np.save(neg_save_path, neg_difference_img)
                # Absolute image
                abs_save_path = os.path.join(
                    ok_data_save_dir_path, f"{serial_number}_{i}_{0}_a.npy")
                np.save(abs_save_path, abs_difference_img)
            else:
                for category_idx, defect in enumerate(defect_category):
                    if defect == 1:
                        # Original image
                        original_save_path = os.path.join(
                            ng_data_save_dir_path,
                            f"{serial_number}_{i}_{category_idx}_o.npy")
                        np.save(original_save_path, original_img)
                        # Generated image
                        generated_save_path = os.path.join(
                            ng_data_save_dir_path,
                            f"{serial_number}_{i}_{category_idx}_g.npy")
                        np.save(generated_save_path, generated_img)
                        # Positive image
                        pos_save_path = os.path.join(
                            ng_data_save_dir_path,
                            f"{serial_number}_{i}_{category_idx}_p.npy")
                        np.save(pos_save_path, pos_difference_img)
                        # Negative image
                        neg_save_path = os.path.join(
                            ng_data_save_dir_path,
                            f"{serial_number}_{i}_{category_idx}_n.npy")
                        np.save(neg_save_path, neg_difference_img)
                        # Absolute image
                        abs_save_path = os.path.join(
                            ng_data_save_dir_path,
                            f"{serial_number}_{i}_{category_idx}_a.npy")
                        np.save(abs_save_path, abs_difference_img)
                    else:
                        continue
        else:
            if dataset_name == "defect_part":
                # properties: cut_size=32
                save_img = self.get_defect_img(
                    difference_img=difference_img, **properties)
                save_img = save_img.astype(np.uint8)
            elif dataset_name == "important_pixel":
                # properties: n=100, img_size=256
                save_img = self.get_important_pixel_img(
                    difference_img=difference_img, **properties)
                save_img = save_img.astype(np.uint8)
            # Save
            if defect_category[0] == 1:
                save_path = os.path.join(ok_data_save_dir_path,
                                         f"{serial_number}_{i}_{0}.png")
                cv2.imwrite(save_path, save_img)
            else:
                for category_idx, defect in enumerate(defect_category):
                    if defect == 1:
                        save_path = os.path.join(
                            ng_data_save_dir_path,
                            f"{serial_number}_{i}_{category_idx}.png")
                        cv2.imwrite(save_path, save_img)
                    else:
                        continue
class negative(UnaryElemwise):
    _r = np.negative
    _d = lambda self, x: np.negative(np.ones_like(x))
def gaussian(self, x):
    sq = np.square(x)
    neg = np.negative(sq)
    return np.exp(neg)  # exp(-x**2)
def gpinv(p, k, sigma):
    """Inverse Generalised Pareto distribution function."""
    x = np.empty(p.shape)
    x.fill(np.nan)
    if sigma <= 0:
        return x
    ok = (p > 0) & (p < 1)
    if np.all(ok):
        if np.abs(k) < np.finfo(float).eps:
            np.negative(p, out=x)
            np.log1p(x, out=x)
            np.negative(x, out=x)
        else:
            np.negative(p, out=x)
            np.log1p(x, out=x)
            x *= -k
            np.expm1(x, out=x)
            x /= k
        x *= sigma
    else:
        if np.abs(k) < np.finfo(float).eps:
            # x[ok] = -np.log1p(-p[ok])
            temp = p[ok]
            np.negative(temp, out=temp)
            np.log1p(temp, out=temp)
            np.negative(temp, out=temp)
            x[ok] = temp
        else:
            # x[ok] = np.expm1(-k * np.log1p(-p[ok])) / k
            temp = p[ok]
            np.negative(temp, out=temp)
            np.log1p(temp, out=temp)
            temp *= -k
            np.expm1(temp, out=temp)
            temp /= k
            x[ok] = temp
        x *= sigma
        x[p == 0] = 0
        if k >= 0:
            x[p == 1] = np.inf
        else:
            x[p == 1] = -sigma / k
    return x
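# A minimal sketch of inverse-transform sampling with gpinv (an assumption
# about the intended use, not from the original source): pushing uniform
# variates through the inverse CDF yields generalised Pareto draws.
import numpy as np

rng = np.random.RandomState(1)
p = rng.uniform(size=5)
samples = gpinv(p, k=0.5, sigma=1.0)
assert np.all(samples >= 0)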
    np.insert(deltaThetaS, 0, np.amax(deltaThetaS)),
    (len(deltaThetaS) + 1, 1))
zipSortPlusS = np.append(zipSortS, deltaThetaS, axis=1)
lineOutS = np.argmin(zipSortPlusS, axis=0)[-1]
zipSortMinusS = np.delete(zipSortPlusS, lineOutS, axis=0)

# Integrating the fluxes towards the cylinder
UrLocalS = zipSortMinusS[:, -3]     # Third column from end to beginning is Ur
rpLocalS = zipSortMinusS[:, 1]      # Second column (starting from 0 index) is rpLocal
thetaLocalS = zipSortMinusS[:, 0]   # First column (starting from 0 index) is thetaLocal
print("The size of thetaLocalS is: ", thetaLocalS.shape)
NTheta = np.size(thetaLocalS)
UrMClippedS = np.negative(np.clip(UrLocalS, -np.finfo('d').max, 0))
## kAreaS = deltaThetaS[0] * (rpLocalS[0] + 1) * rCollector * deltaL[2]
## radialVolumeAccS = UrMClippedS * kAreaS * deltaT

# Writing the angles headers if it is the first line to write
if (existingLastEnd == -1000):
    outputLine = np.reshape(np.insert(thetaLocalS, 0, [-1000, -1000]),
                            (1, NTheta + 2))
    print("The shape of outputLine is: ", outputLine.shape)
    fOut = open(outputName, mode='ab')
    np.savetxt(fOut, outputLine)
    fOut.close()
# End if existingLastEnd == -1000 (for printing the header)

# Finding the beginning and end of accumulation for the first time
indexIni = startIndexTimes
def func(arr):
    return np.negative(arr)
grid = np.arange(16).reshape((4, 4))
upper, lower = np.vsplit(grid, [2])
left, right = np.hsplit(grid, [2])
print(upper)
print(lower)
print(left)
print(right)

###########################################
# Universal functions (ufuncs)
###########################################
np.arange(5) / np.arange(1, 6)

x = np.arange(9).reshape((3, 3))
2 ** x

np.add(1, 2)
np.subtract(1, 2)
np.negative(2)
np.multiply(2, 3)
np.divide(6, 2)
np.floor_divide(3, 2)
np.power(2, 3)
np.mod(9, 4)
np.abs(-8)
np.sin(np.pi)
np.arctan(1)
np.exp(4)
np.log(10)
np.log2(2)

x = np.arange(5)
y = np.empty(5)
np.multiply(x, 10, out=y)
def __init__(self, T, K, sigma=0.5, _lambda=0.5):
    self.SPEED_TO_ERPM_OFFSET = float(
        rospy.get_param("/vesc/speed_to_erpm_offset", 0.0))
    self.SPEED_TO_ERPM_GAIN = float(
        rospy.get_param("/vesc/speed_to_erpm_gain", 4614.0))
    self.STEERING_TO_SERVO_OFFSET = float(
        rospy.get_param("/vesc/steering_angle_to_servo_offset", 0.5304))
    self.STEERING_TO_SERVO_GAIN = float(
        rospy.get_param("/vesc/steering_angle_to_servo_gain", -1.2135))
    self.CAR_LENGTH = 0.33

    self.last_pose = None
    # MPPI params
    self.T = T  # Length of rollout horizon
    self.K = K  # Number of sample rollouts
    self.sigma = sigma
    self._lambda = _lambda

    self.goal = None  # Lets keep track of the goal pose (world frame) over time
    self.lasttime = None

    # PyTorch / GPU data configuration
    # TODO
    # you should pre-allocate GPU memory when you can, and re-use it when
    # possible for arrays storing your controls or calculated MPPI costs, etc
    model_name = rospy.get_param("~nn_model", "myneuralnetisbestneuralnet.pt")
    self.model = torch.load(model_name)
    self.model.cuda()  # tell torch to run the network on the GPU
    self.dtype = torch.cuda.FloatTensor
    print("Loading:", model_name)
    print("Model:\n", self.model)
    print("Torch Datatype:", self.dtype)

    # control outputs
    self.msgid = 0

    # visualization parameters
    self.num_viz_paths = 40
    if self.K < self.num_viz_paths:
        self.num_viz_paths = self.K

    # We will publish control messages and a way to visualize a subset of our
    # rollouts, much like the particle filter
    self.ctrl_pub = rospy.Publisher(
        rospy.get_param("~ctrl_topic",
                        "/vesc/high_level/ackermann_cmd_mux/input/nav0"),
        AckermannDriveStamped, queue_size=2)
    self.path_pub = rospy.Publisher("/mppi/paths", Path,
                                    queue_size=self.num_viz_paths)

    # Use the 'static_map' service (launched by MapServer.launch) to get the map
    map_service_name = rospy.get_param("~static_map", "static_map")
    print("Getting map from service: ", map_service_name)
    rospy.wait_for_service(map_service_name)
    # The map, will get passed to init of sensor model
    map_msg = rospy.ServiceProxy(map_service_name, GetMap)().map
    self.map_info = map_msg.info  # Save info about map for later use
    print("Map Information:\n", self.map_info)

    # Create numpy array representing map for later use
    self.map_height = map_msg.info.height
    self.map_width = map_msg.info.width
    array_255 = np.array(map_msg.data).reshape(
        (map_msg.info.height, map_msg.info.width))
    # Numpy array of dimension (map_msg.info.height, map_msg.info.width),
    # with values 0: not permissible, 1: permissible
    self.permissible_region = np.zeros_like(array_255, dtype=bool)
    self.permissible_region[array_255 == 0] = 1
    self.permissible_region = np.negative(
        self.permissible_region)  # 0 is permissible, 1 is not

    print("Making callbacks")
    self.goal_sub = rospy.Subscriber("/move_base_simple/goal", PoseStamped,
                                     self.clicked_goal_cb, queue_size=1)
    self.pose_sub = rospy.Subscriber("/pf/ta/viz/inferred_pose", PoseStamped,
                                     self.mppi_cb, queue_size=1)
def predict_image(self, inRaster, outRaster, model=None, inMask=None,
                  confidenceMap=None, confidenceMapPerClass=None, NODATA=0,
                  SCALE=None, classifier='GMM', feedback=None):
    """!@brief Classify a whole raster image using per-block image analysis.

    The classifier is given in `classifier` and options in kwargs.

    Input :
        inRaster : filtered image name ('sample_filtered.tif', str)
        outRaster : raster image name ('outputraster.tif', str)
        model : model file from the previous step ('model', str)
        inMask : mask raster name, or None
        confidenceMap : map of confidence per pixel
        confidenceMapPerClass : map of confidence per class
        NODATA : default set to 0 (int)
        SCALE : default set to None
        classifier : default 'GMM'

    Output :
        Nothing, but saves a raster image and, if asked, a confidence map.
    """
    # Open raster and get additional information
    raster = gdal.Open(inRaster, gdal.GA_ReadOnly)
    if raster is None:
        print('Impossible to open ' + inRaster)
        exit()

    if inMask is None:
        mask = None
    else:
        mask = gdal.Open(inMask, gdal.GA_ReadOnly)
        if mask is None:
            print('Impossible to open ' + inMask)
            exit()
        # Check that image and mask have the same size
        if (raster.RasterXSize != mask.RasterXSize) or (
                raster.RasterYSize != mask.RasterYSize):
            print('Image and mask should be of the same size')
            exit()

    if SCALE is not None:
        M, m = np.asarray(SCALE[0]), np.asarray(SCALE[1])

    # Get the size of the image
    d = raster.RasterCount
    nc = raster.RasterXSize
    nl = raster.RasterYSize

    # Get the geoinformation
    GeoTransform = raster.GetGeoTransform()
    Projection = raster.GetProjection()

    # Get block size
    band = raster.GetRasterBand(1)
    block_sizes = band.GetBlockSize()
    x_block_size = block_sizes[0]
    y_block_size = block_sizes[1]
    del band

    # Initialize the output
    if not os.path.exists(os.path.dirname(outRaster)):
        os.makedirs(os.path.dirname(outRaster))
    driver = gdal.GetDriverByName('GTiff')
    if np.amax(model.classes_) > 255:
        dtype = gdal.GDT_UInt16
    else:
        dtype = gdal.GDT_Byte
    dst_ds = driver.Create(outRaster, nc, nl, 1, dtype)
    dst_ds.SetGeoTransform(GeoTransform)
    dst_ds.SetProjection(Projection)
    out = dst_ds.GetRasterBand(1)

    if classifier != 'GMM':
        nClass = len(model.classes_)
    if confidenceMap:
        dst_confidenceMap = driver.Create(confidenceMap, nc, nl, 1,
                                          gdal.GDT_Int16)
        dst_confidenceMap.SetGeoTransform(GeoTransform)
        dst_confidenceMap.SetProjection(Projection)
        out_confidenceMap = dst_confidenceMap.GetRasterBand(1)

    if confidenceMapPerClass:
        dst_confidenceMapPerClass = driver.Create(
            confidenceMapPerClass, nc, nl, nClass, gdal.GDT_Int16)
        dst_confidenceMapPerClass.SetGeoTransform(GeoTransform)
        dst_confidenceMapPerClass.SetProjection(Projection)

    # Perform the classification
    total = nl * y_block_size
    pushFeedback('Predicting model...')
    if feedback == 'gui':
        progress = pB.progressBar('Predicting model...', total / 10)

    for i in range(0, nl, y_block_size):
        if 'lastBlock' not in locals():
            lastBlock = i
        if int(lastBlock / total * 100) != int(i / total * 100):
            lastBlock = i
            pushFeedback(int(i / total * 100))
            if feedback == 'gui':
                progress.addStep()

        # Check for size consistency in Y
        if i + y_block_size < nl:
            lines = y_block_size
        else:
            lines = nl - i

        for j in range(0, nc, x_block_size):
            # Check for size consistency in X
            if j + x_block_size < nc:
                cols = x_block_size
            else:
                cols = nc - j

            # Load the data
            X = np.empty((cols * lines, d))
            for ind in range(d):
                X[:, ind] = raster.GetRasterBand(int(ind + 1)).ReadAsArray(
                    j, i, cols, lines).reshape(cols * lines)

            # Do the prediction
            band_temp = raster.GetRasterBand(1)
            nodata_temp = band_temp.GetNoDataValue()
            if nodata_temp is None:
                nodata_temp = -9999

            if mask is None:
                t = np.where(X[:, 0] != nodata_temp)[0]
                yp = np.zeros((cols * lines,))
                # Parentheses added: without them `and` binds tighter than
                # `or`, and a per-class request would get the wrong K shape.
                if (confidenceMapPerClass or confidenceMap) and classifier != 'GMM':
                    K = np.zeros((cols * lines, nClass))
                    K[:, :] = -1
                else:
                    K = np.zeros((cols * lines))
                    K[:] = -1
            else:
                mask_temp = mask.GetRasterBand(1).ReadAsArray(
                    j, i, cols, lines).reshape(cols * lines)
                t = np.where((mask_temp != 0) & (X[:, 0] != nodata_temp))[0]
                yp = np.zeros((cols * lines,))
                yp[:] = NODATA
                if (confidenceMapPerClass or confidenceMap) and classifier != 'GMM':
                    K = np.ones((cols * lines, nClass))
                    K = np.negative(K)
                else:
                    K = np.zeros((cols * lines))
                    K = np.negative(K)

            # TODO: change this part accordingly...
            if t.size > 0:
                if confidenceMap and classifier == 'GMM':
                    yp[t], K[t] = model.predict(
                        self.scale(X[t, :], M=M, m=m), None, confidenceMap)
                elif (confidenceMap or confidenceMapPerClass) and classifier != 'GMM':
                    yp[t] = model.predict(self.scale(X[t, :], M=M, m=m))
                    K[t, :] = model.predict_proba(
                        self.scale(X[t, :], M=M, m=m)) * 100
                else:
                    yp[t] = model.predict(self.scale(X[t, :], M=M, m=m))

            # Write the data
            out.WriteArray(yp.reshape(lines, cols), j, i)
            out.SetNoDataValue(NODATA)
            out.FlushCache()

            if confidenceMap and classifier == 'GMM':
                K *= 100
                out_confidenceMap.WriteArray(K.reshape(lines, cols), j, i)
                out_confidenceMap.SetNoDataValue(-1)
                out_confidenceMap.FlushCache()

            if confidenceMap and classifier != 'GMM':
                Kconf = np.amax(K, axis=1)
                out_confidenceMap.WriteArray(Kconf.reshape(lines, cols), j, i)
                out_confidenceMap.SetNoDataValue(-1)
                out_confidenceMap.FlushCache()

            if confidenceMapPerClass and classifier != 'GMM':
                for band in range(nClass):
                    gdalBand = band + 1
                    out_confidenceMapPerClass = \
                        dst_confidenceMapPerClass.GetRasterBand(gdalBand)
                    out_confidenceMapPerClass.SetNoDataValue(-1)
                    out_confidenceMapPerClass.WriteArray(
                        K[:, band].reshape(lines, cols), j, i)
                    out_confidenceMapPerClass.FlushCache()

            del X, yp

    # Clean/close variables
    if feedback == 'gui':
        progress.reset()
    raster = None
    dst_ds = None
    return outRaster
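A minimal, self-contained sketch of the per-block read/process/write pattern the method above relies on, with a trivial pass-through in place of a real model. The function name `blockwise_copy` and the paths are illustrative, not part of the original code:

from osgeo import gdal

def blockwise_copy(in_path, out_path):
    """Copy band 1 of a raster block by block; a real classifier would
    predict on each block instead of passing it through."""
    src = gdal.Open(in_path, gdal.GA_ReadOnly)
    band = src.GetRasterBand(1)
    x_bs, y_bs = band.GetBlockSize()            # native tile size, e.g. (256, 256)
    driver = gdal.GetDriverByName('GTiff')
    dst = driver.Create(out_path, src.RasterXSize, src.RasterYSize, 1,
                        band.DataType)
    dst.SetGeoTransform(src.GetGeoTransform())
    dst.SetProjection(src.GetProjection())
    out = dst.GetRasterBand(1)
    for i in range(0, src.RasterYSize, y_bs):
        lines = min(y_bs, src.RasterYSize - i)  # clip the last row of blocks
        for j in range(0, src.RasterXSize, x_bs):
            cols = min(x_bs, src.RasterXSize - j)
            block = band.ReadAsArray(j, i, cols, lines)
            out.WriteArray(block, j, i)         # model.predict(block) would go here
    out.FlushCache()
    dst = None                                  # close the output dataset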
def molly_parameter(directory, objecto, flux_corrected=True, suffix=''):
    '''
    Create two molly files with the information about each spectrum.

    Telescope options:
        Palomar 200in
        Wilson
        Campanas
        Lemmon
        WHT
        INT
        JKT
        UKIRT
        Kitt Peak
        AAT
        CTIO
        McDonald
        MMT
        VLT
        ANU 2.3m
        SAAO 1.9m
        NTT
        'elsewhere'

    Parameters
    -------------
    directory: Directory where all the science files are located.
    objecto: Name of the files to process.
    flux_corrected: Plot the files that are flux corrected?
    suffix: Optional subset of files to plot, i.e. different standards.
            If not specified, all the files will be plotted.

    Output
    -------------
    headerfile and listfile of molly files
    '''
    # Import all the wavelength- and flux-corrected files
    if flux_corrected:
        fits_files = glob.glob("%s/%s*WaveStd*%s*.fits" % (directory, objecto, suffix))
    # If not, import the files that are wavelength corrected, but not flux corrected.
    else:
        fits_files = glob.glob("%s/%s*Wave*.fits" % (directory, objecto))

    # Text files
    optimal_files = glob.glob("%s/*%s*%s*_optimal_final.txt" % (directory, objecto, suffix))
    raw_files = glob.glob("%s/*%s*%s*_raw_final.txt" % (directory, objecto, suffix))

    # Header of header output file
    header_data = "Object UTC Date RA DEC Dwell Airmass Equinox JD Day Month Year \n"
    header_data += " C D C D D D D D D I I I \n"

    # Empty list for list output file
    list_data = ""

    for i in range(len(optimal_files)):
        # Edge of the directory prefix, to isolate the filename
        a = optimal_files[i].find('/')

        # Name of the row that molly needs
        filename = optimal_files[i][a + 1:]
        molly_name = "lasc %s %s 1 2 3 A M 0.05 \n" % (filename, i + 1)

        # Add to list of all files
        list_data += molly_name

        ##### Header output file #####
        # For every variable and every file, extract the value listed here
        input_file = fits.open(fits_files[i])
        objecto = input_file[0].header['OBJECT']
        ut_time = input_file[0].header['UT']
        date_obs = input_file[0].header['DATE-OBS']
        ra_raw = input_file[0].header['RA']
        dec_raw = input_file[0].header['DEC']
        exptime = input_file[0].header['EXPTIME']
        airmass = input_file[0].header['AIRMASS']
        equinox = 2000.0
        juliand = Time(date_obs + " " + ut_time).jd

        # Get the variables into the right format
        # Time
        hr = float(ut_time[0:2])
        mi = float(ut_time[3:5])
        se = float(ut_time[6:])
        ut_out = hr + mi / 60.0 + se / 3600.0

        # Date
        year = date_obs[0:4]
        month = date_obs[5:7]
        day = date_obs[8:10]
        date_out = str(day) + "/" + str(month) + "/" + str(year)

        # Right ascension
        if ra_raw[0] == '+':
            rahr = float(ra_raw[1:3])
            rami = float(ra_raw[4:6])
            rase = float(ra_raw[7:])
            ra_out = rahr + rami / 60.0 + rase / 3600.0
        if ra_raw[0] == '-':
            rahr = float(ra_raw[1:3])
            rami = float(ra_raw[4:6])
            rase = float(ra_raw[7:])
            ra_out = np.negative(rahr + rami / 60.0 + rase / 3600.0)

        # Declination
        if dec_raw[0] == '+':
            dechr = float(dec_raw[1:3])
            decmi = float(dec_raw[4:6])
            decse = float(dec_raw[7:])
            dec_out = dechr + decmi / 60.0 + decse / 3600.0
        if dec_raw[0] == '-':
            dechr = float(dec_raw[1:3])
            decmi = float(dec_raw[4:6])
            decse = float(dec_raw[7:])
            dec_out = np.negative(dechr + decmi / 60.0 + decse / 3600.0)

        # One line for each file with all the variables in the right format
        molly_header = str(objecto) + " " + str(ut_out) + " " + str(
            date_out) + " " + str(ra_out) + " " + str(dec_out) + " " + str(
            exptime) + " " + str(airmass) + " " + str(equinox) + " " + str(
            juliand) + " " + str(day) + " " + str(month) + " " + str(
            year) + "\n"

        # Append to the big list of all headers
        header_data += molly_header

    # Save header output file (close to flush the contents to disk)
    headerfile_name = 'headerfile_%s%s.txt' % (objecto, suffix)
    headerfile = open(directory + '/' + headerfile_name, 'w')
    headerfile.write(header_data)
    headerfile.close()
    print('Saved %s/%s' % (directory, headerfile_name))

    # Save list output file
    listfile_name = "listfile_" + objecto + suffix + ".txt"
    listfile = open(directory + '/' + listfile_name, "w")
    listfile.write(list_data)
    listfile.close()
    print("Saved " + listfile_name)

    # Create molly instructions file
    size = len(optimal_files)
    instructions = 'mxpix 4500 sure\n\n@%s.txt\n\nedit 1 %s\nfile\n%s\nq\n\nhfix 1 %s MMT\n\nvbin 1 %s 101\n\n\n\n\n' % (
        listfile_name, size, headerfile_name, size, size)

    for i in range(len(optimal_files)):
        a = optimal_files[i].find('/')
        # Name of the row that molly needs
        filename = optimal_files[i][a + 1:-9] + '_molly.txt'
        number = i + 101
        instructions += 'wasc %s %s ANGSTROMS MJY\n' % (filename, number)

    instructions_name = "instructions_" + objecto + suffix + ".txt"
    instructions_file = open(directory + '/' + instructions_name, "w")
    instructions_file.write(instructions)
    instructions_file.close()
    print("Saved " + instructions_name)
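The RA/DEC handling above repeats the same sexagesimal-to-decimal conversion four times. A sketch of that arithmetic factored into one helper; the helper name is mine, and it assumes colon-separated '+HH:MM:SS'-style strings, as the fixed slicing above implies:

def sexagesimal_to_decimal(value):
    """Convert a '+HH:MM:SS.S' (or '-DD:MM:SS.S') string to decimal units."""
    sign = -1.0 if value[0] == '-' else 1.0
    hours, minutes, seconds = (float(p) for p in value.lstrip('+-').split(':'))
    return sign * (hours + minutes / 60.0 + seconds / 3600.0)

assert sexagesimal_to_decimal('+12:30:00') == 12.5
assert sexagesimal_to_decimal('-01:30:00') == -1.5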
# Hypothesis: h(theta) = sigmoid(X @ theta)
# (the original used the undefined name `theta` and elementwise `*`;
# theta1 and a matrix product match the loop below)
hTheta = sigmoid(np.matmul(X, theta1))

max_iter = 10000  # change the iteration value
# max_iter = 2
cost = np.zeros((max_iter, 1))

for dummyCounter in range(max_iter):
    z = np.matmul(X, theta1)
    hypothesis = sigmoid(z)  # 3163 x thetaSize

    # J(theta) = (-y^T log(h) - (1 - y)^T log(1 - h)) / m
    firstPart = np.matmul(np.transpose(yTrainingData), np.log(hypothesis))
    secondPart = np.matmul(np.transpose(np.subtract(1, yTrainingData)),
                           np.log(np.subtract(1, hypothesis)))
    cost[dummyCounter] = np.divide(np.negative(np.add(firstPart, secondPart)),
                                   dataToRead)
    print(cost[dummyCounter].reshape(-1))

    # Gradient descent update: theta := theta - (alpha / m) * X^T (h - y)
    gradient = np.matmul(np.transpose(X), np.subtract(hypothesis, yTrainingData))
    theta1 = np.subtract(theta1,
                         np.divide(np.multiply(alpha, gradient), dataToRead))

testDataStarts = 3168  # simply hardcoded
testingData = np.array(df.iloc[testDataStarts:, 2])
yTestingData = np.array(df.iloc[testDataStarts:, 3])
totalTestData = 2006  # calculated manually
yTestingData = yTestingData.reshape(totalTestData, 1)
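A self-contained toy check of the vectorized cross-entropy cost J(theta) = (-y^T log(h) - (1 - y)^T log(1 - h)) / m used in the loop above; the data and shapes here are made up for illustration:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

X = np.array([[1.0, 0.5], [1.0, -1.2], [1.0, 2.0]])  # m = 3 samples, bias column first
y = np.array([[1.0], [0.0], [1.0]])
theta = np.zeros((2, 1))
m = X.shape[0]

h = sigmoid(np.matmul(X, theta))
cost = (-(np.matmul(y.T, np.log(h)) +
          np.matmul((1 - y).T, np.log(1 - h))) / m).item()
print(cost)  # log(2) ~= 0.6931 when theta is all zeros (h = 0.5 everywhere)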
def run(self, x, y=None):
    """
    Runs the model for a batch of examples.

    The correct outputs `y` are known during training, but not at test time.
    If correct outputs `y` are provided, this method must construct and
    return a nn.Graph for computing the training loss. If `y` is None, this
    method must instead return predicted y-values.

    Inputs:
        x: a (batch_size x 1) numpy array
        y: a (batch_size x 1) numpy array, or None
    Output:
        (if y is not None) A nn.Graph instance, where the last added node is
            the loss
        (if y is None) A (batch_size x 1) numpy array of predicted y-values

    Note: DO NOT call backprop() or step() inside this method!
    """
    # Function nodes used: MatrixMultiply, MatrixVectorAdd, ReLU, Add.
    # Trainable variables are w1, w2, b1, b2.

    # Size of the input vector
    i = x.shape[1]
    # Hidden-layer width; tune as needed
    h = 100

    # Create the variables once and reuse them across calls
    # (`is None` instead of `not ...`, since truth-testing a Variable
    # that wraps an array can be ambiguous)
    if self.w1 is None:
        self.w1 = nn.Variable(i, h)
    if self.w2 is None:
        self.w2 = nn.Variable(h, i)
    if self.b1 is None:
        self.b1 = nn.Variable(h)
    if self.b2 is None:
        self.b2 = nn.Variable(i)

    graph = nn.Graph([self.w1, self.w2, self.b1, self.b2])
    input_nodeX = nn.Input(graph, x)
    neg_X = np.negative(x)
    neg_inputX = nn.Input(graph, neg_X)

    # f(x): a one-hidden-layer ReLU network
    multiply1 = nn.MatrixMultiply(graph, input_nodeX, self.w1)
    add1 = nn.MatrixVectorAdd(graph, multiply1, self.b1)
    relu = nn.ReLU(graph, add1)
    multiply2 = nn.MatrixMultiply(graph, relu, self.w2)
    add2 = nn.MatrixVectorAdd(graph, multiply2, self.b2)

    # f(-x), sharing the same weights
    neg_multiply1 = nn.MatrixMultiply(graph, neg_inputX, self.w1)
    neg_add1 = nn.MatrixVectorAdd(graph, neg_multiply1, self.b1)
    neg_relu = nn.ReLU(graph, neg_add1)
    neg_multiply2 = nn.MatrixMultiply(graph, neg_relu, self.w2)
    neg_add2 = nn.MatrixVectorAdd(graph, neg_multiply2, self.b2)

    # g(x) = f(x) - f(-x), built by multiplying f(-x) by -1 and adding
    ones = np.negative(np.ones((1, 1)))
    neg_one = nn.Input(graph, ones)
    neg_negate = nn.MatrixMultiply(graph, neg_add2, neg_one)
    final_add = nn.Add(graph, neg_negate, add2)

    if y is not None:
        # At training time, the correct output `y` is known. Construct a
        # loss node and return the graph; the loss node must be the last
        # node added to the graph.
        input_nodeY = nn.Input(graph, y)
        loss_node = nn.SquareLoss(graph, final_add, input_nodeY)
        graph.add(loss_node)
        return graph
    else:
        # At test time the correct output is unknown; return the model's
        # prediction as a numpy array.
        return graph.get_output(final_add)
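The graph above computes g(x) = f(x) - f(-x) with weights shared between the two branches, which makes the model odd by construction: g(-x) = -g(x). A plain-numpy sketch of that property; the random weights here are stand-ins for the nn.Variable objects above:

import numpy as np

rng = np.random.default_rng(0)
w1, b1 = rng.normal(size=(1, 100)), rng.normal(size=100)
w2, b2 = rng.normal(size=(100, 1)), rng.normal(size=1)

def f(x):
    # One-hidden-layer ReLU network, mirroring the graph built above
    return np.maximum(x @ w1 + b1, 0.0) @ w2 + b2

def g(x):
    return f(x) - f(-x)  # the two branches share the same weights

x = rng.normal(size=(5, 1))
assert np.allclose(g(-x), -g(x))  # odd by construction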