Example #1
1
 def state_integrate(self):
   rate = rospy.Rate(100)
   while not rospy.is_shutdown():
     rate.sleep()
     #For integration model
     Q_k = np.eye(9)*.2
     w = np.random.multivariate_normal(np.zeros([1,9])[0], Q_k)
     #For sensor measurement
     R_k = np.eye(3)*.01
     v = np.random.multivariate_normal(np.zeros([1,3])[0], R_k)
     #Predict
     s_k =  np.transpose(np.dot(self.A, self.s))
     P_k = np.add(np.dot(np.dot(self.A, self.P), np.transpose(self.A)), Q_k)
     #Update
     if self.update_unfilt:
       y_k = np.subtract(np.transpose(np.add(self.z, v)), np.dot(self.H, s_k))
        S_K = np.add(np.dot(np.dot(self.H, P_k), np.transpose(self.H)), R_k)
        K = np.dot(P_k, np.dot(np.transpose(self.H), np.linalg.pinv(S_K)))
       self.s = np.add(s_k, np.dot(K, y_k))
       self.P = np.dot(np.subtract(np.eye(9), np.dot(K, self.H)), P_k)
       #This could result in a terrible race condition. But will need to test
       self.update_filt = False
     else:
       self.s = s_k
       self.P = P_k
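The predict/update pair above follows the standard Kalman filter equations. Below is a minimal, self-contained sketch of the same cycle for a single scalar state; it is not part of the original ROS node, and all numbers (A, H, Q, R, the measurement z) are illustrative assumptions.

import numpy as np

A = np.array([[1.0]])    # state transition
H = np.array([[1.0]])    # measurement model
Q = np.array([[0.2]])    # process noise covariance
R = np.array([[0.01]])   # measurement noise covariance

s = np.array([[0.0]])    # state estimate
P = np.array([[1.0]])    # estimate covariance
z = np.array([[1.2]])    # one hypothetical measurement

# Predict
s_pred = A @ s
P_pred = A @ P @ A.T + Q

# Update
y = z - H @ s_pred                      # innovation
S = H @ P_pred @ H.T + R                # innovation covariance
K = P_pred @ H.T @ np.linalg.pinv(S)    # Kalman gain
s = s_pred + K @ y
P = (np.eye(1) - K @ H) @ P_pred
print(s.ravel(), P.ravel())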
Example #2
0
    def __init__(self, outputSlot, roi, minBlockShape, batchSize=None):
        self._outputSlot = outputSlot
        self._bigRoi = roi
        self._minBlockShape = minBlockShape
        
        if batchSize is None:
            batchSize=2

        # Align the blocking with the start of the roi
        offsetRoi = ([0] * len(roi[0]), numpy.subtract(roi[1], roi[0]))
        self._minBlockStarts = getIntersectingBlocks(minBlockShape, offsetRoi)
        self._minBlockStarts += roi[0] # Un-offset

        totalVolume = numpy.prod( numpy.subtract(roi[1], roi[0]) )
        # For now, simply iterate over the min blocks
        # TODO: Auto-dilate block sizes based on CPU/RAM usage.
        def roiGen():
            block_iter = self._minBlockStarts.__iter__()
            while True:
                block_start = block_iter.next()

                # Use offset blocking
                offset_block_start = block_start - self._bigRoi[0]
                offset_data_shape = numpy.subtract(self._bigRoi[1], self._bigRoi[0])
                offset_block_bounds = getBlockBounds( offset_data_shape, minBlockShape, offset_block_start )
                
                # Un-offset
                block_bounds = ( offset_block_bounds[0] + self._bigRoi[0],
                                 offset_block_bounds[1] + self._bigRoi[0] )
                logger.debug( "Requesting Roi: {}".format( block_bounds ) )
                yield block_bounds
        
        self._requestBatch = RoiRequestBatch( self._outputSlot, roiGen(), totalVolume, batchSize )
Example #3
    def _executePredictionImage(self, roi, destination):
        # Determine intersecting blocks
        block_shape = self._getFullShape( self.BlockShape3dDict.value )
        block_starts = getIntersectingBlocks( block_shape, (roi.start, roi.stop) )
        block_starts = map( tuple, block_starts )

        # Ensure that block pipelines exist (create first if necessary)
        for block_start in block_starts:
            self._ensurePipelineExists(block_start)

        # Retrieve result from each block, and write into the appropriate region of the destination
        # TODO: Parallelize this loop
        for block_start in block_starts:
            opBlockPipeline = self._blockPipelines[block_start]
            block_roi = opBlockPipeline.block_roi
            block_intersection = getIntersection( block_roi, (roi.start, roi.stop) )
            block_relative_intersection = numpy.subtract(block_intersection, block_roi[0])
            destination_relative_intersection = numpy.subtract(block_intersection, roi.start)
            
            destination_slice = roiToSlice( *destination_relative_intersection )
            req = opBlockPipeline.PredictionImage( *block_relative_intersection )
            req.writeInto( destination[destination_slice] )
            req.wait()

        return destination
Example #4
def ratio_err(top,bottom,top_low,top_high,bottom_low,bottom_high):
    #uses simple propagation of errors (partial derivatives)
    #note it returns errorbars, not interval

    #-make sure input is numpy arrays-
    top = np.array(top)
    top_low = np.array(top_low)
    top_high = np.array(top_high)
    bottom = np.array(bottom)
    bottom_low = np.array(bottom_low)
    bottom_high = np.array(bottom_high)

    #-calculate errorbars-
    top_errlow = np.subtract(top,top_low)
    top_errhigh = np.subtract(top_high,top)
    bottom_errlow = np.subtract(bottom,bottom_low)
    bottom_errhigh = np.subtract(bottom_high,bottom)

    #-calculate ratio_low-
    ratio_low  = np.sqrt( np.square(np.divide(top_errlow,bottom)) + np.square( np.multiply(np.divide(top,np.square(bottom)),bottom_errlow)) )
    #-calculate ratio_high-
    ratio_high = np.sqrt( np.square(np.divide(top_errhigh,bottom)) + np.square( np.multiply(np.divide(top,np.square(bottom)),bottom_errhigh)) )
#    ratio_high = ((top_errhigh/bottom)**2.0 + (top/(bottom**2.0))*bottom_errhigh)**2.0)**0.5

    # return two vectors, err_low and err_high
    return ratio_low,ratio_high
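A short usage sketch for ratio_err with made-up numbers (numpy is assumed to be imported as np, as the example itself requires): each quantity is given with the lower and upper ends of its interval, and the function returns asymmetric error bars on top/bottom.

top = [10.0, 20.0]
top_low, top_high = [9.0, 18.0], [11.0, 22.0]
bottom = [5.0, 4.0]
bottom_low, bottom_high = [4.5, 3.5], [5.5, 4.5]

err_low, err_high = ratio_err(top, bottom, top_low, top_high, bottom_low, bottom_high)
print(err_low)    # lower error bars on top/bottom
print(err_high)   # upper error bars on top/bottom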
Example #5
 def SpringEnergy(self):
     
     total = 0.
     
     #Energy between the pinned, immobile weight and the first bead
     subtotal = 0.
     for j in xrange(len(self.beads[0])):
         subtotal += np.linalg.norm(np.subtract(self.w1[j],self.beads[0][j]),ord=2)/len(self.beads[0][j])
     total+=subtotal
     
     #Energy between mobile beads
     for i,b in enumerate(self.beads):
         if i < len(self.beads)-1:
             #print "Tallying energy between bead " + str(i) + " and bead " + str(i+1)
             subtotal = 0.
             for j in xrange(len(b)):
                 subtotal += np.linalg.norm(np.subtract(self.beads[i][j],self.beads[i+1][j]),ord=2)/len(self.beads[0][j])
             total+=subtotal
             
     #Energy between pinned, immobile final weights, and the last bead
     subtotal = 0.
     for j in xrange(len(self.beads[-1])):
         subtotal += np.linalg.norm(np.subtract(self.w2[j],self.beads[-1][j]),ord=2)/len(self.beads[0][j])
     total+=subtotal
     
     return total/len(self.beads)
Example #6
0
def convert_yuv420_to_rgb_image(y_plane, u_plane, v_plane,
                                w, h,
                                ccm_yuv_to_rgb=DEFAULT_YUV_TO_RGB_CCM,
                                yuv_off=DEFAULT_YUV_OFFSETS):
    """Convert a YUV420 8-bit planar image to an RGB image.

    Args:
        y_plane: The packed 8-bit Y plane.
        u_plane: The packed 8-bit U plane.
        v_plane: The packed 8-bit V plane.
        w: The width of the image.
        h: The height of the image.
        ccm_yuv_to_rgb: (Optional) the 3x3 CCM to convert from YUV to RGB.
        yuv_off: (Optional) offsets to subtract from each of Y,U,V values.

    Returns:
        RGB float-3 image array, with pixel values in [0.0, 1.0].
    """
    y = numpy.subtract(y_plane, yuv_off[0])
    u = numpy.subtract(u_plane, yuv_off[1]).view(numpy.int8)
    v = numpy.subtract(v_plane, yuv_off[2]).view(numpy.int8)
    u = u.reshape(h/2, w/2).repeat(2, axis=1).repeat(2, axis=0)
    v = v.reshape(h/2, w/2).repeat(2, axis=1).repeat(2, axis=0)
    yuv = numpy.dstack([y, u.reshape(w*h), v.reshape(w*h)])
    flt = numpy.empty([h, w, 3], dtype=numpy.float32)
    flt.reshape(w*h*3)[:] = yuv.reshape(h*w*3)[:]
    flt = numpy.dot(flt.reshape(w*h,3), ccm_yuv_to_rgb.T).clip(0, 255)
    rgb = numpy.empty([h, w, 3], dtype=numpy.uint8)
    rgb.reshape(w*h*3)[:] = flt.reshape(w*h*3)[:]
    return rgb.astype(numpy.float32) / 255.0
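Per pixel, the conversion above is an offset subtraction followed by a 3x3 matrix multiply. The following standalone sketch uses an assumed BT.601-style matrix and offsets; the module constants DEFAULT_YUV_TO_RGB_CCM and DEFAULT_YUV_OFFSETS are not reproduced here, so these numbers are placeholders only.

import numpy

ccm = numpy.array([[1.0,  0.0,    1.402],
                   [1.0, -0.344, -0.714],
                   [1.0,  1.772,  0.0  ]])   # assumed YUV -> RGB matrix
offsets = numpy.array([0, 128, 128])         # assumed Y, U, V offsets

yuv_pixel = numpy.array([200, 100, 150])     # one hypothetical Y, U, V triple
rgb_pixel = ccm.dot(yuv_pixel - offsets).clip(0, 255) / 255.0
print(rgb_pixel)                             # RGB values in [0.0, 1.0]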
Example #7
0
	def evaluate_args((cutoff,)):
		feats = []
		#print cutoff
		if cutoff < 0:
			print "ARGH, UNDER BOUNDS!"
			cutoff = 0.0
		if cutoff > rate/2.0:
			print "ARGH, OVER BOUNDS!"
			cutoff = rate/2.0
		for mag, sig in zip(probe_magnitudes, probe_signals):
			result = eyefilter(sig, sampling_rate, cutoff=cutoff, order=order)
			feats.append(saccade_features(t, result, mag))
		
			#plt.subplot(2,1,1)
			#plt.plot(result)
		
		empiric_maxs, empiric_durations = zip(*feats)
		#plt.subplot(2,1,2)
		#plt.suptitle(cutoff)
		#plt.plot(probe_magnitudes, empiric_maxs)
		#plt.plot(probe_magnitudes, empiric_durations)
		#plt.show()
		
		velerr = np.subtract(empiric_maxs, mag_to_vel(probe_magnitudes))**2/mag_to_vel_scaler**2
		durerr = np.subtract(empiric_durations, mag_to_dur(probe_magnitudes))**2/mag_to_dur_scaler**2

		#print velerr, durerr, mag_to_dur_scaler, empiric_durations
		# TODO: Velocity and duration aren't necessarily commensurable
		return np.mean((velerr+durerr))#+durerr))
Example #8
0
def resolve_collision(m):
    # Calculate relative velocity
    rv = numpy.subtract(m.b.velocity, m.a.velocity)

    # Calculate relative velocity in terms of the normal direction
    velocity_along_normal = numpy.dot(rv, m.normal)

    # Do not resolve if velocities are separating
    if velocity_along_normal > 0:
        # print("Separating:", velocity_along_normal)
        # print("  Normal:  ", m.normal)
        # print("  Vel:     ", m.b.velocity, m.a.velocity)
        return False

    # Calculate restitution
    e = min(m.a.restitution, m.b.restitution)

    # Calculate impulse scalar
    j = -(1 + e) * velocity_along_normal
    j /= 1 / m.a.mass + 1 / m.b.mass

    # Apply impulse
    impulse = numpy.multiply(j, m.normal)

    # print("Before: ", m.a.velocity, m.b.velocity)
    m.a.velocity = numpy.subtract(m.a.velocity,
                                  numpy.multiply(1 / m.a.mass, impulse))
    m.b.velocity = numpy.add(m.b.velocity,
                             numpy.multiply(1 / m.b.mass, impulse))
    # print("After:  ", m.a.velocity, m.b.velocity)
    # print("  Normal:  ", m.normal)

    return True
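A usage sketch with two hypothetical bodies approaching head-on. The manifold m is assumed to expose .a, .b and .normal (pointing from a to b), and each body .velocity, .mass and .restitution, matching the attributes the function reads; SimpleNamespace stands in for whatever body class the original code uses.

import numpy
from types import SimpleNamespace

a = SimpleNamespace(velocity=numpy.array([1.0, 0.0]), mass=1.0, restitution=0.5)
b = SimpleNamespace(velocity=numpy.array([-1.0, 0.0]), mass=1.0, restitution=0.5)
m = SimpleNamespace(a=a, b=b, normal=numpy.array([1.0, 0.0]))

print(resolve_collision(m))      # True: the bodies were approaching
print(a.velocity, b.velocity)    # [-0.5  0.] [0.5  0.] with these numbers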
Example #9
def energy(x, y, z):
    ex = np.sqrt(np.sum(np.square(np.subtract(x, np.mean(x)))))
    ey = np.sqrt(np.sum(np.square(np.subtract(y, np.mean(y)))))
    ez = np.sqrt(np.sum(np.square(np.subtract(z, np.mean(z)))))

    e = (1.0 / (3 * len(x))) * (ex + ey + ez)
    return e
Example #10
0
def normalize_layout(l):
    """Make sure all the spots in a layout are where you can click.

    Returns a copy of the layout with all spot coordinates
    normalized to within (0.0, 0.98).

    """
    xs = []
    ys = []
    ks = []
    for (k, (x, y)) in l.items():
        xs.append(x)
        ys.append(y)
        ks.append(k)
    minx = np.min(xs)
    maxx = np.max(xs)
    try:
        xco = 0.98 / (maxx - minx)
        xnorm = np.multiply(np.subtract(xs, [minx] * len(xs)), xco)
    except ZeroDivisionError:
        xnorm = np.array([0.5] * len(xs))
    miny = np.min(ys)
    maxy = np.max(ys)
    try:
        yco = 0.98 / (maxy - miny)
        ynorm = np.multiply(np.subtract(ys, [miny] * len(ys)), yco)
    except ZeroDivisionError:
        ynorm = np.array([0.5] * len(ys))
    return dict(zip(ks, zip(map(float, xnorm), map(float, ynorm))))
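A quick usage sketch with a hypothetical layout mapping spot names to (x, y) positions (numpy is assumed to be imported as np, as in the example above):

layout = {'a': (0.0, 0.0), 'b': (10.0, 5.0), 'c': (5.0, 5.0)}
print(normalize_layout(layout))
# approximately {'a': (0.0, 0.0), 'b': (0.98, 0.98), 'c': (0.49, 0.98)}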
Example #11
0
    def test_pi_ops_nat(self):
        idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'],
                          freq='M', name='idx')
        expected = PeriodIndex(['2011-03', '2011-04', 'NaT', '2011-06'],
                               freq='M', name='idx')

        self._check(idx, lambda x: x + 2, expected)
        self._check(idx, lambda x: 2 + x, expected)
        self._check(idx, lambda x: np.add(x, 2), expected)

        self._check(idx + 2, lambda x: x - 2, idx)
        self._check(idx + 2, lambda x: np.subtract(x, 2), idx)

        # freq with mult
        idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'],
                          freq='2M', name='idx')
        expected = PeriodIndex(['2011-07', '2011-08', 'NaT', '2011-10'],
                               freq='2M', name='idx')

        self._check(idx, lambda x: x + 3, expected)
        self._check(idx, lambda x: 3 + x, expected)
        self._check(idx, lambda x: np.add(x, 3), expected)

        self._check(idx + 3, lambda x: x - 3, idx)
        self._check(idx + 3, lambda x: np.subtract(x, 3), idx)
Example #12
0
    def __init__(self, indices, vertices, normals, texcoords, material):
        """A triangle should not be created manually."""

        self.vertices = vertices
        """A (3, 3) float array for points in the triangle"""
        self.normals = normals
        """A (3, 3) float array with the normals for points in the triangle.
        If the triangle didn't have normals, they will be computed."""
        self.texcoords = texcoords
        """A tuple with (3, 2) float arrays with the texture coordinates
          for the points in the triangle"""
        self.material = material
        """If coming from an unbound :class:`collada.triangleset.TriangleSet`, contains a
          string with the material symbol. If coming from a bound
          :class:`collada.triangleset.BoundTriangleSet`, contains the actual
          :class:`collada.material.Effect` the triangle is bound to."""
        self.indices = indices
        """A (3, 3) int array with vertex indexes in the vertex array"""

        if self.normals is None:
            # generate normals
            vec1 = numpy.subtract(vertices[0], vertices[1])
            vec2 = numpy.subtract(vertices[2], vertices[0])
            vec3 = toUnitVec(numpy.cross(toUnitVec(vec2), toUnitVec(vec1)))
            self.normals = numpy.array([vec3, vec3, vec3])
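The generated normal above is the usual cross-product construction. A standalone sketch follows; toUnitVec is assumed to simply normalize a vector, so a local helper stands in for it here, and the vertex values are made up.

import numpy

def to_unit(v):
    return v / numpy.linalg.norm(v)

vertices = numpy.array([[0.0, 0.0, 0.0],
                        [1.0, 0.0, 0.0],
                        [0.0, 1.0, 0.0]])
vec1 = numpy.subtract(vertices[0], vertices[1])
vec2 = numpy.subtract(vertices[2], vertices[0])
normal = to_unit(numpy.cross(to_unit(vec2), to_unit(vec1)))
print(normal)   # [0. 0. 1.] for this vertex ordering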
Example #13
def calc_spectral_kurtosis(data, win_size=2048):
    """
    Spectral kurtosis formula from:
    Thoman, Chris. Model-Based Classification Of Speech Audio. ProQuest, 2009.

    Parameters
    ----------
    data: audio array in mono

    win_size: analysis block size in samples

    Returns
    -------
    The spectral kurtosis for each window
    """
    # Get M[n]
    magnitudes = np.abs(compute_stft(data, win_size))

    # Get SCt and turn it into a matrix for easy calculation
    sc = calc_spectral_centroid(data, win_size)
    sc_matrix = np.tile(sc, (magnitudes.shape[0], 1))

    # Get F[n] and matricize it
    fk = generate_fft_bins(win_size)
    fk_matrix = np.transpose(np.tile(fk, (magnitudes.shape[1], 1)))

    # Get SPt
    ss = calc_spectral_spread(data, win_size)

    # Create the numerator and denominators
    numerator = np.sum(np.multiply(np.power(np.subtract(fk_matrix, sc_matrix), 4), magnitudes), axis=0)
    denominator = np.multiply(np.power(ss, 4), np.sum(magnitudes, axis=0))

    # Kurtosis incoming
    return np.subtract(np.divide(numerator, denominator), 3)
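For a single analysis frame the computation above reduces to a weighted fourth moment around the spectral centroid, divided by the spread to the fourth power, minus 3. A sketch with hypothetical magnitudes and bin frequencies, assuming the usual definitions of centroid and spread (the helpers compute_stft, calc_spectral_centroid, calc_spectral_spread and generate_fft_bins are not reproduced here):

import numpy as np

M = np.array([0.1, 0.8, 1.0, 0.4])         # magnitudes M[k] for one frame
f = np.array([0.0, 100.0, 200.0, 300.0])   # bin centre frequencies F[k]

sc = np.sum(f * M) / np.sum(M)                        # spectral centroid
sp = np.sqrt(np.sum((f - sc) ** 2 * M) / np.sum(M))   # spectral spread
kurtosis = np.sum((f - sc) ** 4 * M) / (sp ** 4 * np.sum(M)) - 3
print(kurtosis)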
Example #14
0
    def test_parr_ops_errors(self, ng, box_with_array):
        idx = PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
                          freq='M', name='idx')
        obj = tm.box_expected(idx, box_with_array)

        msg = r"unsupported operand type\(s\)"
        with pytest.raises(TypeError, match=msg):
            obj + ng

        with pytest.raises(TypeError):
            # error message differs between PY2 and 3
            ng + obj

        with pytest.raises(TypeError, match=msg):
            obj - ng

        with pytest.raises(TypeError):
            np.add(obj, ng)

        with pytest.raises(TypeError):
            np.add(ng, obj)

        with pytest.raises(TypeError):
            np.subtract(obj, ng)

        with pytest.raises(TypeError):
            np.subtract(ng, obj)
Example #15
0
    def test_standardizeSamples( self ):
        '''
        Test standardizeSamples() function calculates mean and deviation and 
        applies it properly to all samples of a given feature
        Note: Just testing the first feature here
        '''

        # Configure the data to be split evenly for the test
        self.mLearningAgent.sampleSlice( 0.5 )

        # Calculate average and standard deviation
        mSum = sum( self.mLearningAgent.X_train[:, 0] )
        mAvg = mSum / len( self.mLearningAgent.X_train[:, 0] )
        mStdDev = sum( np.square( np.subtract( 
            self.mLearningAgent.X_train[:, 0], mAvg ) ) )
        mStdDev = sqrt( fabs( 
            mStdDev / len( self.mLearningAgent.X_train[:, 0] ) ) )

        # Apply calculated average and standard deviation to samples
        mNorm = np.divide( np.subtract( self.mLearningAgent.X_train[:, 0],
                                        mAvg ), mStdDev )

        # Execute LearningAgent implementation
        self.mLearningAgent.standardizeSamples()

        # Assert local calculation matches with LearningAgent implementation
        self.assertTrue( fabs( sum(
            np.subtract( mNorm, self.mLearningAgent.X_train[:, 0] ) ) )
                         < 0.001 )
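The quantity being tested is an ordinary z-score; a minimal sketch on a hypothetical feature column:

import numpy as np

col = np.array([2.0, 4.0, 6.0, 8.0])
standardized = (col - col.mean()) / col.std()
print(standardized)                          # deviations in units of std
print(standardized.mean(), standardized.std())   # 0.0 and 1.0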
Example #16
0
    def _Gram(self, X):
        if X is self.X:
            if self.Gs_train is None:
                kernel_scalar = rbf_kernel(self.X, gamma=self.gamma)[:, :,
                                                                     newaxis,
                                                                     newaxis]
                delta = subtract(X.T[:, newaxis, :], self.X.T[:, :, newaxis])
                self.Gs_train = asarray(transpose(
                    2 * self.gamma * kernel_scalar *
                    (2 * self.gamma * (delta[:, newaxis, :, :] *
                                       delta[newaxis, :, :, :]).transpose(
                        (3, 2, 0, 1)) +
                        ((self.p - 1) - 2 * self.gamma *
                         _norm_axis_0(delta)[:, :, newaxis, newaxis]**2) *
                        eye(self.p)[newaxis, newaxis, :, :]), (0, 2, 1, 3)
                )).reshape((self.p * X.shape[0], self.p * self.X.shape[0]))
            return self.Gs_train

        kernel_scalar = rbf_kernel(X, self.X, gamma=self.gamma)[:, :,
                                                                newaxis,
                                                                newaxis]
        delta = subtract(X.T[:, newaxis, :], self.X.T[:, :, newaxis])
        return asarray(transpose(
            2 * self.gamma * kernel_scalar *
            (2 * self.gamma * (delta[:, newaxis, :, :] *
                               delta[newaxis, :, :, :]).transpose(
                (3, 2, 0, 1)) +
                ((self.p - 1) - 2 * self.gamma *
                 _norm_axis_0(delta).T[:, :, newaxis, newaxis]**2) *
                eye(self.p)[newaxis, newaxis, :, :]), (0, 2, 1, 3)
        )).reshape((self.p * X.shape[0], self.p * self.X.shape[0]))
Example #17
def triangle_quality (pt1, pt2, pt3) :
    """Construct a quality index for a triangle
    
    The value of the index varies between 1 and infinity,
    1 being an equilateral triangle.
    
    Q = hmax P / (4 sqrt(3) S)
    where :
     - hmax, length of longest edge
     - P, perimeter
     - S, surface
    
    :Parameters:
     - `pt1` (Vector) - corner
     - `pt2` (Vector) - corner
     - `pt3` (Vector) - corner
    
    :Returns Type: float
    """
    e1 = norm(subtract(pt3,pt2) )
    e2 = norm(subtract(pt3,pt1) )
    e3 = norm(subtract(pt2,pt1) )
    
    P = (e1 + e2 + e3) / 2.
    hmax = max(e1,e2,e3)
    S2 = P * (P - e1) * (P - e2) * (P - e3)
    if S2 <= 0 :
        return 1e6
    else :
        return hmax * P / 2. / sqrt(3.) / sqrt(S2)
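A quick check with hypothetical points, reusing the example's own norm/subtract/sqrt imports: an equilateral triangle gives Q = 1, while a nearly flat triangle gives a much larger value.

print(triangle_quality((0.0, 0.0), (1.0, 0.0), (0.5, 3 ** 0.5 / 2)))  # ~1.0
print(triangle_quality((0.0, 0.0), (1.0, 0.0), (0.5, 0.05)))          # ~11.6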
Example #18
0
def filter_LT(t, i, *, binsize=5):
    x, y = get1DPeak(t, i, interpolate=False, indicies=True, use="mode")

    #Y
    base = numpy.average(i[:x])
    i = numpy.subtract(i, base)
    i = numpy.divide(i[x:], numpy.max(i[x:]))
    #i = numpy.log(i)

    #X
    t = t[x:]
    t = numpy.subtract(t, t[0])

    i_f = []
    i_f_rms = []
    t_f = []
    for j in range(0, len(i), binsize):
        bin = i[j: j+binsize]
        if j > 0 and (numpy.average(bin) < 0 or numpy.average(bin) <= numpy.std(bin)):
            break
        i_f.append(numpy.average(bin))
        i_f_rms.append(numpy.std(bin))
        bin_t = t[j: j+binsize]
        t_f.append(numpy.average(bin_t))

    return t_f, i_f, i_f_rms
Example #19
0
def point_to_point_azimuth(point0, point1, out=None):
    """Azimuth of vector that joins two points.

    Parameters
    ----------
    point0 : tuple of array_like
        (y0, x0) coordinates of the first point(s).
    point1 : tuple of array_like
        (y1, x1) coordinates of the second point(s).
    out : array_like, optional
        An array to store the output. Must be the same shape as the output would
        have.

    Returns
    -------
    azimuth : array_like
        Azimuth of the vector joining the points, in degrees; if *out* is
        provided, the returned array is *out*.

    Examples
    --------
    >>> from landlab.grid.unstructured.base import point_to_point_azimuth
    >>> point_to_point_azimuth((0, 0), (1, 0))
    array([ 0.])
    >>> point_to_point_azimuth([(0, 1), (0, 1)], (1, 0))
    array([  0., -90.])
    >>> point_to_point_azimuth([(0, 1, 0), (0, 1, 2)], [(1, 1, 2), (0, 0, 4)])
    array([  0., -90.,  45.])
    """
    azimuth_in_rads = point_to_point_angle(point0, point1, out=out)
    if out is None:
        return (np.pi * .5 - azimuth_in_rads) * 180. / np.pi
    else:
        np.subtract(np.pi * .5, azimuth_in_rads, out=out)
        return np.multiply(out, 180. / np.pi, out=out)
Example #20
def res_plot(true,pre,data):
	
	N = len(pre)
	iniarray = [1]*N

	ind = np.arange(N)  # the x locations for the groups
	width = 1       # the width of the bars
	
	for index in range(len(pre[0])):
		
		false_res = np.subtract(true,pre[:,index])
		false_res = np.absolute(false_res)
		true_res = np.subtract(iniarray,false_res)
		
		ax = plt.subplot(3,1,index+1)
		rects1 = ax.bar(ind, false_res, 1, color='#E92424')

		rects2 = ax.bar(ind, true_res, 1, color='#54D816')

		# add some
		
		ax.set_ylabel('trends')
		
		if index==0:
			ax.set_title('prediction result')
			ax.legend( (rects1[0], rects2[0]), ('wrong', 'correct'))
		ax.set_xticks(ind+width)
		ax.set_xticklabels( range(N) )
		
		
		plt.xlabel(models[index])
		

	plt.show()
Example #21
0
def compute_factors(signal_dict_by_tf_1, signal_dict_by_tf_2):
    keys = signal_dict_by_tf_1.keys()

    signal_1 = np.zeros(len(keys))
    signal_2 = np.zeros(len(keys))

    for idx, key in enumerate(keys):
        signal_1[idx] = sum(signal_dict_by_tf_1[key])
        signal_2[idx] = sum(signal_dict_by_tf_2[key])

    # Take log
    log_tc1 = np.log(signal_1)
    log_tc2 = np.log(signal_2)

    # Average
    average_log_tc = np.add(log_tc1, log_tc2) / 2

    # Filter
    filter_log_tc1 = log_tc1[~np.isnan(log_tc1)]
    filter_log_tc2 = log_tc2[~np.isnan(log_tc2)]
    filter_log_tc = average_log_tc[~np.isnan(average_log_tc)]

    # Subtract
    sub_tc1 = np.subtract(filter_log_tc1, filter_log_tc)
    sub_tc2 = np.subtract(filter_log_tc2, filter_log_tc)

    median_tc1 = np.median(sub_tc1)
    median_tc2 = np.median(sub_tc2)

    factor1 = np.exp(median_tc1)
    factor2 = np.exp(median_tc2)

    return factor1, factor2
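A usage sketch with two hypothetical per-key signal dictionaries (numpy assumed imported as np): because the second sample is exactly twice the first, the factors come out as 1/sqrt(2) and sqrt(2).

d1 = {'a': [1.0, 1.0], 'b': [4.0], 'c': [2.0, 2.0]}
d2 = {'a': [2.0, 2.0], 'b': [8.0], 'c': [4.0, 4.0]}
f1, f2 = compute_factors(d1, d2)
print(f1, f2)   # ~0.707, ~1.414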
Example #22
def face_surface_2D (mesh, pos, fid, return_barycenter = False) :
	"""Compute surface of a polygonal convex face
	
	:Parameters:
	 - `mesh` (:class:`openalea.container.Topomesh`)
	 - `pos` (dict of (pid|array) ) - geometrical position of points in space
	 - `fid` (fid) - id of the face to consider
	 - `return_barycenter` (bool) - tells whether the function will return
	                                the barycenter of the face too
	
	:Returns Type: float or (float,array)
	"""
	bary = centroid(mesh,pos,2,fid)
	
	#compute triangle for each edge
	surface = 0.
	for eid in mesh.borders(2,fid) :
		pid1,pid2 = mesh.borders(1,eid)
		surface += abs(cross(subtract(pos[pid1],bary),
		                     subtract(pos[pid2],bary) ) )
	
	#return
	if return_barycenter :
		return surface / 2.,bary
	else :
		return surface / 2.
Example #23
0
def do_intersect(p1, p2, q1, q2):
	# s1 = p1 + tr, r = p2 - p1
	# s2 = q1 + us, s = q2 - q1
	r = np.subtract(p2, p1)
	s = np.subtract(q2, q1)
	rxs = np.cross(r, s)
	# if r x s = 0, s1 and s2 are parallel or collinear.
	if rxs == 0:
		# if parallel, (p - q) x r != 0
		if np.cross(np.subtract(p1, q1), r) != 0:
			return False
		else:
			# project onto x- and y-axis and check for overlap.
			return ((q1[0] >= p1[0] and q1[0] <= p2[0] or
				q2[0] >= p1[0] and q2[0] <= p2[0] or
				p1[0] >= q1[0] and p1[0] <= q2[0] or
				p2[0] >= q1[0] and p2[0] <= q2[0]) and
				(q1[1] >= p1[1] and q1[1] <= p2[1] or
					q2[1] >= p1[1] and q2[1] <= p2[1] or
					p1[1] >= q1[1] and p1[1] <= q2[1] or
					p2[1] >= q1[1] and p2[1] <= q2[1]));
	# s1 and s2 intersect where s1 = s2 where 0 <= t <= 1 and 0 <= u <= 1
	#   (p + tr) x s = (q + us) x s
	# p x s + tr x s = q x s + us x s
	#         tr x s = q x s - p x s
	#              t = (q - p) x s / r x s
	t = np.cross(np.subtract(q1, p1), s) / rxs
	u = np.cross(np.subtract(q1, p1), r) / rxs
	if 0 <= t and 1 >= t and 0 <= u and 1 >= u:
		return True
	else:
		return False
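A usage sketch with hypothetical 2-D segments (numpy assumed imported as np, as in the example): the first pair crosses at (1, 1), the second pair is parallel and disjoint.

print(do_intersect((0, 0), (2, 2), (0, 2), (2, 0)))   # True
print(do_intersect((0, 0), (2, 0), (0, 1), (2, 1)))   # False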
Example #24
0
 def __call__(self, values, clip=True, out=None):
     values = _prepare(values, clip=clip, out=out)
     np.multiply(values, np.log(self.exp + 1.), out=values)
     np.exp(values, out=values)
     np.subtract(values, 1., out=values)
     np.true_divide(values, self.exp, out=values)
     return values
Example #25
0
    def test_ufunc_coercions(self):
        idx = date_range('2011-01-01', periods=3, freq='2D', name='x')

        delta = np.timedelta64(1, 'D')
        for result in [idx + delta, np.add(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '2D'

        for result in [idx - delta, np.subtract(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '2D'

        delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
                          np.timedelta64(3, 'D')])
        for result in [idx + delta, np.add(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
                                freq='3D', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '3D'

        for result in [idx - delta, np.subtract(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
                                freq='D', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == 'D'
Example #26
                def dispatch_request(self, subject):

                    assert subject['method'] == 'train' or subject['method'] == 'accuracy'
                    if subject['method'] == 'train':
                        params = subject['params']
                        net = loads_net(params)
                        o_weights = np.copy(net.weights)
                        o_biases = np.copy(net.biases)
                        #mini_batch_size = 10 #TODO maybe allow this to come from params 
                        net.SGD(training_data, 1, 10, 0.5, evaluation_data=test_data, monitor_evaluation_accuracy=True)
                        delt_weights = np.subtract(net.weights, o_weights)
                        delt_biases = np.subtract(net.biases, o_biases)
                        send_net = {}
                        send_net['delt_weights'] = delt_weights
                        send_net['delt_biases'] = delt_biases
                        print('sending updates')
                        # send the deltas back to the server
                        #res = self.parent.request("updates", wait_for_response=True)
                        #print('res: ' + repr(res))
                        self.parent.request('updates', params=send_net, wait_for_response = False)
                        return 'round done'
                    elif subject['method'] == 'accuracy':
                        params = subject['params']
                        net = loads_net(params)
                        return net.accuracy(test_data)
Example #27
0
def clearShot(p1, p2, worldLines, worldPoints, agent):
	### YOUR CODE GOES BELOW HERE ###
	
	def minDistance(point):
		best = INFINITY
		for line in worldLines:
			current = minimumDistance(line, point)
			if current < best:
				best = current
		return best
	
	# Insurance check to avoid divide by zero error
	if distance(p1, p2) < EPSILON:
		return True
	# Fetch agent's max radius
	radius = agent.getMaxRadius()
	# Find the deltas in x and y, and scale them based on length of agent's max radius
	(dx, dy) = numpy.multiply(numpy.subtract(p2, p1), radius / distance(p1, p2))
	# Swap x and y and flip sign of one for perpendicular translation vector
	p = (dy, -dx)
	# Check edges of agent line of travel for collisions; add line if no collision
	if rayTraceWorld(numpy.add(p1, p), numpy.add(p2, p), worldLines) == None:
		if rayTraceWorld(numpy.subtract(p1, p), numpy.subtract(p2, p), worldLines) == None:
			if minDistance(p1) > radius and minDistance(p2) > radius:
				return True
	
	### YOUR CODE GOES ABOVE HERE ###
	return False
Example #28
def parabola_fit(filter_cost_path, cost_path, start, end):
  '''
  Returns parabolic fit of filtered cost path, as well as residuals
  :parameters:
    - filter_cost_path : numpy.ndarray
      The cost path median filtered (truncated to best fit parabolic fit)
    - cost_path : numpy.ndarray
      Original cost path, vector of values of cells accessed in alignment path
    - start : int
      Index at which the truncated cost_path starts so it best aligns with the filtered form
    - end : int
      Index at which the truncated cost_path ends so it best aligns with the filtered form

  :returns:
    - parab : numpy.ndarray
      Parabola of best fit
    - residuals_filt : numpy.ndarray
      Residuals from subtracting parab from the filtered cost path
    - res_original : numpy.ndarray
      Residuals from subtracting parab from the original cost path
  '''
  x = np.arange(start = 0, stop = filter_cost_path.shape[0])
  p = np.polyfit(x =x, y =filter_cost_path, deg = 2)
  # build residuals because apparently numpy just gives the sum of them, and actual parabola because why not
  parab = p[2]+p[1]*x+p[0]*x**2

  # residuals = np.zeros(x.shape)
  residuals_filt = np.subtract(filter_cost_path, parab)
  res_original = np.subtract(cost_path[start:end], parab)
  # for i in xrange(residuals.shape[0]):
  #   residuals[i] = cost_path[i]-parab[i]
  return parab, residuals_filt, res_original
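A synthetic usage sketch with made-up data: the "filtered" path is simply the noiseless parabola here, so the filtered residuals should be near zero while the original residuals reflect the added noise.

import numpy as np

x = np.arange(100)
true_parab = 0.02 * (x - 50) ** 2 + 5.0
cost_path = true_parab + np.random.normal(0.0, 0.5, size=x.shape)

parab, res_filt, res_orig = parabola_fit(true_parab, cost_path, 0, 100)
print(np.abs(res_filt).mean())   # ~0
print(np.abs(res_orig).mean())   # ~0.4, the mean absolute deviation of the noise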
Example #29
0
    def createMesh(self):

        info = "\n\nChannel mesh:\n"

        eID = 1
        for pID in range(len(self.proMesh)-1):
            pID += 1
            for nID in range(len(self.proMesh[pID])-1):
                a1 = self.proMesh[pID][nID]
                a2 = self.proMesh[pID][nID+1]
                b1 = self.proMesh[pID+1][nID]
                b2 = self.proMesh[pID+1][nID+1]

                d1 = np.linalg.norm(np.subtract(self.nodMesh[b1], self.nodMesh[a2]))
                d2 = np.linalg.norm(np.subtract(self.nodMesh[b2], self.nodMesh[a1]))

                if d1 < d2:
                    self.mesh[eID] = [a1, a2, b1]
                    eID += 1
                    self.mesh[eID] = [b1, a2, b2]
                    eID += 1
                else:
                    self.mesh[eID] = [b1, a1, b2]
                    eID += 1
                    self.mesh[eID] = [b2, a1, a2]
                    eID += 1

        info += " - Nodes:\t{0}\n".format(len(self.nodMesh))
        info += " - Elements:\t{0}\n".format(eID-1)

        return info
Example #30
0
def getGammaAngle(appf,cAtom,oAtom,hAtom):
    # first determine the nAtom
    aminoGroup = appf.select('resnum ' + str(cAtom.getResnum()))
    for at in aminoGroup:
        if(at.getName() == 'N'):
            nAtom = at
        # get coordinates
    cCoords = cAtom.getCoords()
    oCoords = oAtom.getCoords()
    hCoords = hAtom.getCoords()
    nCoords = nAtom.getCoords()
    # get necessary vectors
    oc = np.subtract(oCoords,cCoords)
    nc = np.subtract(nCoords,cCoords)
    ho = np.subtract(hCoords,oCoords)
    n1 = np.cross(oc,nc)
    n1_unit = np.divide(n1,np.linalg.norm(n1))
    # get projection of H-O in O-C direction
    oc_unit = np.divide(oc,np.linalg.norm(oc))
    #print oc_unit
    hproj = np.dot(ho,oc_unit)
    # get projection of H-O onto N-C-O plane
    out = np.dot(ho,n1_unit)
    n2 = np.cross(np.multiply(n1_unit,out),oc)
    #print n2
    ho_ip = np.subtract(ho,np.multiply(n1_unit,out))
    test = np.dot(n2,ho_ip)
    #print test
    ang = hproj/np.linalg.norm(ho_ip)
    ang = math.acos(ang)
    ang = ang*180/math.pi
    #if(test < 0):
    #    ang = ang * -1
    return ang
Example #31
0
def approx_linear_regression(n_procs, n_samples, n_features, input_dir,
                             n_stragglers, is_real_data, params, num_collect,
                             add_delay, update_rule):

    assert update_rule in ('GD', 'AGD')

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    __num_worker_gather = num_collect
    n_workers = n_procs - 1

    if (n_workers % (n_stragglers + 1)):
        print("Error: n_workers must be multiple of n_stragglers+1!")
        sys.exit(0)

    rounds = params[0]

    #beta=np.zeros(n_features)
    beta = np.random.randn(n_features)

    #rows_per_worker=n_samples/(n_procs-1)
    rows_per_worker = n_samples // (n_procs - 1)
    n_groups = n_workers // (n_stragglers + 1)

    # Loading the data
    if (rank):

        if not is_real_data:

            X_current = np.zeros(
                ((1 + n_stragglers) * rows_per_worker, n_features))
            y_current = np.zeros((1 + n_stragglers) * rows_per_worker)
            y = load_data(input_dir + "label.dat")

            for i in range(1 + n_stragglers):
                a = (rank - 1) // (n_stragglers + 1)  # index of group
                b = (rank - 1) % (n_stragglers + 1
                                  )  # position inside the group
                idx = int((n_stragglers + 1) * a + (b + i) %
                          (n_stragglers + 1))

                X_current[i * rows_per_worker:(i + 1) *
                          rows_per_worker, :] = load_data(input_dir +
                                                          str(idx + 1) +
                                                          ".dat")
                y_current[i * rows_per_worker:(i + 1) *
                          rows_per_worker] = y[idx *
                                               rows_per_worker:(idx + 1) *
                                               rows_per_worker]

        else:

            y_current = np.zeros((1 + n_stragglers) * rows_per_worker)
            y = load_data(input_dir + "label.dat")
            for i in range(1 + n_stragglers):
                a = (rank - 1) // (n_stragglers + 1)  # index of group
                b = (rank - 1) % (n_stragglers + 1
                                  )  # position inside the group
                idx = int((n_stragglers + 1) * a + (b + i) %
                          (n_stragglers + 1))

                if i == 0:
                    X_current = load_sparse_csr(input_dir + str(idx + 1))
                else:
                    X_temp = load_sparse_csr(input_dir + str(idx + 1))
                    X_current = sps.vstack((X_current, X_temp))
                y_current[i * rows_per_worker:(i + 1) *
                          rows_per_worker] = y[idx *
                                               rows_per_worker:(idx + 1) *
                                               rows_per_worker]

    # Initializing relevant variables
    if (rank):

        predy = X_current.dot(beta)
        #g = -X_current.T.dot(np.divide(y_current,np.exp(np.multiply(predy,y_current))+1))

        g = -2 * X_current.T.dot(y_current - predy)
        send_req = MPI.Request()
        recv_reqs = []

    else:

        msgBuffers = [np.zeros(n_features) for i in range(n_procs - 1)]
        g = np.zeros(n_features)
        betaset = np.zeros((rounds, n_features))
        timeset = np.zeros(rounds)
        worker_timeset = np.zeros((rounds, n_procs - 1))

        request_set = []
        recv_reqs = []
        send_set = []

        cnt_groups = 0
        completed_groups = np.ndarray(n_groups, dtype=bool)
        completed_workers = np.ndarray(n_procs - 1, dtype=bool)

        status = MPI.Status()

        eta0 = params[2]  # ----- learning rate
        alpha = params[1]  # --- coefficient of l2 regularization
        utemp = np.zeros(n_features)  # for accelerated gradient descent

    # Posting all Irecv requests for master and workers
    if (rank):

        for i in range(rounds):
            req = comm.Irecv([beta, MPI.DOUBLE], source=0, tag=i)
            recv_reqs.append(req)
    else:

        for i in range(rounds):
            recv_reqs = []
            for j in range(1, n_procs):
                req = comm.Irecv([msgBuffers[j - 1], MPI.DOUBLE],
                                 source=j,
                                 tag=i)
                recv_reqs.append(req)
            request_set.append(recv_reqs)

    ###########################################################################################
    comm.Barrier()
    if rank == 0:
        print("---- Starting Approx Coding Iterations for " +
              str(n_stragglers) + " stragglers" + "simulated delay " +
              str(add_delay) + "-------")
        orig_start_time = time.time()

    for i in range(rounds):
        if rank == 0:

            if (i % 10 == 0):
                print("\t >>> At Iteration %d" % (i))

            send_set[:] = []
            g[:] = 0
            completed_groups[:] = False
            completed_workers[:] = False
            cnt_groups = 0
            cnt_workers = 0

            start_time = time.time()

            # bcast model step
            for l in range(1, n_procs):
                sreq = comm.Isend([beta, MPI.DOUBLE], dest=l, tag=i)
                send_set.append(sreq)

            while (cnt_workers < __num_worker_gather) and (cnt_groups <
                                                           n_groups):
                req_done = MPI.Request.Waitany(request_set[i], status)
                src = status.Get_source()
                worker_timeset[i, src - 1] = time.time() - start_time
                request_set[i].pop(req_done)

                completed_workers[src - 1] = True
                groupid = (src - 1) // (n_stragglers + 1)
                #g += msgBuffers[src-1]
                cnt_workers += 1

                if (not completed_groups[groupid]):
                    completed_groups[groupid] = True
                    g += msgBuffers[src - 1]
                    cnt_groups += 1

            grad_multiplier = eta0[i] / n_samples
            # ---- update step for gradient descent
            if update_rule == "GD":
                np.subtract((1 - 2 * alpha * eta0[i]) * beta,
                            grad_multiplier * g,
                            out=beta)
            elif update_rule == "AGD":
                # ---- updates for accelerated gradient descent
                theta = 2.0 / (i + 2.0)
                ytemp = (1 - theta) * beta + theta * utemp
                betatemp = ytemp - grad_multiplier * g - (2 * alpha *
                                                          eta0[i]) * beta
                utemp = beta + (betatemp - beta) * (1 / theta)
                beta[:] = betatemp
            else:
                raise Exception("Error update rule")

            timeset[i] = time.time() - start_time

            betaset[i, :] = beta
            ind_set = [
                l for l in range(1, n_procs) if not completed_workers[l - 1]
            ]
            for l in ind_set:
                worker_timeset[i, l - 1] = -1

            MPI.Request.Waitall(send_set)
            MPI.Request.Waitall(request_set[i])

        else:

            recv_reqs[i].Wait()

            sendTestBuf = send_req.test()
            if not sendTestBuf[0]:
                send_req.Cancel()
                #print("Worker " + str(rank) + " cancelled send request for Iteration " + str(i))

            predy = X_current.dot(beta)
            #g = X_current.T.dot(np.divide(y_current,np.exp(np.multiply(predy,y_current))+1))
            #g *= -1
            g = X_current.T.dot(y_current - predy)
            g *= -2
            ########################################## straggler simulation ###################################################
            if add_delay == 1:
                np.random.seed(seed=i)
                #straggler_indices = np.random.choice([t for t in range(1, n_workers+1)], n_stragglers, replace=False)
                #if rank in straggler_indices:
                #    time.sleep(time_sleep)
                artificial_delays = np.random.exponential(0.5, n_workers)
                delay = artificial_delays[rank - 1]
                time.sleep(delay)
            ###################################################################################################################
            send_req = comm.Isend([g, MPI.DOUBLE], dest=0, tag=i)

    #############################################################################################
    comm.Barrier()
    if rank == 0:
        elapsed_time = time.time() - orig_start_time
        print("Total Time Elapsed: %.3f" % (elapsed_time))
        # Load all training data
        if not is_real_data:
            X_train = load_data(input_dir + "1.dat")
            for j in range(2, n_procs - 1):
                X_temp = load_data(input_dir + str(j) + ".dat")
                X_train = np.vstack((X_train, X_temp))
        else:
            X_train = load_sparse_csr(input_dir + "1")
            for j in range(2, n_procs - 1):
                X_temp = load_sparse_csr(input_dir + str(j))
                X_train = sps.vstack((X_train, X_temp))

        y_train = load_data(input_dir + "label.dat")
        y_train = y_train[0:X_train.shape[0]]

        # Load all testing data
        y_test = load_data(input_dir + "label_test.dat")
        if not is_real_data:
            X_test = load_data(input_dir + "test_data.dat")
        else:
            X_test = load_sparse_csr(input_dir + "test_data")

        n_train = X_train.shape[0]
        n_test = X_test.shape[0]

        training_loss = np.zeros(rounds)
        testing_loss = np.zeros(rounds)
        auc_loss = np.zeros(rounds)

        from sklearn.metrics import roc_curve, auc

        for i in range(rounds):
            beta = np.squeeze(betaset[i, :])
            predy_train = X_train.dot(beta)
            predy_test = X_test.dot(beta)
            training_loss[i] = calculate_mse(y_train, predy_train, n_train)
            testing_loss[i] = calculate_mse(y_test, predy_test, n_test)

            # TODO: for linear regression there are no fpr/tpr any more, change to loss
            #fpr, tpr, thresholds = roc_curve(y_test,predy_test, pos_label=1)
            #auc_loss[i] = auc(fpr,tpr)
            print(
                "Iteration %d: Train Loss = %.6f, Test Loss = %.6f, Total time taken =%5.3f"
                % (i, training_loss[i], testing_loss[i], timeset[i]))

        output_dir = input_dir + "results/"
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        #save_vector(training_loss, output_dir+"naive_acc_training_loss.dat")
        #save_vector(testing_loss, output_dir+"naive_acc_testing_loss.dat")
        #save_vector(auc_loss, output_dir+"naive_acc_auc.dat")
        #save_vector(timeset, output_dir+"naive_acc_timeset.dat")
        #save_matrix(worker_timeset, output_dir+"naive_acc_worker_timeset.dat")
        print(">>> Done")

    comm.Barrier()
Example #32
0
def set_up_dataloaders(model_expected_input_size,
                       dataset_folder,
                       batch_size,
                       workers,
                       disable_dataset_integrity,
                       enable_deep_dataset_integrity,
                       inmem=False,
                       **kwargs):
    """
    Set up the dataloaders for the specified datasets.

    Parameters
    ----------
    model_expected_input_size : tuple
        Specify the height and width that the model expects.
    dataset_folder : string
        Path string that points to the three folder train/val/test. Example: ~/../../data/svhn
    batch_size : int
        Number of datapoints to process at once
    workers : int
        Number of workers to use for the dataloaders
    inmem : boolean
        Flag: if False, the dataset is loaded in an online fashion i.e. only file names are stored and images are loaded
        on demand. This is slower than storing everything in memory.

    Returns
    -------
    train_loader : torch.utils.data.DataLoader
    val_loader : torch.utils.data.DataLoader
    test_loader : torch.utils.data.DataLoader
        Dataloaders for train, val and test.
    int
        Number of classes for the model.
    """

    # Recover dataset name
    dataset = os.path.basename(os.path.normpath(dataset_folder))
    logging.info('Loading {} from:{}'.format(dataset, dataset_folder))

    ###############################################################################################
    # Load the dataset splits as images
    try:
        logging.debug("Try to load dataset as images")
        train_ds, val_ds, test_ds = image_folder_dataset.load_dataset(
            dataset_folder, inmem, workers)

        # Loads the analytics csv and extract mean and std
        mean, std = _load_mean_std_from_file(dataset_folder, inmem, workers)

        # Set up dataset transforms
        logging.debug('Setting up dataset transforms')
        # TODO: Cropping not resizing needed.

        transform = transforms.Compose([
            transforms.RandomVerticalFlip(),
            transforms.RandomHorizontalFlip(),
            transforms.RandomNineCrop(model_expected_input_size),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
            transforms.Lambda(lambda items: torch.stack([
                transforms.Normalize(mean=mean, std=std)(item)
                for item in items
            ]))
        ])

        transform_test = transforms.Compose([
            transforms.Resize(model_expected_input_size),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std)
        ])

        # transform_test = transforms.Compose([
        #     transforms.RandomNineCrop(model_expected_input_size),
        #     transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
        #     transforms.Lambda(
        #         lambda items: torch.stack([transforms.Normalize(mean=mean, std=std)(item) for item in items]))
        # ])

        train_ds.transform = transform
        val_ds.transform = transform_test
        test_ds.transform = transform_test

        train_loader, val_loader, test_loader = _dataloaders_from_datasets(
            batch_size, train_ds, val_ds, test_ds, workers)
        logging.info("Dataset loaded as images")
        _verify_dataset_integrity(dataset_folder, disable_dataset_integrity,
                                  enable_deep_dataset_integrity)
        return train_loader, val_loader, test_loader, len(train_ds.classes)

    except RuntimeError:
        logging.debug("No images found in dataset folder provided")

    ###############################################################################################
    # Load the dataset splits as bidimensional
    try:
        logging.debug("Try to load dataset as bidimensional")
        train_ds, val_ds, test_ds = bidimensional_dataset.load_dataset(
            dataset_folder)

        # Loads the analytics csv and extract mean and std
        # TODO: update bidimensional to work with new load_mean_std functions
        mean, std = _load_mean_std_from_file(dataset_folder, inmem, workers)

        # Bring mean and std into range [0:1] from original domain
        mean = np.divide((mean - train_ds.min_coords),
                         np.subtract(train_ds.max_coords, train_ds.min_coords))
        std = np.divide((std - train_ds.min_coords),
                        np.subtract(train_ds.max_coords, train_ds.min_coords))

        # Set up dataset transforms
        logging.debug('Setting up dataset transforms')
        transform = transforms.Compose(
            [transforms.ToTensor(),
             transforms.Normalize(mean=mean, std=std)])

        train_ds.transform = transform
        val_ds.transform = transform
        test_ds.transform = transform

        train_loader, val_loader, test_loader = _dataloaders_from_datasets(
            batch_size, train_ds, val_ds, test_ds, workers)
        logging.info("Dataset loaded as bidimensional data")
        _verify_dataset_integrity(dataset_folder, disable_dataset_integrity,
                                  enable_deep_dataset_integrity)
        return train_loader, val_loader, test_loader, len(train_ds.classes)

    except RuntimeError:
        logging.debug("No bidimensional found in dataset folder provided")

    ###############################################################################################
    # Verify that eventually a dataset has been correctly loaded
    logging.error(
        "No datasets have been loaded. Verify dataset folder location or dataset folder structure"
    )
    sys.exit(-1)
Example #33
0
def rpca_alm(M,
             gamma=None,
             tol=1e-7,
             maxiter=500,
             verbose=True,
             use_rand_svd=False):
    """
	Finds the Principal Component Pursuit solution 
	# %
	minmize		 ||L||_* + gamma || S ||_1 
	# %
	subject to	  L + S = M
	# %
	using an augmented Lagrangian approach
	# %
	Usage:  L,S,iter  = rpca_alm(M,gamma=None,tol=1e-7,maxiter=500)
	# %
	Inputs:
	# %
	M	   - input matrix of size n1 x n2 
	# %
	gamma  - parameter defining the objective functional 

	tol	 - algorithm stops when ||M - L - S||_F <= delta ||M||_F 
	# %
	maxiter - maximum number of iterations
	# %
	Outputs: 

	L		- low-rank component

	S		- sparse component
	# %
	iter	 - number of iterations to reach convergence

	Reference:
	# %
	   Candes, Li, Ma, and Wright 
	   Robust Principal Component Analysis? 
	   Submitted for publication, December 2009.
	# %
	Written by: Alex Papanicolaou
	Created: January 2015"""
    n = M.shape
    Frob_norm = LA.norm(M, 'fro')
    two_norm = LA.norm(M, 2)
    one_norm = np.sum(np.abs(M))
    inf_norm = np.max(np.abs(M))

    if gamma is None:
        gamma = 1 / np.sqrt(np.max(n))

    K = 1
    if verbose and isinstance(verbose, int):
        K = verbose

    mu_inv = 4 * one_norm / np.prod(n)

    # Kicking
    k = np.min(
        [np.floor(mu_inv / two_norm),
         np.floor(gamma * mu_inv / inf_norm)])
    Y = k * M
    sv = 10

    # Variable init
    zero_mat = np.zeros(n)
    S = zero_mat.copy()
    L = zero_mat.copy()
    R = M.copy()
    T1 = zero_mat.copy()
    T2 = zero_mat.copy()

    np.multiply(Y, mu_inv, out=T1)
    np.add(T1, M, out=T1)

    for k in range(maxiter):
        # Shrink entries
        np.subtract(T1, L, out=T2)
        S = vector_shrink(T2, gamma * mu_inv, out=S)

        # Shrink singular values
        np.subtract(T1, S, out=T2)
        L, r = matrix_shrink(T2, mu_inv, sv, out=L, use_rand_svd=use_rand_svd)

        if r < sv:
            sv = np.min([r + 1, np.min(n)])
        else:
            sv = np.min([r + np.round(0.05 * np.min(n)), np.min(n)])

        np.subtract(M, L, out=R)
        np.subtract(R, S, out=R)
        stopCriterion = LA.norm(R, 'fro') / Frob_norm

        if verbose and k % K == 0:
            print "iter: {0}, rank(L) {1}, |S|_0: {2}, stopCriterion {3}".format(
                k, r, np.sum(np.abs(S) > 0), stopCriterion)

        # Check convergence
        if stopCriterion < tol:
            break

        # Update dual variable
        np.multiply(R, 1. / mu_inv, out=T2)
        np.add(T2, Y, out=Y)
        # Y += R/mu_inv

        np.add(T1, R, out=T1)

    niter = k + 1
    if verbose:
        print "iter: {0}, rank(L) {1}, |S|_0: {2}, stopCriterion {3}".format(
            k, r, np.sum(np.abs(S) > 0), stopCriterion)

    return (L, S, niter)
Example #34
0
def vector_shrink(X, tau, out=None):
    out = np.absolute(X, out=out)    # |X| (allocates a new array when out is None)
    np.subtract(out, tau, out=out)   # |X| - tau
    np.maximum(out, 0.0, out=out)    # clip negative entries to zero
    return np.multiply(np.sign(X), out, out=out)   # restore the original signs
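Soft-thresholding in action on hypothetical values: entries with magnitude below tau are zeroed and the rest shrink toward zero by tau (the out=None default relies on rebinding out to the freshly allocated array, as above).

import numpy as np

print(vector_shrink(np.array([-3.0, -0.5, 0.0, 2.0]), 1.0))
# [-2.  0.  0.  1.]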
Example #35
0
print(a1 + a2)
print()

a3 = np.arange(1, 4).reshape(3, 1)
print(a3)
print(a1 + a3)
print()

# Arithmetic operations

a1 = np.arange(1, 10)
print(a1)
print(a1 + 1)
print(np.add(a1, 10)) # addition function
print(a1 - 2)
print(np.subtract(a2, 10)) # subtraction function
print(-a1)
print(np.negative(a1)) # positive => negative, negative => positive
print(a1 * 2)
print(np.multiply(a1, 2)) # multiplication function
print(a1 / 2)
print(np.divide(a1, 2)) # division function
print(a1 // 2)
print(np.floor_divide(a1, 2)) # floor division (quotient)
print(a1 ** 2)
print(np.power(a1, 2)) # exponentiation function
print(a1 % 2)
print(np.mod(a1, 2)) # modulo (remainder after division)
print()

a1 = np.arange(1, 10)
Example #36
0
    def train(self, traintype, **kwargs):
        # {{{
        '''
        Passes the dataset and any kwargs into the appropriate training set optimization.
        '''
        if 'loss' in kwargs:
            loss = kwargs['loss']
        else:
            loss = 'neg_mean_squared_error' # erratic for LiF
#            loss = 'neg_mean_absolute_error' # erratic
#            loss = 'explained_variance' # smooth but wrong for LiF
#            loss = 'neg_median_absolute_error' # erratic
            # skl default is 'r2' but it's terrible
        if 'kernel' in kwargs:
            kernel = kwargs['kernel']
        else:
            kernel = 'rbf'

        if traintype.lower() in ["krr","kernel_ridge"]:
            # {{{
            print("Training via the {} algorithm . . .".format(traintype))
#            ds, t_AVG = krr.train(self, **kwargs) 
            from sklearn.kernel_ridge import KernelRidge
            from sklearn.model_selection import GridSearchCV

            t_REPS = [self.grand['representations'][tr] for tr in self.data['trainers']]
            t_VALS = [self.grand['values'][tr] for tr in self.data['trainers']]
            t_AVG = np.mean(t_VALS)
            t_VALS = np.subtract(t_VALS,t_AVG)

            # get the hypers s(igma) = kernel width and l(ambda) = regularization
            # NOTE: while my input file uses "s" and "l", skl treats these
            # as "gamma" and "alpha" where gamma = 1/(2*s**2) and alpha = l
            # TODO: Depending on the kernel, s/gamma may not be necessary
            if self.data['hypers']:
                print("Loading hyperparameters from Dataset.")
                gamma = 1.0 / 2.0 / self.data['s']**2 
                alpha = self.data['l'] 
                krr = KernelRidge(kernel=kernel,alpha=alpha,gamma=gamma)
            else:
                krr = KernelRidge(kernel=kernel)
                if 'k' in kwargs:
                    k = kwargs['k']
                else:
                    k = self.setup['M']
                parameters = {'alpha':np.logspace(-12,12,num=50),
                              'gamma':np.logspace(-12,12,num=50)}
                krr_regressor = GridSearchCV(krr,parameters,scoring=loss,cv=k)
                krr_regressor.fit(t_REPS,t_VALS)
                self.data['hypers'] = True
                self.data['s'] = 1.0 / (2*krr_regressor.best_params_['gamma'])**0.5
                self.data['l'] = krr_regressor.best_params_['alpha']
                krr = krr_regressor.best_estimator_
        
            # train for a(lpha) = regression coefficients
            # NOTE: I call the coeffs "a" while skl uses "dual_coef"
            if self.data['a']:
                print("Loading coefficients from Dataset.")
                alpha = np.asarray(self.data['a'])
                krr.dual_coef_ = alpha
            else:
                print("Model training using s = {} and l = {} . . .".format(self.data['s'],self.data['l']))
                krr.fit(t_REPS,t_VALS)
                self.data['a'] = list(krr.dual_coef_)
            return self, t_AVG
            # }}}

        elif traintype.lower() in ["home_krr","home_kernel_ridge"]:
            print("Training via the {} algorithm . . .".format(traintype))
            print("NOTE: This algorithm only includes the radial basis function with a custom loss function.")
            ds, t_AVG = krr.train(self, **kwargs) 
            return ds, t_AVG

        else:
            raise RuntimeError("Cannot train using {} yet.".format(traintype))
Example #37
0
#!/usr/bin/env python
# coding: utf-8

# In[1]:

import numpy as np
x = np.array([1, 2, 3, 4])
y = np.array([5.5, 6.5, 7.5, 8.5])
print("add:", np.add(x, y))
print("subtract:", np.subtract(x, y))
print("multiply:", np.multiply(x, y))
print("division:", np.divide(x, y))

# In[4]:

X = np.array([[1, 2], [3, 4]])
sorted_columns = np.sort(X, axis=0)
print("sorted columns:", sorted_columns)
print("max:", X.max())
print("min:", X.min())
print("median:", np.median(X))
print("square root:", np.sqrt(X))
print("mean:", X.mean())
print("standard deviation:", X.std())
print("exponent:", np.exp(X))

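For reference, the ufunc spellings used in this example are interchangeable with the array operators; a quick self-check:

import numpy as np

x = np.array([1, 2, 3, 4])
y = np.array([5.5, 6.5, 7.5, 8.5])
assert np.array_equal(np.add(x, y), x + y)
assert np.array_equal(np.subtract(x, y), x - y)
assert np.array_equal(np.multiply(x, y), x * y)
assert np.array_equal(np.divide(x, y), x / y)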
Example #38
0
def runGraphFaster(video_file_name, input_tensor, output_tensor, labels, session, session_name):
    # Performs inference using the passed model parameters. 
    global flagfound
    global n
    n = 0
    results = []

    # setup pointer to video file
    if args.deinterlace == True:
        deinterlace = 'yadif'
    else:
        deinterlace = ''

    # Let's get the video meta data
    video_filename, video_file_extension = path.splitext(path.basename(video_file_name))
    video_metadata = getinfo(video_file_name)
    num_seconds = int(float(video_metadata['streams'][0]['duration']))
    num_of_frames = int(float(video_metadata['streams'][0]['duration_ts']))
    video_width = int(video_metadata['streams'][0]['width'])
    video_height = int(video_metadata['streams'][0]['height'])
    
    # let's get the real FPS as we don't want duplicate frames!
    effective_fps = int(num_of_frames / num_seconds)
    if effective_fps > int(args.fps):
        effective_fps = int(args.fps)
        num_of_frames = num_seconds * int(args.fps)
    
    source_frame_size = str(video_width) + 'x' + str(video_height)
    target_frame_size = args.width + 'x' + args.height

    if(args.training == True):
        frame_size = source_frame_size
    else:
        frame_size = target_frame_size
        video_width = int(args.width)
        video_height = int(args.height)

    print(' ')
    print('Processing ' + str(num_seconds) + ' seconds of ' + video_filename + ' at ' + str(effective_fps) + 
        ' frame(s) per second with ' + frame_size + ' source frame size.')
    # ffmpeg only honours the last -vf option, so combine fps, deinterlace and scale into one filter chain
    video_filters = 'fps=' + args.fps
    if deinterlace:
        video_filters += ',' + deinterlace
    video_filters += ',scale=' + frame_size
    command = [
        FFMPEG_PATH, '-i', video_file_name,
        '-r', args.fps, '-vcodec', 'rawvideo', '-pix_fmt', 'rgb24', '-vsync', 'vfr',
        '-hide_banner', '-loglevel', '0', '-vf', video_filters, '-f', 'image2pipe', '-'
    ]
    image_pipe = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=4*1024*1024)

    # setup the input and output tensors, resolved from the passed session's graph
    output_tensor = session.graph.get_tensor_by_name(session_name + '/' + output_tensor)
    input_tensor = session.graph.get_tensor_by_name(session_name + '/' + input_tensor)

    # build the list of label indices to report scores for
    top_k = list(range(len(labels)))

    while True:
        # read next frame
        raw_image = image_pipe.stdout.read(int(video_width)*int(video_height)*3)
        if not raw_image:
            break # stop processing frames EOF!
        else:
            # Run model and get predictions
            processed_image = np.frombuffer(raw_image, dtype='uint8')
            # rawvideo delivers frames row-major: height x width x 3 channels
            processed_image = processed_image.reshape((int(video_height), int(video_width), 3))
            
            if frame_size != target_frame_size:
                # we need to fix the frame size so the model does not panic!
                fixed_image = Image.frombytes('RGB', (int(video_width), int(video_height)), processed_image)
                fixed_image = fixed_image.resize((int(args.width), int(args.height)), PIL.Image.ANTIALIAS)
                fixed_image = np.expand_dims(fixed_image, 0)
                final_image = np.divide(np.subtract(fixed_image, [0]), [255])
            else:
                processed_image = processed_image.astype(float)
                processed_image = np.expand_dims(processed_image, 0)
                final_image = np.divide(np.subtract(processed_image, [0]), [255])

            predictions = session.run(output_tensor, {input_tensor: final_image})
            predictions = np.squeeze(predictions)
            image_pipe.stdout.flush()
            n = n + 1

        data_line = []
        for node_id in top_k:
            human_string = labels[node_id]
            score = predictions[node_id]

            score = float("{0:.4f}".format(score))
            data_line.append(human_string)
            data_line.append(score)

            # save frames that are around the decision boundary so they can then be used for later model re-training.
            if args.training == True:
                if score >= float(int(args.traininglower) / 100) and score <= float(int(args.trainingupper) / 100):
                    save_training_frames(raw_image, n, human_string, video_width, video_height, int(score * 100), video_filename)

        results.append(data_line)
        drawProgressBar(percentage(n, num_of_frames) / 100, 40)

    print(' ')
    print(str(n) + ' video frames processed for ' + video_file_name)
    return results
Example #39
0
def getResidual(target, predicted):
    """Create residual frame from target frame - predicted frame"""
    return np.subtract(target, predicted)
Example #40
0
def getMAD(tBlock, aBlock):
    """
    Returns Mean Absolute Difference between current frame macroblock (tBlock) and anchor frame macroblock (aBlock)
    """
    return np.sum(np.abs(np.subtract(tBlock, aBlock)))/(tBlock.shape[0]*tBlock.shape[1])
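A hypothetical usage of getMAD on two tiny 2x2 blocks (the pixel values are arbitrary):

import numpy as np

tBlock = np.array([[10, 12], [14, 16]], dtype=float)
aBlock = np.array([[11, 12], [13, 18]], dtype=float)
# |10-11| + |12-12| + |14-13| + |16-18| = 4 over 2*2 pixels -> MAD = 1.0
print(getMAD(tBlock, aBlock))  # 1.0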
Example #41
0
def compute_r2(ytrue, ypred):

    numerator = np.sum(np.square(np.subtract(ytrue, ypred)))
    denominator = np.sum(np.square(np.subtract(ytrue, np.mean(ytrue))))

    return 1 - (numerator / denominator)
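A quick sanity check of compute_r2 against scikit-learn's r2_score (assuming scikit-learn is installed):

import numpy as np
from sklearn.metrics import r2_score

ytrue = np.array([3.0, -0.5, 2.0, 7.0])
ypred = np.array([2.5, 0.0, 2.0, 8.0])
print(compute_r2(ytrue, ypred))  # ~0.9486
print(r2_score(ytrue, ypred))    # same value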
Example #42
0
def compute_mse(ytrue, ypred):
    return np.mean(np.square(np.subtract(ypred, ytrue)))
Example #43
0
def innovation_function(real_obs, pred_obs):
    #Not used by me
    return np.subtract(pred_obs, real_obs)
Example #44
0
    def predict_step(self, dt, save=False):
        # Create augmented state variable
        x_a = np.concatenate((self.x, self.Q.mu, self.R.mu))

        # Create augmented covariance
        L = self.S.dim + self.Q.dim + self.R.dim
        S_a = np.zeros((L, L))
        S_a[0:self.S.dim, 0:self.S.dim] = self.S.cov
        S_a[self.S.dim:self.S.dim + self.Q.dim,
            self.S.dim:self.S.dim + self.Q.dim] = self.Q.cov
        S_a[L - self.R.dim:, L - self.R.dim:] = self.R.cov
        S_a *= self.sqrt_L_plus_kappa

        # Calculate sigma points
        nsp = 2 * L + 1
        X_a = np.tile(x_a, (1, nsp))
        # Default sigma point calculation
        X_a[:, 1:self._xdim + 1] += S_a
        X_a[:, self._xdim + 1:] -= S_a
        # Special sigma point calculation
        if self.sigmapoint_fn is not None:
            X_a[self.special_xinds, :] = self.sigmapoint_fn(
                x_a[self.special_xinds], S_a[self.special_xinds, :])

        # Propagate sigmapoints through process function. It should know how to handle special_inds
        X_k = np.nan * np.ones((self._xdim, X_a.shape[1]))
        for sig_ix in range(X_a.shape[1]):
            X_k[:, sig_ix] = self.process_fn(
                X_a[:self._xdim, sig_ix],
                X_a[self._xdim + 1:self._xdim + self.Q.dim, sig_ix], dt)

        # Predicted state = weighted sum of propagated sigma points
        # Default mean
        x_k = self.W[0] * X_k[:, 0] + self.W[1] * np.sum(X_k[:, 1:], axis=1)
        if self.mean_fn is not None:
            # Special mean
            x_k[self.special_xinds] = self.mean_fn(X_k[self.special_xinds, :],
                                                   self.W)

        # Sigma residuals, used in process noise
        # Default residuals
        X_k_residuals = np.subtract(X_k, x_k)
        if self.residualx_fn is not None:
            # Special residuals
            for sig_ix in range(X_k.shape[1]):
                X_k_residuals[self.special_xinds, sig_ix] = self.residualx_fn(
                    X_k[self.special_xinds, sig_ix], x_k[self.special_xinds])

        # process noise = qr update of weighted X_k_residuals
        [_, S_k] = qr((self.sqrtW[1] * X_k_residuals[:, 1:]).T,
                      mode='economic')  # Upper
        S_k = cholupdate(S_k, self.sqrtW[2] * X_k_residuals[:, 0],
                         '+' if self.W2isPos else '-')  # Upper

        if save:
            self.X_a = X_a  # Augmented sigma points
            self.x_k = x_k  # Predicted state
            self.X_k = X_k  # Predicted sigma points
            self.X_k_residuals = X_k_residuals
            self.S_k = S_k  # Process noise

        return x_k, S_k.T
Example #45
0
 def worldCollisionTest(self):
     collisions = []
     for m1 in self.movers:
         if m1 in self.movers:
             # Collision against world boundaries
             if m1.position[0] < 0 or m1.position[0] > self.dimensions[
                     0] or m1.position[1] < 0 or m1.position[
                         1] > self.dimensions[1]:
                 collisions.append((m1, self))
             # Collision against obstacles
             for o in self.obstacles:
                 c = False
                 needCheckVertex = False
                 if isinstance(m1, Agent) and m1.moveTarget != None:
                     moverRadius = m1.getRadius()
                     lines = o.getLines()
                     direction = numpy.subtract(m1.moveTarget, m1.position)
                     magnitude = numpy.linalg.norm(direction)
                     if magnitude > 0:
                         direction = direction / magnitude
                     nextPosition = tuple(
                         numpy.add(m1.position,
                                   direction * (m1.speed[0] + moverRadius)))
                     p = rayTraceWorldNoEndPoints(m1.position, nextPosition,
                                                  lines)
                     if p == None:
                         needCheckVertex = True
                 if needCheckVertex:
                     for v in o.getPoints():
                         # check v between m1.position and nextPosition
                         if between(v[0], m1.position[0],
                                    nextPosition[0]) and between(
                                        v[1], m1.position[1],
                                        nextPosition[1]):
                             d = minimumDistance(
                                 (m1.position, nextPosition), v)
                             if d < moverRadius:
                                 needCheckVertex = False
                                 break
                     if needCheckVertex:
                         for l in lines:
                             if minimumDistance(l,
                                                nextPosition) < moverRadius:
                                 needCheckVertex = False
                                 break
                 if not needCheckVertex:
                     for l in o.getLines():
                         for r in ((m1.rect.topleft, m1.rect.topright),
                                   (m1.rect.topright, m1.rect.bottomright),
                                   (m1.rect.bottomright,
                                    m1.rect.bottomleft),
                                   (m1.rect.bottomleft, m1.rect.topleft)):
                             hit = calculateIntersectPoint(
                                 l[0], l[1], r[0], r[1])
                             if hit is not None:
                                 c = True
                 if c:
                     collisions.append((m1, o))
             # Movers against movers
             for m2 in self.movers:
                 if m2 in self.movers:
                     if m1 != m2:
                         if (m1, m2) not in collisions and (
                                 m2, m1) not in collisions:
                             if m1.rect.colliderect(m2.rect):
                                 collisions.append((m1, m2))
     for c in collisions:
         c[0].collision(c[1])
         c[1].collision(c[0])
Example #46
0
 def mti_improvement(self, iq_mat):
     improved_iq = np.zeros((iq_mat.shape[0]-1,iq_mat.shape[1]),dtype=np.complex128)
     improved_iq[0,:] = iq_mat[0,:]
     for fast_time_sample_index in range(1,iq_mat.shape[0] - 1):
         improved_iq[fast_time_sample_index,:] = np.subtract(iq_mat[fast_time_sample_index,:], iq_mat[fast_time_sample_index - 1,:])
     return improved_iq
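The loop above is a two-pulse canceller (pulse-to-pulse difference); a vectorized sketch that should reproduce the same output, shown only as an illustration:

import numpy as np

def mti_improvement_vectorized(iq_mat):
    improved_iq = np.empty((iq_mat.shape[0] - 1, iq_mat.shape[1]), dtype=np.complex128)
    improved_iq[0, :] = iq_mat[0, :]
    improved_iq[1:, :] = iq_mat[1:-1, :] - iq_mat[:-2, :]
    return improved_iq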
Example #47
0
def dcg_at_k(r, k):
    r = np.asfarray(r)[:k]
    if r.size:
        return np.sum(
            np.subtract(np.power(2, r), 1) / np.log2(np.arange(2, r.size + 2)))
    return 0.
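A worked usage of dcg_at_k on a short relevance list (gain 2**r - 1, log2 position discount):

import numpy as np

r = [3, 2, 3, 0, 1]
# (2**3-1)/log2(2) + (2**2-1)/log2(3) + (2**3-1)/log2(4) = 7 + 1.89 + 3.5 ~= 12.39
print(dcg_at_k(r, k=3))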
Example #48
0
def check_func(argum1):

    to_email = ['*****@*****.**']
    np.set_printoptions(threshold=sys.maxsize)
    #print("inside: ", argum1)
    FRmodel = faceRecoModel(input_shape=(3, 96, 96))
    lost = '/home/zemotacqy/hack-moscow-backend/routes/../uploads/lost'
    #print("lost FilePath: ", lost)
    files = os.listdir(lost)
    #print(files)
    #sys.stdout.flush()
    FRmodel.compile(optimizer='adam', loss=triplet_loss, metrics=['accuracy'])
    #print("Loading weitghts:")
    load_weights_from_FaceNet(FRmodel)
    cur_encoding = img_to_encoding(argum1, FRmodel)
    #print(cur_encoding)
    #sys.stdout.flush()
    check = 0
    best_match_found = ""
    cur_b_sc = 0.7
    for i in files:
        fadd = join(lost, i)
        imgs2 = os.listdir(fadd)
        imgs = []
        for file in imgs2:
            if file.endswith(".pkl"):
                imgs.append(file)
        #horussurya needforspeed
        for j in imgs:
            with open(join(fadd, j), 'rb') as f:
                val = pickle.load(f)
            dist = np.linalg.norm(np.subtract(cur_encoding, val))
            if dist < cur_b_sc:
                cur_b_sc = dist
                check = 1
                best_match_found = i
    if (check == 1):
        print(best_match_found)
        print(cur_b_sc)
        import smtplib
        from email.mime.text import MIMEText
        from email.mime.multipart import MIMEMultipart

        email_user = '******'
        email_password = '******'
        email_send = to_email[0]
        print(email_send)
        lat = '55.815480'
        long = '7.575385'
        subject = 'Unlost.ai'

        msg = MIMEMultipart()
        msg['From'] = email_user
        msg['To'] = email_send
        msg['Subject'] = subject

        body = 'Hi the missing person you reported has been found! The match error was very low! (around ' + str(
            cur_b_sc) + ').'
        body = body + 'The person was found at (' + lat + "   " + long + ')!'
        msg.attach(MIMEText(body, 'plain'))

        # filename='filename'
        # attachment  =open(filename,'rb')

        # part = MIMEBase('application','octet-stream')
        # part.set_payload((attachment).read())
        # encoders.encode_base64(part)
        # part.add_header('Content-Disposition',"attachment; filename= "+filename)

        # msg.attach(part)
        text = msg.as_string()
        server = smtplib.SMTP('smtp.gmail.com', 587)
        server.starttls()
        server.login(email_user, email_password)

        server.sendmail(email_user, email_send, text)
        server.quit()

        sys.stdout.flush()
    else:
        print("NULL")
        print("NULL")
Example #49
0
                        for b in range(len(datapart[document])):
                            if a != b:
                                result = 0.0 if r(datapart[document], a, b, match_enum) != 1.0 else 1.0
                                full_pos_count += result
                                full_neg_count += 1.0 - result
                                full_count += 1.0


                def logL(k, n):
                    return k * np.log(k / n) + (n - k) * np.log(1 - (k / n))


                lr = 0 if pos_count == 0 else \
                    2 * (logL(pos_count, full_pos_count) + logL(neg_count, full_neg_count) -
                         logL(count, full_count) - logL((full_count - count), full_count))
                new_coeffs[match] += lr / (len(dataset) / DATAPART_SIZE) if lr > 15 else coeffs[match]

            dataset_iterator += DATAPART_SIZE

        coeff_delta = sum(np.abs(np.subtract(coeffs, new_coeffs)))
        coeffs = np.divide(new_coeffs, np.average(new_coeffs))
        print("Delta:", coeff_delta)
        #print(coeffs)
        if coeff_delta < LEARN_THRESHOLD:
            break  # stop once the coefficients have converged
        iterations += 1

    print(coeffs)
# [7547.779687549429, 7370.560696846439, 0, 7621.7671731875935, 7359.9630068893475, 0, 7393.059471743922, 0, 0, 7377.714808089091]
# [17390.609054977325, 17005.836521839523, 0, 17341.00307384879, 17200.020015525355, 0, 17001.184609536514, 0, 0, 17136.45645576466]
# [2, 0, 0, 3, 0, 0, 0, 0, 0, 1]
Example #50
0
 def TestAnalyzer(Ypredict, Ytest):
     RMS = np.sqrt(
         np.sum(np.square(np.subtract(Ypredict, Ytest))) / len(Ytest))
     corr_coef = np.corrcoef(Ypredict, Ytest)[0][1]
     return RMS, corr_coef
Example #51
0
    def evaluateWholes(self, ID_A, ID_B):
        #print("Evaluating wholes...", flush=True)
        # load gt wholes
        gt_wholes_filepath = self.folder_path + "/" + ID_A + "/" + "wholes"
        box = [1]
        wholes_gt = readData(box, gt_wholes_filepath)

        # load block wholes
        inBlocks_wholes_filepath = self.folder_path + "/" + ID_B + "/" + "wholes"
        box = [1]
        wholes_inBlocks = readData(box, inBlocks_wholes_filepath)

        try:  # check that both can be converted to int16
            if np.max(wholes_gt) > 32767 or np.max(wholes_inBlocks) > 32767:
                raise ValueError(
                    "Cannot convert wholes to int16 (max is >32767)")
        except ValueError:
            print(
                "Cannot convert wholes to int16 (max is >32767) -  ignored this Error",
                flush=True)

        wholes_gt = wholes_gt.astype(np.int16)
        wholes_inBlocks = wholes_inBlocks.astype(np.int16)
        wholes_gt = np.subtract(wholes_gt, wholes_inBlocks)
        diff = wholes_gt
        # free some RAM
        del wholes_gt, wholes_inBlocks

        print("Freed memory", flush=True)

        if np.min(diff) < 0:
            FP = diff.copy()
            FP[FP > 0] = 0
            n_points_FP = np.count_nonzero(FP)
            n_comp_FP = computeConnectedComp26(FP) - 1
            print("FP classifications (points/components): " +
                  str(n_points_FP) + "/ " + str(n_comp_FP),
                  flush=True)

            # unique_values = np.unique(FP)
            # for u in unique_values:
            #     if u!=0:
            #         print("Coordinates of component " + str(u))
            #         coods = np.argwhere(FP==u)
            #         for i in range(coods.shape[0]):
            #             print(str(coods[i,0]) + ", " + str(coods[i,1]) + ", " + str(coods[i,2]))

            del FP
        else:
            print("No FP classification", flush=True)

        if np.max(diff) > 0:
            FN = diff.copy()
            FN[FN < 0] = 0
            n_points_FN = np.count_nonzero(FN)
            n_comp_FN = computeConnectedComp26(FN) - 1
            print("FN classifications (points/components): " +
                  str(n_points_FN) + "/ " + str(n_comp_FN),
                  flush=True)
            del FN

        else:
            print("No FN classification", flush=True)

        output_name = 'diff_wholes_' + ID_A + "_" + ID_B
        writeData(self.folder_path + "/" + ID_B + "/" + output_name, diff)

        del diff
Example #52
0
 def prewhiten(self, x):
     mean = np.mean(x)
     std = np.std(x)
     std_adj = np.maximum(std, 1.0 / np.sqrt(x.size))
     y = np.multiply(np.subtract(x, mean), 1 / std_adj)
     return y
Example #53
0
def calculate_posvij_matrices(main_tetrad_ark):
    """ Remember that the main_tetrad_ark is a list of lists,
        with each list containing four tuples, with tuples being
        matrix number and the matrices itself. """

    # Import all the possible solutions to the Vij matrices
    vij_possibilities = matrix_outerprod_calc.illuminator_of_elfes()
    vij_matrices = []

    print("                            ")
    print("    Calculating Vij matrices")
    print("                            ")
    # for i in range(0, len(main_tetrad_ark)):
    for i in range(0, len(vij_possibilities)):
        tet_i = [x[1] for x in main_tetrad_ark[i]]
        tri_tet = [np.transpose(i) for i in tet_i]
        print("# ********************************")
        # print("                                     ")
        print("MATRIX i: ", i)
        print("                                     ")
        for j in range(0, len(main_tetrad_ark)):
            tet_j = [x[1] for x in main_tetrad_ark[j]]
            trj_tet = [np.transpose(j) for j in tet_j]
            vij_temp = []
            # print("# ********************************")
            print("        ")
            print("MATRIX j: ", j)
            temp_zero = np.zeros((4, 4), dtype=int)
            for x in range(0, len(tet_i)):
                test_1half = np.dot(tri_tet[x], tet_j[x])
                test_2half = np.dot(trj_tet[x], tet_i[x])
                test_difs = np.subtract(test_1half, test_2half)
                # print(" ")
                # print(test_difs)
                temp_mat = np.dot(tri_tet[x], tet_j[x]) - np.dot(
                    trj_tet[x], tet_i[x])
                vij_temp.append(temp_mat)
                # print("")
            temp_add1 = np.add(vij_temp[0], vij_temp[1])
            temp_add2 = np.add(temp_add1, vij_temp[2])
            temp_addf = np.add(temp_add2, vij_temp[3])
            # tempf = np.divide(temp_add3, 2)
            for ijx in vij_possibilities:
                if np.array_equal(temp_addf, ijx[0]):
                    print("*************$$$$$$$$$$$$$$$$$$***************** ")
                    print("l-solution found:", ijx[1])
                    print(temp_addf)
                    print("")
                    print(ijx[0])
            if np.array_equal(temp_addf, temp_zero):
                pass
            else:
                vij_matrices.append(temp_addf)
            # print("")
            print(temp_addf)
            # vij_matrices.append(temp_addf)
        vijmats_size = sys.getsizeof(vij_matrices)
        print("Size of Vij Matrices list: bytes / kilobytes:", vijmats_size,
              vijmats_size / 1024)
    print("Length of Vij Matrices")
    print(len(vij_matrices))
    print(vij_matrices)
    pass
Example #54
0
def getDistance(coords1, coords2, unitcell=None):

    diff = coords1 - coords2
    if unitcell is not None:
        diff = subtract(diff, round(diff/unitcell)*unitcell, diff)
    return sqrt(power(diff, 2, diff).sum(axis=-1))
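getDistance above appears to rely on NumPy names imported into the module namespace (subtract, round, power, sqrt); a hedged usage sketch that supplies them and exercises the periodic minimum-image branch:

import numpy as np
from numpy import subtract, round, power, sqrt

coords1 = np.array([0.5, 0.5, 9.5])
coords2 = np.array([0.5, 0.5, 0.5])
unitcell = np.array([10.0, 10.0, 10.0])
print(getDistance(coords1, coords2))            # 9.0 (plain Euclidean distance)
print(getDistance(coords1, coords2, unitcell))  # 1.0 (minimum-image distance)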
Example #55
0
def Calculation(initial_guess, upper_lower_matrix, inverse_of_diagonal):
      new_value = np.matmul(inverse_of_diagonal, np.subtract(initial_vector,
                                      np.matmul(upper_lower_matrix, initial_guess)))
      return (new_value)
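Calculation above performs a single Jacobi-style update, x_new = D^{-1}(b - (L+U)x), with the right-hand side apparently taken from an enclosing scope as initial_vector. A self-contained sketch of the full iteration under that reading (the names below are illustrative):

import numpy as np

def jacobi_solve(A, b, x0, iterations=50):
    D = np.diag(np.diag(A))
    upper_lower = A - D                          # off-diagonal part, L + U
    inverse_of_diagonal = np.diag(1.0 / np.diag(A))
    x = x0
    for _ in range(iterations):
        x = np.matmul(inverse_of_diagonal, np.subtract(b, np.matmul(upper_lower, x)))
    return x

A = np.array([[4.0, 1.0], [2.0, 5.0]])           # diagonally dominant, so Jacobi converges
b = np.array([1.0, 2.0])
print(jacobi_solve(A, b, np.zeros(2)))           # approaches np.linalg.solve(A, b)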
Example #56
0
    def to_svg(self, file=None, canvas_shape=None):
        """Convert the current layer state to an SVG.

        Parameters
        ----------
        file : path-like object, optional
            An object representing a file system path. A path-like object is
            either a str or bytes object representing a path, or an object
            implementing the `os.PathLike` protocol. If passed the svg will be
            written to this file
        canvas_shape : 4-tuple, optional
            View box of SVG canvas to be generated specified as `min-x`,
            `min-y`, `width` and `height`. If not specified, calculated
            from the last two dimensions of the layer.

        Returns
        -------
        svg : string
            SVG representation of the layer.
        """

        if canvas_shape is None:
            min_shape = [r[0] for r in self.dims.range[-2:]]
            max_shape = [r[1] for r in self.dims.range[-2:]]
            shape = np.subtract(max_shape, min_shape)
        else:
            shape = canvas_shape[2:]
            min_shape = canvas_shape[:2]

        props = {
            'xmlns': 'http://www.w3.org/2000/svg',
            'xmlns:xlink': 'http://www.w3.org/1999/xlink',
        }

        xml = Element(
            'svg',
            height=f'{shape[0]}',
            width=f'{shape[1]}',
            version='1.1',
            **props,
        )

        transform = f'translate({-min_shape[1]} {-min_shape[0]})'
        xml_transform = Element('g', transform=transform)

        xml_list = self.to_xml_list()
        for x in xml_list:
            xml_transform.append(x)
        xml.append(xml_transform)

        svg = ('<?xml version=\"1.0\" standalone=\"no\"?>\n' +
               '<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.1//EN\"\n' +
               '\"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\">\n' +
               tostring(xml, encoding='unicode', method='xml'))

        if file:
            # Save svg to file
            with open(file, 'w') as f:
                f.write(svg)

        return svg
Example #57
0
def fit_t(x, precision):
    print('Performing fit_t...')
#    x = np.random.multivariate_normal(da);
    (I,D) = np.shape(x)
    # Initialize the Mean 
    #dataset_mean = np.divide(np.sum(x,axis = 0),I)
    dataset_mean = x.mean(axis=0)
    mu = np.reshape(dataset_mean,(1,-1))
    
    #Initialize sig to the covariance of the dataset.
    dataset_variance = np.zeros([D, D])
    x_minus_dataset_mean = np.subtract(x, dataset_mean)
    for i in range(I):
        mat = np.reshape(x_minus_dataset_mean[i,:],(1,-1))
        mat = np.dot(np.transpose(mat),mat)
        dataset_variance = dataset_variance + mat;
    sig = np.divide(dataset_variance,I);

    ## Initialize degrees of freedom to 1; it is re-estimated in each iteration.
    nu = 1
    ##The main loop.
    iterations = 0    
    previous_L = 1000000 # just a random initialization
    delta = np.zeros([I,1])
    #delta1 = np.zeros([I,1])
    while True:
        #Expectation step.
        #Compute delta.
        x_minus_mu = np.subtract(x, mu)
        temp = np.dot(x_minus_mu,np.linalg.inv(sig))
        for i in range(I):
            delta[i] = np.dot(np.reshape(temp[i,:],(1,-1)),np.transpose(np.reshape(x_minus_mu[i,:],(1,-1))))
            
        # Compute E_hi.
        nu_plus_delta = nu + delta
        E_hi = np.divide((nu + D),nu_plus_delta)
        ## Compute E_log_hi.
        E_log_hi = psi((nu+D)/2) - np.log(nu_plus_delta/2);
      
        ## Maximization step.
        ## Update mu.
                
        E_hi_sum = np.sum(E_hi)
        E_hi_times_xi = E_hi * x
        mu = np.reshape(np.sum(E_hi_times_xi, axis=0),(1,-1))
        mu = np.divide(mu,E_hi_sum)
        ## Update sig.
        x_minus_mu = np.subtract(x, mu)
        sig = np.zeros([D,D])
        for i in range(I):
            xmm = np.reshape(x_minus_mu[i,:],(1,-1))
            sig = sig + (E_hi[i] * np.dot(np.transpose(xmm),xmm))
        sig = sig / E_hi_sum
        
        #Update nu by minimizing a cost function with line search.
        nu = ftc.fit_t_cost(E_hi,E_log_hi)
        
        ## Compute delta again, because the parameters were updated.
        temp = np.dot(x_minus_mu,np.linalg.inv(sig))
        # temp1 = np.linalg.inv(sig)
        for i in range(I):
            delta[i] = np.dot(np.reshape(temp[i,:],(1,-1)),np.transpose(np.reshape(x_minus_mu[i,:],(1,-1))))
            
        ## Compute the log likelihood L.
        (sign, logdet) =  np.linalg.slogdet(np.array(sig))
        L = I * (gammaln((nu+D)/2) - (D/2)*np.log(nu*np.pi) - logdet/2 - gammaln(nu/2))
        s = np.sum(np.log(1 + np.divide(delta,nu))) / 2
        L = L - (nu+D)*s;
        iterations = iterations + 1;
        print(str(iterations)+' : '+str(L))
        if (np.absolute(L - previous_L) < precision) or iterations == 100:
            break
        previous_L = L;
    return(mu,sig,nu)
Example #58
0
def sd(data1, data2):
    return np.sum(np.power(np.subtract(data1, data2), 2) / np.power(data1, 2)) / len(data1)
Example #59
0
def help_periodiclist(lx, x1, pbc):
    if pbc == 0:
        dx = numpy.abs(numpy.subtract(range(int(lx)), x1))
    else:
        # minimum-image distance on a periodic 1-D lattice of length lx
        d0 = numpy.abs(numpy.subtract(range(lx), x1))
        d1 = numpy.abs(numpy.subtract(range(lx), lx + x1))
        d2 = numpy.abs(numpy.subtract(range(lx), -lx + x1))
        dx = numpy.minimum(numpy.minimum(d0, d1), d2)
    return dx
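Usage sketch: distances from every site on a 10-site 1-D lattice to site 2, without and with periodic boundaries:

print(help_periodiclist(10, 2, 0))  # [2 1 0 1 2 3 4 5 6 7]
print(help_periodiclist(10, 2, 1))  # [2 1 0 1 2 3 4 5 4 3]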
Example #60
0
# get number of sites from size of long and lat:
nsites = len(sites_lon)

# Define GEOS-Chem data obtained at same location as monitoring sites:
gc_data_nitrate_annual = np.zeros(nsites)

gc_data_nitrate_mam = np.zeros(nsites)
gc_data_nitrate_jja = np.zeros(nsites)
gc_data_nitrate_son = np.zeros(nsites)
gc_data_nitrate_djf = np.zeros(nsites)

#extract GEOS-Chem data using DEFRA sites lat long
for w in range(len(sites_lat)):
    #print ((sites_lat[w],gc_lat))
    # lat and lon indices:
    lon_index = np.argmin(np.abs(np.subtract(sites_lon[w], gc_lon)))
    lat_index = np.argmin(np.abs(np.subtract(sites_lat[w], gc_lat)))

    #print (lon_index)
    #print (lat_index)
    gc_data_nitrate_annual[w] = GC_surface_nitrate_AM[lon_index, lat_index]
    gc_data_nitrate_mam[w] = GC_surface_nitrate_mam[lon_index, lat_index]
    gc_data_nitrate_jja[w] = GC_surface_nitrate_jja[lon_index, lat_index]
    gc_data_nitrate_son[w] = GC_surface_nitrate_son[lon_index, lat_index]
    gc_data_nitrate_djf[w] = GC_surface_nitrate_djf[lon_index, lat_index]

print(gc_data_nitrate_annual.shape)
print(sites_nitrate_AM.shape)

# quick scatter plot
#plt.plot(sites_nitrate_AM,gc_data_nitrate_annual,'o')
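The argmin-of-absolute-difference pattern above is a nearest-grid-cell lookup; a minimal sketch with made-up grid spacing and site coordinates:

import numpy as np

gc_lon = np.arange(-180.0, 180.0, 2.5)
gc_lat = np.arange(-90.0, 90.1, 2.0)
site_lon, site_lat = -1.25, 52.9

lon_index = np.argmin(np.abs(np.subtract(site_lon, gc_lon)))
lat_index = np.argmin(np.abs(np.subtract(site_lat, gc_lat)))
print(gc_lon[lon_index], gc_lat[lat_index])  # nearest grid-cell centre to the site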