Example #1
def save(data, outputdir, outputfile, outputformat):
    """
    Save data to a variety of formats
    Automatically determines whether data is an array
    or an RDD and handles appropriately
    For RDDs, data are sorted and reshaped based on the keys

    :param data: RDD of key value pairs or array
    :param outputdir: Location to save data to
    :param outputfile: file name to save data to
    :param outputformat: format for data ("matlab", "text", or "image")
    """

    filename = os.path.join(outputdir, outputfile)

    if outputformat == "matlab" or outputformat == "text":
        if isrdd(data):
            dims = getdims(data)
            data = subtoind(data, dims.max)
            keys = data.map(lambda (k, _): int(k)).collect()
            nout = size(data.first()[1])
            if nout > 1:
                for iout in range(0, nout):
                    result = data.map(lambda (_, v): float16(v[iout])).collect()
                    result = array([v for (k, v) in sorted(zip(keys, result), key=lambda (k, v): k)])
                    if outputformat == "matlab":
                        savemat(filename+"-"+str(iout)+".mat",
                                mdict={outputfile+str(iout): squeeze(transpose(reshape(result, dims.num[::-1])))},
                                oned_as='column', do_compression='true')
                    if outputformat == "text":
                        savetxt(filename+"-"+str(iout)+".txt", result, fmt="%.6f")
            else:
                result = data.map(lambda (_, v): float16(v)).collect()
                result = array([v for (k, v) in sorted(zip(keys, result), key=lambda (k, v): k)])
                if outputformat == "matlab":
                    savemat(filename+".mat",
                            mdict={outputfile: squeeze(transpose(reshape(result, dims.num[::-1])))},
                            oned_as='column', do_compression='true')
                if outputformat == "text":
                    savetxt(filename+".txt", result, fmt="%.6f")
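The sort above pairs each collected value with its integer key and reorders by key before reshaping. A minimal standalone sketch of that pattern (plain NumPy, no Spark; the keys and values are made up):

import numpy as np

# Hypothetical collected output: linear-index keys and float16 values, out of order.
keys = [2, 0, 1]
values = [np.float16(20.0), np.float16(0.0), np.float16(10.0)]

# Reorder values by key, exactly as the sorted(zip(...)) comprehension does.
result = np.array([v for (k, v) in sorted(zip(keys, values), key=lambda kv: kv[0])])
print(result)  # [ 0. 10. 20.]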
Example #2
 def testBrokenTypes(self):
   with self.assertRaisesWithPredicateMatch(TypeError, "Categorical"):
     distributions_py.Mixture(None, [])
   cat = distributions_py.Categorical([0.3, 0.2])
   # components must be a list of distributions
   with self.assertRaisesWithPredicateMatch(
       TypeError, "all .* must be Distribution instances"):
     distributions_py.Mixture(cat, [None])
   with self.assertRaisesWithPredicateMatch(TypeError, "same dtype"):
     distributions_py.Mixture(
         cat, [
             distributions_py.Normal(
                 mu=[1.0], sigma=[2.0]), distributions_py.Normal(
                     mu=[np.float16(1.0)], sigma=[np.float16(2.0)])
         ])
   with self.assertRaisesWithPredicateMatch(ValueError, "non-empty list"):
     distributions_py.Mixture(distributions_py.Categorical([0.3, 0.2]), None)
   with self.assertRaisesWithPredicateMatch(TypeError,
                                            "either be continuous or not"):
     distributions_py.Mixture(
         cat, [
             distributions_py.Normal(
                 mu=[1.0], sigma=[2.0]), distributions_py.Bernoulli(
                     dtype=dtypes.float32, logits=[1.0])
         ])
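For contrast, a construction that should satisfy every check above, sketched against the same contrib-era API (the component parameters are assumptions, mirroring the test's argument names):

# Sketch: two components for two categories, same dtype, both continuous.
cat = distributions_py.Categorical([0.3, 0.2])
mix = distributions_py.Mixture(
    cat, [
        distributions_py.Normal(mu=[1.0], sigma=[2.0]),
        distributions_py.Normal(mu=[0.0], sigma=[1.0]),
    ])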
Example #3
  def testReprWorksCorrectlyScalar(self):
    normal = tfd.Normal(loc=np.float16(0), scale=np.float16(1))
    self.assertEqual(
        ("<tf.distributions.Normal"
         " 'Normal'"
         " batch_shape=()"
         " event_shape=()"
         " dtype=float16>"),  # Got the dtype right.
        repr(normal))

    chi2 = tfd.Chi2(df=np.float32([1., 2.]), name="silly")
    self.assertEqual(
        ("<tf.distributions.Chi2"
         " 'silly'"  # What a silly name that is!
         " batch_shape=(2,)"
         " event_shape=()"
         " dtype=float32>"),
        repr(chi2))

    exp = tfd.Exponential(rate=array_ops.placeholder(dtype=dtypes.float32))
    self.assertEqual(
        ("<tf.distributions.Exponential"
         " 'Exponential'"
         " batch_shape=<unknown>"
         " event_shape=()"
         " dtype=float32>"),
        repr(exp))
Example #4
  def testStrWorksCorrectlyScalar(self):
    normal = tfd.Normal(loc=np.float16(0), scale=np.float16(1))
    self.assertEqual(
        ("tf.distributions.Normal("
         "\"Normal\", "
         "batch_shape=(), "
         "event_shape=(), "
         "dtype=float16)"),  # Got the dtype right.
        str(normal))

    chi2 = tfd.Chi2(df=np.float32([1., 2.]), name="silly")
    self.assertEqual(
        ("tf.distributions.Chi2("
         "\"silly\", "  # What a silly name that is!
         "batch_shape=(2,), "
         "event_shape=(), "
         "dtype=float32)"),
        str(chi2))

    exp = tfd.Exponential(rate=array_ops.placeholder(dtype=dtypes.float32))
    self.assertEqual(
        ("tf.distributions.Exponential(\"Exponential\", "
         # No batch shape.
         "event_shape=(), "
         "dtype=float32)"),
        str(exp))
Example #5
    def __init__(self, 
                 Agent, 
                 alpha, 
                 gamma, 
                 epsilon):
        '''
        Initialize V, Q, and a uniform random policy over all states.
        '''

        # Set the agent for this QLearning session
        self.Agent = Agent
        self.alpha = alpha
        self.gamma = gamma
        self.epsilon = epsilon
        self.policy = dict()
        self.Q = dict()
        self.V = dict()
        S = set( [ (i,j) for i in range(-5,6) for j in range(-5,6)] )
        for s in S:
            self.V[s] = numpy.float16( 0 )
            self.Q[s] = dict()
            self.policy[s] = dict()
            
            for a in self.Agent.actions:
                self.policy[s][a] = numpy.float16( 1.0 / len( self.Agent.actions ) )
                
                for o in self.Agent.actions:
                    self.Q[s][(a,o)] = numpy.float16( 0.0 )
Example #6
    def callback_1(self, data):
        # print "difference_cal receives position_1 update. Processing...."
        self._position_1_x = data.x
        self._position_1_y = data.y
        self._position_1_ID = data.ID
        # xmin = self._position_1_x + 0.1
        # xmax = self._position_1_x + 0.6
        # ymin = self._position_1_y - 0.3
        # ymax = self._position_1_y + 0.3
        # if self._position_0_x > xmin and self._position_0_x < xmax and self._position_0_y > ymin and self._position_0_y < ymax:
        #     w1 = 1
        # else:
        #     w1 = 0
        # xmin = self._position_1_x - 0.3
        # xmax = self._position_1_x + 0.3
        # ymin = self._position_1_y + 0.1
        # ymax = self._position_1_y + 0.6
        # if self._position_0_x > xmin and self._position_0_x < xmax and self._position_0_y > ymin and self._position_0_y < ymax:
        #     w2 = 1
        # else:
        #     w2 = 0
        # xmin = self._position_1_x - 0.6
        # xmax = self._position_1_x - 0.1
        # ymin = self._position_1_y - 0.3
        # ymax = self._position_1_y + 0.3
        # if self._position_0_x > xmin and self._position_0_x < xmax and self._position_0_y > ymin and self._position_0_y < ymax:
        #     w3 = 1
        # else:
        #     w3 = 0
        # xmin = self._position_1_x - 0.3
        # xmax = self._position_1_x + 0.3
        # ymin = self._position_1_y - 0.6
        # ymax = self._position_1_y - 0.1
        # if self._position_0_x > xmin and self._position_0_x < xmax and self._position_0_y > ymin and self._position_0_y < ymax:
        #     w4 = 1
        # else:
        #     w4 = 0
        if not self._position_0_x:
            self._position_0_x = [-1]*len(self._position_1_x)
            self._position_0_y = [-1]*len(self._position_1_x)
        w1 = list(np.float16(np.array(self._position_0_x)) - np.float16(np.array(self._position_1_x)))
        w2 = list(np.float16(np.array(self._position_0_y)) - np.float16(np.array(self._position_1_y)))
        if abs(w1[0])<0.3 and abs(w2[0])<0.3:
            rospy.loginfo("The two car are too close with distance of x and y: %s, %s", str(w1[0]), str(w1[2]))
        w3 = list()
        for i in range(len(w1)):
            if abs(w1[i])<1 and abs(w2[i])<1:
                w3.append(1)
            else:
                w3.append(0)
        # print w3
        w1 = [0]*len(w1)
        w2 = [0]*len(w1)
        w4 = [0]*len(w1)
        # print list(self._position_0_x)
        # print list(self._position_0_y)

        self.pub_2.publish(w1, w2, w3, w4, list(self._position_0_x), list(self._position_0_y), self._position_1_ID)
        # print "disturbance_signal_1 sent with w3 = %s, and ID = %s."%(str(w3), str(self._position_1_ID))
        print "disturbance_signal_1 ID = %s."%(str(self._position_1_ID))
Example #7
    def callback_1(self, data):
        # print "difference_cal receives position_1 update. Processing...."
        self._position_1_x = data.x
        self._position_1_y = data.y
        self._position_1_ID = data.ID
        if not self._position_0_x:
            self._position_0_x = [-1]*len(self._position_1_x)
            self._position_0_y = [-1]*len(self._position_1_x)
        w1 = list(np.float16(np.array(self._position_0_x)) - np.float16(np.array(self._position_1_x)))
        w2 = list(np.float16(np.array(self._position_0_y)) - np.float16(np.array(self._position_1_y)))
        if self._position_1_ID < self._horizon:
            if abs(w1[self._position_1_ID])<0.3 and abs(w2[self._position_1_ID])<0.3:
                rospy.loginfo("The two car are getting close.")
        else:
            if abs(w1[self._horizon])<0.3 and abs(w2[self._horizon])<0.3:
                rospy.loginfo("The two car are getting close.")
        w3 = list()
        for i in range(len(w1)):
            if abs(w1[i])<1 and abs(w2[i])<1:
                w3.append(1)
            else:
                w3.append(0)
        # print w3
        # w1 = [0]*len(w1)
        # w2 = [0]*len(w1)
        # w4 = [0]*len(w1)
        # print list(self._position_0_x)
        # print list(self._position_0_y)

        self.pub_2.publish(w3, list(self._position_0_x), list(self._position_0_y), self._position_1_ID)
        # print "disturbance_signal_1 sent with w3 = %s, and ID = %s."%(str(w3), str(self._position_1_ID))
        if self._position_1_ID < self._horizon:
            print "disturbance_signal_1 ID = %s. pos is %s, %s."%(str(self._position_1_ID),str(self._position_1_x[self._position_1_ID]), str(self._position_1_y[self._position_1_ID]))
        else:
            print "disturbance_signal_1 ID = %s. pos is %s, %s."%(str(self._position_1_ID),str(self._position_1_x[self._horizon]), str(self._position_1_y[self._horizon]))
Example #8
    def estimate(self, iterations=100, tolerance=1e-5):
        last_rmse = None

        # the algorithm will converge, but slowly;
        # initializing the latent parameters from plain MF works better
        for iteration in xrange(iterations):
            # update item & user parameter
            self._update_item_params()
            self._update_user_params()

            # update item & user_features
            self._udpate_item_features()
            self._update_user_features()

            # compute RMSE
            # train errors
            train_preds = self.predict(self.train)
            train_rmse = RMSE(train_preds, np.float16(self.train[:, 2]))

            # validation errors
            validation_preds = self.predict(self.validation)
            validation_rmse = RMSE(
                validation_preds, np.float16(self.validation[:, 2]))
            self.train_errors.append(train_rmse)
            self.validation_erros.append(validation_rmse)
            print "iterations: %3d, train RMSE: %.6f, validation RMSE: %.6f " % (iteration + 1, train_rmse, validation_rmse)

            # stop if converge
            if last_rmse:
                if abs(train_rmse - last_rmse) < tolerance:
                    break

            last_rmse = train_rmse
Example #9
    def test_invalid(self):
        prop = bcpp.Int()

        assert not prop.is_valid(0.0)
        assert not prop.is_valid(1.0)
        assert not prop.is_valid(1.0+1.0j)
        assert not prop.is_valid("")
        assert not prop.is_valid(())
        assert not prop.is_valid([])
        assert not prop.is_valid({})
        assert not prop.is_valid(_TestHasProps())
        assert not prop.is_valid(_TestModel())

        assert not prop.is_valid(np.bool8(False))
        assert not prop.is_valid(np.bool8(True))
        assert not prop.is_valid(np.float16(0))
        assert not prop.is_valid(np.float16(1))
        assert not prop.is_valid(np.float32(0))
        assert not prop.is_valid(np.float32(1))
        assert not prop.is_valid(np.float64(0))
        assert not prop.is_valid(np.float64(1))
        assert not prop.is_valid(np.complex64(1.0+1.0j))
        assert not prop.is_valid(np.complex128(1.0+1.0j))
        if hasattr(np, "complex256"):
            assert not prop.is_valid(np.complex256(1.0+1.0j))
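For comparison, values this Int property should accept (a sketch assuming bcpp is Bokeh's core properties module, as prop = bcpp.Int() above suggests):

# Plain Python ints and NumPy integer scalars validate.
assert prop.is_valid(0)
assert prop.is_valid(-7)
assert prop.is_valid(np.int8(1))
assert prop.is_valid(np.uint32(1))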
Example #10
    def _costFunction(self,ep1,ep2,method):
        # Endpoint attributes (1)
        x1 = numpy.array(ep1.Position,dtype=numpy.float16)
        o1 = numpy.array(ep1.Orientation,dtype=numpy.float16)
        t1 = numpy.float16(ep1.Thickness)

        # Endpoint attributes (2)
        x2 = numpy.array(ep2.Position,dtype=numpy.float16)
        o2 = numpy.array(ep2.Orientation,dtype=numpy.float16)
        t2 = numpy.float16(ep2.Thickness)

        # Pair Features
        c = (x1+x2)/2                           # centre
        d = numpy.linalg.norm(x1-x2)            # distance
        k1= (x1-c) / numpy.linalg.norm(x1-c)    # unit vector from centre to endpoint 1
        k2= (x2-c) / numpy.linalg.norm(x2-c)    # unit vector from centre to endpoint 2

        # Measure whether the endpoint orientations are aligned [0: <- -> , 1: -> <-]
        A=1-(2+numpy.dot(k1,o1)+numpy.dot(k2,o2))/4   #[0,1]

        if d>=self.maxDist:return -1
        if A<self.minOrie:return -1

        if method == "dist":
            return d
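A quick numeric check of the alignment measure A = 1 - (2 + k1·o1 + k2·o2)/4 with made-up collinear endpoints: orientations pointing at each other give A = 1, pointing apart give A = 0.

import numpy

x1, x2 = numpy.array([0.0, 0.0]), numpy.array([2.0, 0.0])
c = (x1 + x2) / 2
k1 = (x1 - c) / numpy.linalg.norm(x1 - c)  # unit vector from centre to endpoint 1
k2 = (x2 - c) / numpy.linalg.norm(x2 - c)
o1, o2 = -k1, -k2                          # endpoints oriented toward each other
print(1 - (2 + numpy.dot(k1, o1) + numpy.dot(k2, o2)) / 4)  # 1.0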
Example #11
    def test_nans_infs(self):
        oldsettings = np.seterr(all='ignore')
        try:
            # Check some of the ufuncs
            assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32))
            assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32))
            assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32))
            assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32))
            assert_equal(np.spacing(float16(65504)), np.inf)

            # Check comparisons of all values with NaN
            nan = float16(np.nan)

            assert_(not (self.all_f16 == nan).any())
            assert_(not (nan == self.all_f16).any())

            assert_((self.all_f16 != nan).all())
            assert_((nan != self.all_f16).all())

            assert_(not (self.all_f16 < nan).any())
            assert_(not (nan < self.all_f16).any())

            assert_(not (self.all_f16 <= nan).any())
            assert_(not (nan <= self.all_f16).any())

            assert_(not (self.all_f16 > nan).any())
            assert_(not (nan > self.all_f16).any())

            assert_(not (self.all_f16 >= nan).any())
            assert_(not (nan >= self.all_f16).any())
        finally:
            np.seterr(**oldsettings)
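The np.spacing assertion leans on 65504 being the largest finite float16, so the next representable step overflows. A standalone two-liner confirming both facts:

import numpy as np

print(np.finfo(np.float16).max)       # 65504.0
print(np.spacing(np.float16(65504)))  # inf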
Example #12
def dist_vec(training, test):
    
    n1, d = training.shape
    n2, d1 = test.shape
       
    assert n1 != 0, 'Training set is empty'
    assert n2 != 0, 'Test set is empty'
    assert d==d1, 'Images in training and test sets have different size'
     
    tstart = time.time()
   
    train_squared = np.sum(np.square(training), axis = 1)
    test_squared = np.sum(np.square(test), axis = 1)
    
    A = np.tile(train_squared, (n2,1)) # n2 x n1 matrix
    A = A.transpose((1,0))    # n1 x n2 matrix
    B = np.tile(test_squared, (n1,1) ) # n1 x n2 matrix

    a = np.tile(training, (1,1,1)) # 1 x n1 x d array
    a = a.transpose((1,0,2))    # n1 x 1 x d array
    b = np.tile(test, (1,1,1) ) # 1 x n2 x d array
    
    C = np.tensordot(a,b, [[1,2],[0,2]])
    
    dist = A + B - C - C
    
    dist = np.sqrt(dist)
    dist = np.float16(dist)  # downcast to half precision
    
    tstop = time.time()
    
    return dist, tstop-tstart    
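A small sanity check for dist_vec with made-up data: the vectorized expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y should agree with a direct per-pair loop, up to float16 rounding.

import numpy as np

training = np.random.rand(4, 8)
test = np.random.rand(3, 8)
dist, elapsed = dist_vec(training, test)

brute = np.array([[np.linalg.norm(x - y) for y in test] for x in training])
print(np.allclose(np.float64(dist), brute, atol=1e-2))  # True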
Example #13
def Scale_Image(image):  # Scales the input image to the range [0, 255]
	lo = np.amin(image)
	hi = np.amax(image)

	image -= lo
	image = np.float16(image) * (np.float16(255) / np.float16(hi - lo))
	image = np.uint8(image)

	return image
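Quick usage check with a made-up 2x2 array: whatever the input range, the output spans 0..255 as uint8. Note the function shifts its input in place, so pass a copy if the original matters.

import numpy as np

scaled = Scale_Image(np.array([[10, 20], [30, 40]]))
print(scaled)        # [[  0  85]
                     #  [170 255]]
print(scaled.dtype)  # uint8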
Example #14
def npHalfArrayToOIIOFloatPixels(width, height, channels, npPixels):
    if oiio.VERSION < 10800:
        # Read half-float pixels into a numpy float pixel array
        oiioFloatsArray = np.frombuffer(np.getbuffer(np.float16(npPixels)), dtype=np.float32)
    else:
        # Reinterpret the half-float pixel bits as a numpy uint16 array
        oiioFloatsArray = np.frombuffer(np.getbuffer(np.float16(npPixels)), dtype=np.uint16)

    return oiioFloatsArray
Example #15
    def __init__(self,timeSeries=None,
                 lenSeries=2**18,
                 numChannels=1,
                 fMin=400,fMax=800,
                 sampTime=None,
                 noiseRMS=0.1):
        """ Initializes the AmplitudeTimeSeries instance. 
        If a array is not passed, then a random whitenoise dataset is generated.
        Inputs: 
        Len -- Number of time data points (usually a power of 2) 2^38 gives about 65 seconds 
        of 400 MHz sampled data
        The time binning is decided by the bandwidth
        fMin -- lowest frequency (MHz)
        fMax -- highest frequency (MHz)
        noiseRMS -- RMS value of noise (TBD)
        noiseAlpha -- spectral slope (default is white noise) (TBD)
        ONLY GENERATES WHITE NOISE RIGHT NOW!
        """
        self.shape = (np.uint(numChannels),np.uint(lenSeries))
        self.fMax = fMax
        self.fMin = fMin        
        
        if sampTime is None:
            self.sampTime = np.uint(numChannels)*1E-6/(fMax-fMin)
        else:
            self.sampTime = sampTime

        if timeSeries is None:
            # then use the rest of the data to generate a random timeseries
            if VERBOSE:
                print "AmplitudeTimeSeries __init__ did not get new data, generating white noise data"

            self.timeSeries = np.complex64(noiseRMS*(np.float16(random.standard_normal(self.shape))
                                                     +np.float16(random.standard_normal(self.shape))*1j)/np.sqrt(2))
            
        else:
            if VERBOSE:
                print "AmplitudeTimeSeries __init__ got new data, making sure it is reasonable."

            if len(timeSeries.shape) == 1:
                self.shape = (1,timeSeries.shape[0])
                
            else:
                self.shape = timeSeries.shape

            self.timeSeries = np.reshape(np.complex64(timeSeries),self.shape)
            
            self.fMin = fMin
            self.fMax = fMax

            if sampTime is None:
                self.sampTime = numChannels*1E-6/(fMax-fMin)
            else:
                self.sampTime = sampTime

        return None
Example #16
def compute_grad_img(image, height, width):
    image_Sobel_x = Sobel(image, CV_64F, 1, 0, ksize=5)
    image_Sobel_y = Sobel(image, CV_64F, 0, 1, ksize=5)
    img_grad = zeros((height, width), dtype=tuple)
    for row in range(height):
        for col in range(width):
            img_grad[row, col] = (float16(sqrt(image_Sobel_x[row, col][0] ** 2 + image_Sobel_y[row, col][0] ** 2)), \
                                    float16(sqrt(image_Sobel_x[row, col][1] ** 2 + image_Sobel_y[row, col][1] ** 2)), \
                                    float16(sqrt(image_Sobel_x[row, col][2] ** 2 + image_Sobel_y[row, col][2] ** 2)))
    return img_grad
Example #17
    def testFloat(self):
        num = np.float(256.2013)
        self.assertEqual(np.float(ujson.decode(ujson.encode(num))), num)

        num = np.float16(256.2013)
        self.assertEqual(np.float16(ujson.decode(ujson.encode(num))), num)

        num = np.float32(256.2013)
        self.assertEqual(np.float32(ujson.decode(ujson.encode(num))), num)

        num = np.float64(256.2013)
        self.assertEqual(np.float64(ujson.decode(ujson.encode(num))), num)
Example #18
    def testFloatMax(self):
        num = np.float(np.finfo(np.float).max / 10)
        assert_approx_equal(np.float(ujson.decode(ujson.encode(num))), num, 15)

        num = np.float16(np.finfo(np.float16).max / 10)
        assert_approx_equal(np.float16(ujson.decode(ujson.encode(num))), num, 15)

        num = np.float32(np.finfo(np.float32).max / 10)
        assert_approx_equal(np.float32(ujson.decode(ujson.encode(num))), num, 15)

        num = np.float64(np.finfo(np.float64).max / 10)
        assert_approx_equal(np.float64(ujson.decode(ujson.encode(num))), num, 15)
Example #19
def get_resource_data(group_serv, DFR_RAM, DFR_CPU):
    D=pd.DataFrame()
    for G in DFR_RAM.Group.unique():
        if group_serv in G:
            print(G)
            df=pd.DataFrame()
            fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 5))
            ax0, ax1= axes.flat

            d=DFR_RAM[DFR_RAM.Group==G]
            #print(d)
            D=D.append(d)
            X, Y=d.shape
            ind = np.arange(X)
            width = 0.5
            #d=d[['server','Used', 'Available']]
            p1 = ax0.bar(ind, d.Used.values,  color='red', label='Used_memory_Gb')
            p2 = ax0.bar(ind, d.Available.values, color='green', bottom=d.Used.values, label='Available')
            ax0.set_xticks(range(d.shape[0]))
            ax0.set_xticklabels([str(x) for x in d.server.values], rotation=90)
            ax0.set_title(G+" Memory usage Gb")
            ax0.legend()
            ax0.set_xlabel("Gb")
            df=df.append(pd.DataFrame({"Group":G, "Number":d.shape[0],"Resource":"Memory","Sum_used":d.Used.sum(),"Sum_capacity":d.Capacity.sum() }, index=['RAM']))


            d=DFR_CPU[DFR_CPU.Group==G]
            D=D.append(d)
            X, Y=d.shape
            ind = np.arange(X)
            width = 0.5
            #d=d[['server','Used', 'Available']]
            d['Used']=np.float16(d['Used'].str.replace(" %", ""))
            p1 = ax1.bar(ind, d.Used.values,  color='red', label='Used_CPU_%')
            p2 = ax1.bar(ind, d.Available.values, color='green', bottom=d.Used.values, label='Available')
            ax1.set_xticks(range(d.shape[0]))
            ax1.set_xticklabels([str(x) for x in d.server.values], rotation=90)
            ax1.set_title(G+" CPU usage %")
            ax1.set_xlabel("%")


            #fig.text(G)
            fig.set_label(G)
            fig.show()

            df=df.append(pd.DataFrame({"Group":G, "Number":d.shape[0], "Resource":"CPU","Sum_used":d.Used.sum(),"Sum_capacity":100*d.Capacity.sum() }, index=['CPU']), ignore_index=True)
            display(df)
            print("Summary CPU ")
            print("Used "+str(d.Used.sum()/d.Capacity.sum())+" %")
            print("Summary RAM ")
            print("Used "+str(100*np.float16(df['Sum_used'][df.Resource=='Memory'].values/df.Sum_capacity[df.Resource=='Memory'].values)[0])+" %")
    return D
Example #20
def pack(data, ind=None, dims=None, sorting=False, axes=None):
    """Pack an RDD into a dense local array, with options for
    sorting, reshaping, and projecting based on keys

    Parameters
    ----------
    data : RDD of (tuple, array) pairs
        The data to pack into a local array

    ind : int, optional, default = None
        An index, if each record has multiple entries

    dims : Dimensions, optional, default = None
        Dimensions of the keys, for use with sorting and reshaping

    sorting : Boolean, optional, default = False
        Whether to sort the RDD before packing

    axes : int, optional, default = None
        Which axis to do maximum projection along

    Returns
    -------
    result : array
        A local numpy array with the RDD contents

    """

    if dims is None:
        dims = getdims(data)

    if axes is not None:
        nkeys = len(data.first()[0])
        data = data.map(lambda (k, v): (tuple(array(k)[arange(0, nkeys) != axes]), v)).reduceByKey(maximum)
        dims.min = list(array(dims.min)[arange(0, nkeys) != axes])
        dims.max = list(array(dims.max)[arange(0, nkeys) != axes])
        sorting = True  # will always need to sort because reduceByKey changes order

    if ind is None:
        result = data.map(lambda (_, v): float16(v)).collect()
        nout = size(result[0])
    else:
        result = data.map(lambda (_, v): float16(v[ind])).collect()
        nout = size(ind)

    if sorting is True:
        data = subtoind(data, dims.max)
        keys = data.map(lambda (k, _): int(k)).collect()
        result = array([v for (k, v) in sorted(zip(keys, result), key=lambda (k, v): k)])

    return squeeze(transpose(reshape(result, ((nout,) + dims.count())[::-1])))
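The NumPy-side finish is just reshape/transpose/squeeze; run locally on a hypothetical 3x2 grid of keys with nout = 1 it looks like this:

import numpy as np

result = np.arange(6)  # one value per key, already sorted by linear index
packed = np.squeeze(np.transpose(np.reshape(result, (2, 3, 1))))  # ((1,) + (3, 2))[::-1]
print(packed.shape)  # (3, 2)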
Example #21
def Load_images(file_dir, file_name):
    
    if not os.path.exists(file_dir + file_name + '/' + file_name + '_images.npy'):

        img = Image.open(file_dir + file_name + '/' + file_name+'.tif')

        counter=0
        while True:
            try:
                img.seek(counter)
            except EOFError:
                break
            counter+=1

        #Default pic sizes
        n_pixels = 256

        #Initialize 3D image array
        n_frames = counter
        images_raw = np.zeros((n_frames, n_pixels, n_pixels), dtype = np.float16)

        print "n_frames: ", n_frames
        for i in range(n_frames): 
            try:
                img.seek(i)
                print "Loading frame: ", i
                images_raw [i] = np.float16(img)
                if i>9640:# and i%10==0: # 
                    im = plt.imshow(images_raw[i])
                    plt.title("Frame: " +str(i))
                    plt.show()

            except EOFError:
                break

        images_start= 0
        images_end = 9642
        images_raw=images_raw[images_start:images_end]

        print "Re-saving imaging array..."

        np.save(file_dir + file_name + '/' + file_name + '_images', images_raw)
        np.savetxt(file_dir + file_name + '/' + file_name + '_images_start_'+str(images_start)+'_end_'+
        str(images_end), [images_start, images_end])

        quit()

    else:
        images_raw = np.load(file_dir + file_name + '/' + file_name + '_images.npy')
        images_raw = np.float16(images_raw)
        return images_raw
Example #22
def test_is_float():
    # is float
    assert isinstance(1.0, float) is True
    assert isinstance(np.float(1.0), float) is True
    assert isinstance(np.float16(1.0), float) is False
    assert isinstance(np.float32(1.0), float) is False
    assert isinstance(np.float64(1.0), float) is True

    # is np.float
    assert isinstance(1.0, np.float) is True
    assert isinstance(np.float(1.0), np.float) is True
    assert isinstance(np.float16(1.0), np.float) is False
    assert isinstance(np.float32(1.0), np.float) is False
    assert isinstance(np.float64(1.0), np.float) is True
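These results follow from NumPy's class hierarchy: np.float64 is the only NumPy float type that subclasses Python's float (it shares the C double layout), so only it passes an isinstance check against float.

import numpy as np

print(issubclass(np.float16, float))  # False
print(issubclass(np.float32, float))  # False
print(issubclass(np.float64, float))  # True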
Example #23
 def test_is_float(self):
     
     self.assertIsInstance(1.0, float)                   # Yes
     self.assertIsInstance(np.float(1.0), float)         # Yes
     self.assertIsInstance(np.float64(1.0), float)       # Yes
                 
     self.assertNotIsInstance(np.float16(1.0), float)    # No
     self.assertNotIsInstance(np.float32(1.0), float)    # No
     
     self.assertIsInstance(1.0, np.float)                # Yes
     self.assertIsInstance(np.float64(1.0), np.float)    # Yes
     
     self.assertNotIsInstance(np.float16(1.0), np.float) # No
     self.assertNotIsInstance(np.float32(1.0), np.float) # No
Example #24
def convert_R11G11B10_FLOAT(buf):
	# Extract the channels with shifts and masks, add the implicit 1 bit to
	# the mantissas and unbias the exponents:
	rm = np.int8   (((buf >>  0) & 0x3f) | 0x40)
	re = np.float16(((buf >>  6) & 0x1f)) - 15
	gm = np.int8   (((buf >> 11) & 0x3f) | 0x40)
	ge = np.float16(((buf >> 17) & 0x1f)) - 15
	bm = np.int8   (((buf >> 22) & 0x1f) | 0x20)
	be = np.float16(((buf >> 27) & 0x1f)) - 15
	# Calculate floating point values and scale:
	r = scale_float(rm * (2**re))
	g = scale_float(gm * (2**ge))
	b = scale_float(bm * (2**be))
	return np.uint8(np.column_stack((r, g, b)))
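Decoding one channel by hand shows how the shifts and masks recover an 11-bit float (a sketch; scale_float is defined elsewhere in the original module, so it is omitted). Note rm carries an extra factor of 2^6 from the implicit-bit trick, which the final scaling absorbs:

import numpy as np

buf = np.uint32(0b01111000000)            # red channel: exponent 15, mantissa 0
rm = np.int8(((buf >> 0) & 0x3f) | 0x40)  # 6-bit mantissa plus the implicit 1 (0x40)
re = np.float16((buf >> 6) & 0x1f) - 15   # unbias the 5-bit exponent
print(rm * (2.0 ** re) / 64)              # 1.0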
Example #25
    def test_Complex(self):
        prop = Complex()

        self.assertTrue(prop.is_valid(None))
        # TODO: self.assertFalse(prop.is_valid(False))
        # TODO: self.assertFalse(prop.is_valid(True))
        self.assertTrue(prop.is_valid(0))
        self.assertTrue(prop.is_valid(1))
        self.assertTrue(prop.is_valid(0.0))
        self.assertTrue(prop.is_valid(1.0))
        self.assertTrue(prop.is_valid(1.0 + 1.0j))
        self.assertFalse(prop.is_valid(""))
        self.assertFalse(prop.is_valid(()))
        self.assertFalse(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))

        try:
            import numpy as np

            # TODO: self.assertFalse(prop.is_valid(np.bool8(False)))
            # TODO: self.assertFalse(prop.is_valid(np.bool8(True)))
            self.assertTrue(prop.is_valid(np.int8(0)))
            self.assertTrue(prop.is_valid(np.int8(1)))
            self.assertTrue(prop.is_valid(np.int16(0)))
            self.assertTrue(prop.is_valid(np.int16(1)))
            self.assertTrue(prop.is_valid(np.int32(0)))
            self.assertTrue(prop.is_valid(np.int32(1)))
            self.assertTrue(prop.is_valid(np.int64(0)))
            self.assertTrue(prop.is_valid(np.int64(1)))
            self.assertTrue(prop.is_valid(np.uint8(0)))
            self.assertTrue(prop.is_valid(np.uint8(1)))
            self.assertTrue(prop.is_valid(np.uint16(0)))
            self.assertTrue(prop.is_valid(np.uint16(1)))
            self.assertTrue(prop.is_valid(np.uint32(0)))
            self.assertTrue(prop.is_valid(np.uint32(1)))
            self.assertTrue(prop.is_valid(np.uint64(0)))
            self.assertTrue(prop.is_valid(np.uint64(1)))
            self.assertTrue(prop.is_valid(np.float16(0)))
            self.assertTrue(prop.is_valid(np.float16(1)))
            self.assertTrue(prop.is_valid(np.float32(0)))
            self.assertTrue(prop.is_valid(np.float32(1)))
            self.assertTrue(prop.is_valid(np.float64(0)))
            self.assertTrue(prop.is_valid(np.float64(1)))
            self.assertTrue(prop.is_valid(np.complex64(1.0 + 1.0j)))
            self.assertTrue(prop.is_valid(np.complex128(1.0 + 1.0j)))
            self.assertTrue(prop.is_valid(np.complex256(1.0 + 1.0j)))
        except ImportError:
            pass
Example #26
    def _remap(self, dependency_dims, derived_dims):
        if derived_dims:
            ndim = max(derived_dims) + 1
        else:
            ndim = 1

        nd_points_by_key = {}
        for key, coord in self.dependencies.iteritems():
            if coord:
                # Get the points as consistent with the Cube.
                nd_points = self._nd_points(coord, dependency_dims[key], ndim)

                # Restrict to just the dimensions relevant to the derived coord.
                # NB. These are always in Cube-order, so no transpose is needed.
                shape = []
                for dim in derived_dims:
                    shape.append(nd_points.shape[dim])
                # Ensure the array always has at least one dimension to be
                # compatible with normal coordinates.
                if not derived_dims:
                    shape.append(1)
                nd_points.shape = shape
            else:
                # If no coord, treat value as zero.
                # Use a float16 to provide `shape` attribute and avoid
                # promoting other arguments to a higher precision.
                nd_points = np.float16(0)

            nd_points_by_key[key] = nd_points
        return nd_points_by_key
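The trick the comment describes: a NumPy scalar zero has a shape attribute like an array does, and a float16 zero will not drag wider operands up to a higher precision. A standalone check:

import numpy as np

zero = np.float16(0)
print(zero.shape)                             # ()
print((np.float32([1.0, 2.0]) + zero).dtype)  # float32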
Example #27
 def test_convert_np_float(self):
     u = Unit('mile')
     v = Unit('meter')
     self.assertEqual(u.convert(np.float(1.0), v), 1609.344)
     self.assertEqual(u.convert(np.float16(1.0), v), 1609.344)
     self.assertEqual(u.convert(np.float32(1.0), v), 1609.344)
     self.assertEqual(u.convert(np.float64(1.0), v), 1609.344)
Example #28
def predict(mdl, img, patch_size, patch_step, batch_size, dim_img):
    """
    Run the CNN model for image transformation.


    Parameters
    ----------
    img : array
        The image need to be calculated

    patch_size : (int, int)
        The patches dimension

    dim_img : int
        The input image dimension

    Returns
    -------
    img_rec : array
        The reconstructed image assembled from the predicted patches.

      """
    img = np.float16(utils.nor_data(img))
    img_y, img_x = img.shape
    x_img = utils.extract_patches(img, patch_size, patch_step)
    x_img = np.reshape(x_img, (len(x_img), 1, dim_img, dim_img))
    y_img = mdl.predict(x_img, batch_size=batch_size)
    del x_img
    y_img = np.reshape(y_img, (len(y_img), dim_img, dim_img))
    img_rec = utils.reconstruct_patches(y_img, (img_y, img_x), patch_step)
    return img_rec
Example #29
    def test_half_coercion(self):
        """Test that half gets coerced properly with the other types"""
        a16 = np.array((1,),dtype=float16)
        a32 = np.array((1,),dtype=float32)
        b16 = float16(1)
        b32 = float32(1)

        assert_equal(np.power(a16,2).dtype, float16)
        assert_equal(np.power(a16,2.0).dtype, float16)
        assert_equal(np.power(a16,b16).dtype, float16)
        assert_equal(np.power(a16,b32).dtype, float16)
        assert_equal(np.power(a16,a16).dtype, float16)
        assert_equal(np.power(a16,a32).dtype, float32)

        assert_equal(np.power(b16,2).dtype, float64)
        assert_equal(np.power(b16,2.0).dtype, float64)
        assert_equal(np.power(b16,b16).dtype, float16)
        assert_equal(np.power(b16,b32).dtype, float32)
        assert_equal(np.power(b16,a16).dtype, float16)
        assert_equal(np.power(b16,a32).dtype, float32)

        assert_equal(np.power(a32,a16).dtype, float32)
        assert_equal(np.power(a32,b16).dtype, float32)
        assert_equal(np.power(b32,a16).dtype, float16)
        assert_equal(np.power(b32,b16).dtype, float32)
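The asymmetry this test pins down: a float16 array holds its dtype against Python scalars, while a lone float16 scalar is promoted by them. A two-line illustration (results per the legacy value-based casting rules the test targets; NumPy 2.x changes the scalar case):

import numpy as np

print((np.array([1], dtype=np.float16) * 2.0).dtype)  # float16
print((np.float16(1) * 2.0).dtype)                    # float64 under legacy rules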
Example #30
 def test_numpy(self):
     """NumPy objects get serialized to readable JSON."""
     l = [
         np.float32(12.5),
         np.float64(2.0),
         np.float16(0.5),
         np.bool(True),
         np.bool(False),
         np.bool_(True),
         np.unicode_("hello"),
         np.byte(12),
         np.short(12),
         np.intc(-13),
         np.int_(0),
         np.longlong(100),
         np.intp(7),
         np.ubyte(12),
         np.ushort(12),
         np.uintc(13),
         np.ulonglong(100),
         np.uintp(7),
         np.int8(1),
         np.int16(3),
         np.int32(4),
         np.int64(5),
         np.uint8(1),
         np.uint16(3),
         np.uint32(4),
         np.uint64(5),
     ]
     l2 = [l, np.array([1, 2, 3])]
     roundtripped = loads(dumps(l2, cls=EliotJSONEncoder))
     self.assertEqual([l, [1, 2, 3]], roundtripped)
Example #31
	def __init__(self):
		self._lock=threading.RLock()
		self._running=False
		#load calibration parameters
		with open('calibration/Sawyer.yaml') as file:
			H_Sawyer 	= np.array(yaml.load(file)['H'],dtype=np.float64)
		with open('calibration/UR.yaml') as file:
			H_UR 		= np.array(yaml.load(file)['H'],dtype=np.float64)
		with open('calibration/ABB.yaml') as file:
			H_ABB 	= np.array(yaml.load(file)['H'],dtype=np.float64)
		with open('calibration/tx60.yaml') as file:
			H_tx60 	= np.array(yaml.load(file)['H'],dtype=np.float64)
		self.H_UR=H42H3(H_UR)
		self.H_Sawyer=H42H3(H_Sawyer)
		self.H_ABB=H42H3(H_ABB)
		self.H_tx60=H42H3(H_tx60)
		self.H_robot={'ur':self.H_UR,'sawyer':self.H_Sawyer,'abb':self.H_ABB,'staubli':self.H_tx60}
		
		self.distance_report=RRN.GetStructureType("edu.rpi.robotics.distance.distance_report")
		self.dict={'ur':0,'sawyer':1,'abb':2,'staubli':3}
		self.distance_report_dict={}
		for robot_name,robot_idx in self.dict.items():
			self.distance_report_dict[robot_name]=self.distance_report()

		#connect to RR gazebo plugin service
		server=RRN.ConnectService('rr+tcp://localhost:11346/?service=GazeboServer')
		self.w=server.get_worlds(str(server.world_names[0]))
		#create RR pose type
		pose_dtype = RRN.GetNamedArrayDType("com.robotraconteur.geometry.Pose", server)
		self.model_pose = np.zeros((1,), dtype = pose_dtype)

		#form H into RR transformation struct
		self.transformations={}
		self.transformation=RRN.GetStructureType("edu.rpi.robotics.distance.transformation")

		transformation1=self.transformation()
		transformation1.name="UR"
		transformation1.row=len(self.H_UR)
		transformation1.column=len(self.H_UR[0])
		transformation1.H=np.float16(self.H_UR).flatten().tolist()
		self.transformations['ur']=transformation1

		transformation2=self.transformation()
		transformation2.name="Sawyer"
		transformation2.row=len(self.H_Sawyer)
		transformation2.column=len(self.H_Sawyer[0])
		transformation2.H=np.float16(self.H_Sawyer).flatten().tolist()
		self.transformations['sawyer']=transformation2

		transformation3=self.transformation()
		transformation3.name="ABB"
		transformation3.row=len(self.H_ABB)
		transformation3.column=len(self.H_ABB[0])
		transformation3.H=np.float16(self.H_ABB).flatten().tolist()
		self.transformations['abb']=transformation3

		transformation4=self.transformation()
		transformation4.name="Staubli"
		transformation4.row=len(self.H_tx60)
		transformation4.column=len(self.H_tx60[0])
		transformation4.H=np.float16(self.H_tx60).flatten().tolist()
		self.transformations['staubli']=transformation4
		
		
		

		#Connect to robot service
		with open('client_yaml/client_ur.yaml') as file:
			self.url_ur= yaml.load(file)['url']
		with open('client_yaml/client_sawyer.yaml') as file:
			self.url_sawyer= yaml.load(file)['url']
		with open('client_yaml/client_abb.yaml') as file:
			self.url_abb= yaml.load(file)['url']
		with open('client_yaml/client_staubli.yaml') as file:
			self.url_tx60= yaml.load(file)['url']

		self.ur_sub=RRN.SubscribeService(self.url_ur)
		self.sawyer_sub=RRN.SubscribeService(self.url_sawyer)
		self.abb_sub=RRN.SubscribeService(self.url_abb)
		self.tx60_sub=RRN.SubscribeService(self.url_tx60)


		UR_state=self.ur_sub.SubscribeWire("robot_state")
		Sawyer_state=self.sawyer_sub.SubscribeWire("robot_state")
		ABB_state=self.abb_sub.SubscribeWire("robot_state")
		tx60_state=self.tx60_sub.SubscribeWire("robot_state")

		#link and joint names in urdf
		Sawyer_joint_names=["right_j0","right_j1","right_j2","right_j3","right_j4","right_j5","right_j6"]
		UR_joint_names=["shoulder_pan_joint","shoulder_lift_joint","elbow_joint","wrist_1_joint","wrist_2_joint","wrist_3_joint"]
		Sawyer_link_names=["right_l0","right_l1","right_l2","right_l3","right_l4","right_l5","right_l6","right_l1_2","right_l2_2","right_l4_2","right_hand"]
		UR_link_names=['UR_base_link',"shoulder_link","upper_arm_link","forearm_link","wrist_1_link","wrist_2_link","wrist_3_link"]
		ABB_joint_names=['ABB1200_joint_1','ABB1200_joint_2','ABB1200_joint_3','ABB1200_joint_4','ABB1200_joint_5','ABB1200_joint_6']
		ABB_link_names=['ABB1200_base_link','ABB1200_link_1','ABB1200_link_2','ABB1200_link_3','ABB1200_link_4','ABB1200_link_5','ABB1200_link_6']
		tx60_joint_names=['tx60_joint_1','tx60_joint_2','tx60_joint_3','tx60_joint_4','tx60_joint_5','tx60_joint_6']
		tx60_link_names=['tx60_base_link','tx60_link_1','tx60_link_2','tx60_link_3','tx60_link_4','tx60_link_5','tx60_link_6']

		self.robot_state_list=[UR_state,Sawyer_state,ABB_state,tx60_state]
		self.robot_link_list=[UR_link_names,Sawyer_link_names,ABB_link_names,tx60_link_names]
		self.robot_joint_list=[UR_joint_names,Sawyer_joint_names,ABB_joint_names,tx60_joint_names]
		self.num_robot=len(self.robot_state_list)

		######tesseract environment setup:
		self.t_env = Environment()
		urdf_path = FilesystemPath("urdf/combined.urdf")
		srdf_path = FilesystemPath("urdf/combined.srdf")
		assert self.t_env.init(urdf_path, srdf_path, GazeboModelResourceLocator())

		#update robot poses based on calibration file
		self.t_env.changeJointOrigin("ur_pose", Isometry3d(H_UR))
		self.t_env.changeJointOrigin("sawyer_pose", Isometry3d(H_Sawyer))
		self.t_env.changeJointOrigin("abb_pose", Isometry3d(H_ABB))
		self.t_env.changeJointOrigin("staubli_pose", Isometry3d(H_tx60))

		contact_distance=0.2
		monitored_link_names = self.t_env.getLinkNames()
		self.manager = self.t_env.getDiscreteContactManager()
		self.manager.setActiveCollisionObjects(monitored_link_names)
		self.manager.setContactDistanceThreshold(contact_distance)
		# viewer update
		self.viewer = TesseractViewer()
		self.viewer.update_environment(self.t_env, [0,0,0])
		self.viewer.start_serve_background()

		self.robot_def_dict={}
		try:
			UR=self.ur_sub.GetDefaultClientWait(1)
			self.robot_def_dict['ur']=Robot(np.transpose(np.array(UR.robot_info.chains[0].H.tolist())),np.transpose(np.array(UR.robot_info.chains[0].P.tolist())),np.zeros(len(UR.robot_info.joint_info)))
		except:
			pass
		try:
			Sawyer=self.sawyer_sub.GetDefaultClientWait(1)
			self.robot_def_dict['sawyer']=Robot(np.transpose(np.array(Sawyer.robot_info.chains[0].H.tolist())),np.transpose(np.array(Sawyer.robot_info.chains[0].P.tolist())),np.zeros(len(Sawyer.robot_info.joint_info)))
		except:
			pass

		try:
			ABB=self.abb_sub.GetDefaultClientWait(1)
			self.robot_def_dict['abb']=Robot(np.transpose(np.array(ABB.robot_info.chains[0].H.tolist())),np.transpose(np.array(ABB.robot_info.chains[0].P.tolist())),np.zeros(len(ABB.robot_info.joint_info)))
		except:
			pass
		try:
			Staubli=self.tx60_sub.GetDefaultClientWait(1)
			self.robot_def_dict['staubli']=Robot(np.transpose(np.array(Staubli.robot_info.chains[0].H.tolist())),np.transpose(np.array(Staubli.robot_info.chains[0].P.tolist())),np.zeros(len(Staubli.robot_info.joint_info)))
		except:
			pass

		#trajectories
		self.traj_change=False
		self.traj_change_name=None
		self.steps=300
		self.plan_time=0.15
		self.execution_delay=0.03
		self.trajectory={'ur':np.zeros((self.steps,7)),'sawyer':np.zeros((self.steps,8)),'abb':np.zeros((self.steps,7)),'staubli':np.zeros((self.steps,7))}
		self.traj_joint_names={'ur':['shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint', 'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint'],
		'sawyer':['right_j0', 'right_j1', 'right_j2', 'right_j3', 'right_j4', 'right_j5', 'right_j6'],
		'abb':['joint_1', 'joint_2', 'joint_3', 'joint_4', 'joint_5', 'joint_6'],
		'staubli':['joint_1', 'joint_2', 'joint_3', 'joint_4', 'joint_5', 'joint_6']
		}
		self.time_step=0.03
		#initialize static trajectories
		for key, value in self.trajectory.items():
			for i in range(self.steps):
				try:
					value[i]=np.append([0],self.robot_state_list[self.dict[key]].InValue.joint_position)
				except:
					traceback.print_exc()
					value[i]=np.append([0],[0,0,0,0,0,0])
		self.inv={'ur':inv_ur,'sawyer':inv_sawyer,'abb':inv_abb,'staubli':inv_staubli}
		self.joint_names_traj={'ur':inv_ur,'sawyer':inv_sawyer,'abb':inv_abb,'staubli':inv_staubli}

		

		#register service constant
		self.JointTrajectoryWaypoint = RRN.GetStructureType("com.robotraconteur.robotics.trajectory.JointTrajectoryWaypoint")
		self.JointTrajectory = RRN.GetStructureType("com.robotraconteur.robotics.trajectory.JointTrajectory")
Example #32
def nearest_by_svlen_overlap(df_source, df_target, max_dist, size_sim, restrict_samples=False):
    """
    For each variant in df_source, get the nearest variant in df_target. Both dataframes must contain fields
    "#CHROM", "POS", "END", and "SVLEN" (which is the absolute value, no negative SVLEN for DELs). Return a dataframe
    with each source variant ("ID") with the best match by distance and then by size overlap ("TARGET_ID") along with
    distance ("DISTANCE"), reciprocal overlap if variants intersect ("RO", 0 if no overlap), and size proportion
    ("TARGET_SIZE_PROP", target-size / source-size).

    :param df_source: Source dataframe.
    :param df_target: Target dataframe.
    :param max_dist: Maximum distance allowed between a source and a target variant for a match.
    :param size_sim: Minimum length proportion for size-similar matches (exclusive, between 0 and 1).
    :param restrict_samples: If `True` and both dataframes contain a `MERGE_SAMPLES` column, then restrict matches to
        only those that share samples.

    :return: A dataframe with "ID", "TARGET_ID", "DISTANCE", "RO", and "TARGET_SIZE_PROP".
    """

    # Check for expected columns
    if any(col not in df_source.columns for col in ('#CHROM', 'POS', 'END', 'SVLEN', 'ID')):
        raise RuntimeError('Source Dataframe missing at least one column in ("#CHROM", "POS", "END", "SVLEN", "ID")')

    if any(col not in df_target.columns for col in ('#CHROM', 'POS', 'END', 'SVLEN', 'ID')):
        raise RuntimeError('Target Dataframe missing at least one column in ("#CHROM", "POS", "END", "SVLEN", "ID")')

    # IDs must be unique
    if len(set(df_source['ID'])) != df_source.shape[0]:
        raise RuntimeError('Source Dataframe IDs are not unique')

    if len(set(df_target['ID'])) != df_target.shape[0]:
        raise RuntimeError('Target Dataframe IDs are not unique')

    # Determine if variants are matched on MERGE_SAMPLES
    if restrict_samples and 'MERGE_SAMPLES' in df_source.columns and 'MERGE_SAMPLES' in df_target.columns:
        restrict_samples = True
    else:
        restrict_samples = False

    # Subset and cast to int32 (default int64 uses more memory and is not needed)
    if restrict_samples:
        subset_cols = ('#CHROM', 'POS', 'END', 'SVLEN', 'ID', 'MERGE_SAMPLES')
        col_map = {
            '#CHROM': np.object, 'POS': np.int32, 'END': np.int32, 'SVLEN': np.int32, 'ID': np.object,
            'MERGE_SAMPLES': np.object
        }

    else:
        subset_cols = ('#CHROM', 'POS', 'END', 'SVLEN', 'ID')
        col_map = {
            '#CHROM': np.object, 'POS': np.int32, 'END': np.int32, 'SVLEN': np.int32, 'ID': np.object
        }

    stats_box = OrderedDict()
    stats_box["TP-base"] = 0
    stats_box["TP-call"] = 0
    stats_box["FP"] = 0
    stats_box["FN"] = 0
    stats_box["precision"] = 0
    stats_box["recall"] = 0
    stats_box["f1"] = 0
    stats_box["base cnt"] = 0
    stats_box["call cnt"] = 0

    df_source = df_source.loc[:, subset_cols]
    df_target = df_target.loc[:, subset_cols]

    # df_source.set_index('ID', inplace=True)
    copy_df_target = df_target.set_index('ID', inplace=False)

    matched_calls = defaultdict(bool)

    if size_sim <= np.float16(0.0) or size_sim >= np.float16(1.0):
        raise RuntimeError('Length proportion must be between 0 and 1 (exclusive): {}'.format(size_sim))

    # Nearest by #CHROM
    overlap_list = list()  # Dataframe for each chrom (to be merged into one dataframe)

    for chrom in sorted(set(df_source['#CHROM'])):

        df_source_chr = df_source.loc[df_source['#CHROM'] == chrom]
        df_target_chr = copy_df_target.loc[copy_df_target['#CHROM'] == str(chrom)]

        # Target has no values on this chrom, all calls in source file are missed
        if df_target_chr.shape[0] == 0:
            stats_box["FN"] += df_source_chr.shape[0]
            continue

        for index, source_row in df_source_chr.iterrows():
            stats_box["base cnt"] += 1

            pos = source_row['POS']
            end = source_row['END']

            source_id = source_row['ID']

            # print(df_source_chr.columns)
            min_len = np.int32(source_row['SVLEN'] * size_sim)
            max_len = np.int32(source_row['SVLEN'] * (2 - size_sim))

            # Subset target - Skip if no targets within range
            df_target_chr_len = df_target_chr.loc[
                (df_target_chr['SVLEN'] >= min_len) & (df_target_chr['SVLEN'] <= max_len)
            ]
            # Cannot find targets of similar size to the source
            if df_target_chr_len.shape[0] == 0:
                stats_box["FN"] += 1
                continue


            # Get distance to all target records
            distance = df_target_chr_len.apply(
                lambda row: row['POS'] - end if row['POS'] > end else (
                    pos - row['END'] if pos > row['END'] else 0
                ),
                axis=1
            )

            # Assign index and subset minimum
            min_distance = np.min(distance)

            # Only consider potential match within max distance
            if min_distance > max_dist:
                stats_box["FN"] += 1
                continue

            distance = distance.loc[distance == min_distance]

            # More than one target at the minimum distance: ambiguous, skip
            if len(distance) > 1:
                continue
            else:
                # Only one best match, get ID
                match_id = distance.index[0]

            # Save base ID to matched calls
            if not matched_calls[source_row["ID"]]:
                stats_box["TP-base"] += 1
            matched_calls[source_row["ID"]] = True


            if not matched_calls[match_id]:
                stats_box["TP-call"] += 1
            matched_calls[match_id] = True

            # Save match record
            overlap_list.append((index, source_id, match_id, min_distance, len(distance)))

    do_stats_math = True
    if stats_box["TP-base"] == 0 and stats_box["FN"] == 0:
        logging.warning("No TP or FN calls in base!")
        do_stats_math = False
    elif stats_box["TP-call"] == 0 and stats_box["FP"] == 0:
        logging.warning("No TP or FP calls in base!")
        do_stats_math = False
    else:
        logging.info("Results peek: %d TP-base %d FN %.2f%% Recall", stats_box["TP-base"], stats_box["FN"],
                     100 * (float(stats_box["TP-base"]) / (stats_box["TP-base"] + stats_box["FN"])))

    for index, entry in df_target.iterrows():
        # print(entry)
        if not matched_calls[entry["ID"]]:
            stats_box['FP'] += 1

    if do_stats_math:
        stats_box["precision"] = float(stats_box["TP-call"]) / (stats_box["TP-call"] + stats_box["FP"])
        stats_box["recall"] = float(stats_box["TP-base"]) / (stats_box["TP-base"] + stats_box["FN"])

    # F-measure
    numer = stats_box["recall"] * stats_box["precision"]
    denom = stats_box["recall"] + stats_box["precision"]
    if denom != 0:
        stats_box["f1"] = 2 * (numer / denom)
    else:
        stats_box["f1"] = "NaN"

    stats_box["call cnt"] = stats_box["TP-base"] + stats_box["FP"]

    # Merge records
    df = pd.DataFrame(overlap_list, columns=('ID', 'BASE_ID', 'TARGET_ID', 'DISTANCE','TARGET_MATCHES'))

    # Annotate overlap and size similarity (RO and TARGET_SIZE_SIM).
    df['#CHROM'] = list(df_source.loc[df['ID'], '#CHROM'])
    df['POS'] = list(df_source.loc[df['ID'], 'POS'])
    df['END'] = list(df_source.loc[df['ID'], 'END'])
    df['SVLEN'] = list(df_source.loc[df['ID'], 'SVLEN'])



    df['TARGET_POS'] = list(copy_df_target.loc[df['TARGET_ID'], 'POS'])
    df['TARGET_END'] = list(copy_df_target.loc[df['TARGET_ID'], 'END'])
    df['TARGET_SVLEN'] = list(copy_df_target.loc[df['TARGET_ID'], 'SVLEN'])
    df['RO'] = df.apply(lambda row: reciprocal_overlap(*row.loc[['POS', 'END', 'TARGET_POS', 'TARGET_END']]), axis=1)

    # # Set TARGET_SIZE_PROP
    # df['TARGET_SIZE_PROP'] = round(df['TARGET_SVLEN'] / df['SVLEN'], 3)

    df['TARGET_SIZE_SIM'] = df.apply(
        lambda row: size_similarity(*row.loc[['TARGET_SVLEN', 'SVLEN']]), axis=1
    )

    # df['SIM_SCORE'] = df.apply(
    #     lambda row: sim_score(*row.loc[['RO', 'TARGET_SIZE_SIM', 'DISTANCE', 'SVLEN', 'TARGET_SVLEN']]), axis=1
    # )

    # Sort
    df = df.sort_values(['#CHROM', 'POS'])
    df = df.loc[:, ('TARGET_ID', 'BASE_ID', 'TARGET_MATCHES', 'RO','TARGET_SIZE_SIM')]

    # Return dataframe
    return df, stats_box
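Worked example of the size window used above: with size_sim = 0.7 and a 100 bp source SV, candidate targets must have SVLEN within [70, 130], i.e. 100*0.7 through 100*(2 - 0.7).

import numpy as np

size_sim = 0.7
svlen = 100
print(np.int32(svlen * size_sim))        # 70
print(np.int32(svlen * (2 - size_sim)))  # 130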
Example #33
 def testFloatTensor(self):
     self.assertEqual(dtypes.float64, _create_tensor(np.float64()).dtype)  # pylint: disable=no-value-for-parameter
     self.assertEqual(dtypes.float32, _create_tensor(np.float32()).dtype)  # pylint: disable=no-value-for-parameter
     self.assertEqual(dtypes.float16, _create_tensor(np.float16()).dtype)  # pylint: disable=no-value-for-parameter
     self.assertEqual(dtypes.float32, _create_tensor(0.0).dtype)
Example #34
def test_stationarity(ts):
    dftest = adfuller(ts, autolag='AIC')
    return np.float16(dftest[1])
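Hypothetical usage, assuming adfuller is imported from statsmodels.tsa.stattools as the snippet requires: the helper returns just the ADF p-value (element 1 of the adfuller tuple) as a float16, so a random walk should score high (non-stationary) and white noise near zero.

import numpy as np

rng = np.random.RandomState(0)
print(test_stationarity(np.cumsum(rng.normal(size=500))))  # large p-value, e.g. > 0.1
print(test_stationarity(rng.normal(size=500)))             # ~0.0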
Example #35
import numpy as np
import tensorflow as tf
from scipy import misc

PATCH_WIDTH = 100
PATCH_HEIGHT = 100
PATCH_SIZE = PATCH_WIDTH * PATCH_HEIGHT * 3
batch_size = 5
dslr_ = tf.placeholder(tf.float32, [None, PATCH_SIZE])
dslr_image = tf.reshape(dslr_, [-1, PATCH_HEIGHT, PATCH_WIDTH, 3])
adv_ = tf.placeholder(tf.float32, [None, 1])
dslr_gray = tf.reshape(tf.image.rgb_to_grayscale(dslr_image),
                       [-1, PATCH_WIDTH * PATCH_HEIGHT])
#adversarial_ = tf.multiply(enhanced_gray, 1 - adv_) + tf.multiply(dslr_gray, adv_)
adversarial_ = tf.multiply(dslr_gray, adv_)
adversarial_image = tf.reshape(adversarial_,
                               [-1, PATCH_HEIGHT, PATCH_WIDTH, 1])
discrim_target = tf.concat([adv_, 1 - adv_], 1)
with tf.Session() as sess:
    image = np.float16(
        misc.imread("test_image\\patches\\iphone\\iphone\\1.jpg")) / 255
    image_ = np.reshape(image, [1, PATCH_SIZE])
    all_zeros = np.reshape(np.zeros((batch_size, 1)), [batch_size, 1])

    print("discrim_target")
    print(sess.run(discrim_target, feed_dict={dslr_: image_, adv_: all_zeros}))
    print(discrim_target)
    print("dslr_gray")
    print(sess.run(dslr_gray, feed_dict={dslr_: image_, adv_: all_zeros}))
    print(dslr_gray)
    print('image_')
    print(image_.shape)
    print('all_zeros')
    print(all_zeros.shape)
Example #36
 def get_machine_half_product(self):
     return np.float16(ProductSumTest.__MACHINE_NUMBER * self.__times)
Example #37
def halfToUInt16(halfValue):
    return np.frombuffer(np.getbuffer(np.float16(halfValue)), dtype=np.uint16)[0]
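np.getbuffer exists only on Python 2; an equivalent that also runs on Python 3 goes through tobytes (a sketch of the same bit-for-bit reinterpretation):

import numpy as np

def half_to_uint16(half_value):
    return np.frombuffer(np.float16(half_value).tobytes(), dtype=np.uint16)[0]

print(hex(half_to_uint16(1.0)))  # 0x3c00, the IEEE 754 binary16 pattern for 1.0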
Example #38
import unittest
from fractions import Fraction

import numpy as np

from artiq.protocols import pyon

_pyon_test_object = {
    (1, 2): [(3, 4.2), (2, )],
    Fraction(3, 4): np.linspace(5, 10, 1),
    "a": np.int8(9),
    "b": np.int16(-98),
    "c": np.int32(42),
    "d": np.int64(-5),
    "e": np.uint8(8),
    "f": np.uint16(5),
    "g": np.uint32(4),
    "h": np.uint64(9),
    "x": np.float16(9.0),
    "y": np.float32(9.0),
    "z": np.float64(9.0),
}


class PYON(unittest.TestCase):
    def test_encdec(self):
        for enc in pyon.encode, lambda x: pyon.encode(x, True):
            self.assertEqual(pyon.decode(enc(_pyon_test_object)),
                             _pyon_test_object)


_json_test_object = {
    "a": "b",
    "x": [1, 2, {}],
Example #39
    def test_experiment_handles_numpy_numbers(self):
        nums_to_test = [
            ("int_", np.int_()),
            ("intc", np.intc()),
            ("intp", np.intp()),
            ("int8", np.int8()),
            ("int16", np.int16()),
            ("int32", np.int32()),
            ("int64", np.int64()),
            ("uint8", np.uint8()),
            ("uint16", np.uint16()),
            ("uint32", np.uint32()),
            ("uint64", np.uint64()),
            ("float16", np.float16()),
            ("float32", np.float32()),
            ("float64", np.float64()),
        ]
        # Make sure the SDK doesn't choke and JSON serialization works
        exp = Experiment("MNIST")
        for name, num in nums_to_test:
            exp.metric("test_metric_{}".format(name), num)
            exp.param("test_param_{}".format(name), num)
        exp.end()

        # Test params match what is expected
        params_messages = []
        for msg in server_sdk_messages:
            payload = msg["payload"]
            if "params" in payload:
                params_messages.append(payload)

        expected_params = []
        for name, num in nums_to_test:
            obj = {
                "params": {},
                "is_internal": False,
            }
            obj["params"]["test_param_{}".format(name)] = num
            obj["is_internal"] = False
            expected_params.append(obj)

        assert len(expected_params) == len(params_messages)
        for i, message in enumerate(params_messages):
            print(message)
            print(expected_params[i])
            assert message == expected_params[i]

        # Test metrics match what is expected
        metrics_messages = []
        for msg in server_sdk_messages:
            payload = msg["payload"]
            if "name" in payload:
                metrics_messages.append(payload)

        expected_metrics = []
        for name, num in nums_to_test:
            expected_metrics.append({
                "name": "test_metric_{}".format(name),
                "value": num,
                "is_internal": False,
            })

        assert len(expected_metrics) == len(metrics_messages)
        for i, message in enumerate(metrics_messages):
            assert message == expected_metrics[i]
Example #40
    def __init__(self):
        self._lock = threading.RLock()
        self._running = False
        # load calibration parameters
        with open('calibration/Sawyer.yaml') as file:
            H_Sawyer = np.array(yaml.safe_load(file)['H'], dtype=np.float64)
        with open('calibration/ABB.yaml') as file:
            H_ABB = np.array(yaml.safe_load(file)['H'], dtype=np.float64)
        self.H_Sawyer = H42H3(H_Sawyer)
        self.H_ABB = H42H3(H_ABB)
        self.L2C = ['', '']

        #form H into RR transformation struct
        transformation = RRN.GetStructureType(
            "edu.rpi.robotics.distance.transformation")

        transformation1 = transformation()
        transformation1.name = "Sawyer"
        transformation1.row = len(self.H_Sawyer)
        transformation1.column = len(self.H_Sawyer[0])
        transformation1.H = np.float16(self.H_Sawyer).flatten().tolist()

        transformation2 = transformation()
        transformation2.name = "ABB"
        transformation2.row = len(self.H_ABB)
        transformation2.column = len(self.H_ABB[0])
        transformation2.H = np.float16(self.H_ABB).flatten().tolist()

        self.transformations = [transformation1, transformation2]

        #Connect to robot service
        Sawyer = RRN.ConnectService('rr+tcp://localhost:58654?service=robot')
        ABB = RRN.ConnectService('rr+tcp://localhost:58655?service=robot')
        Sawyer_state = Sawyer.robot_state.Connect()
        ABB_state = ABB.robot_state.Connect()

        #link and joint names in urdf
        Sawyer_joint_names = [
            "right_j0", "right_j1", "right_j2", "right_j3", "right_j4",
            "right_j5", "right_j6"
        ]
        Sawyer_link_names = [
            "right_l0", "right_l1", "right_l2", "right_l3", "right_l4",
            "right_l5", "right_l6", "right_l1_2", "right_l2_2", "right_l4_2",
            "right_hand"
        ]
        ABB_joint_names = [
            'ABB1200_joint_1', 'ABB1200_joint_2', 'ABB1200_joint_3',
            'ABB1200_joint_4', 'ABB1200_joint_5', 'ABB1200_joint_6'
        ]
        ABB_link_names = [
            'ABB1200_base_link', 'ABB1200_link_1', 'ABB1200_link_2',
            'ABB1200_link_3', 'ABB1200_link_4', 'ABB1200_link_5',
            'ABB1200_link_6'
        ]

        self.robot_state_list = [Sawyer_state, ABB_state]
        self.robot_link_list = [Sawyer_link_names, ABB_link_names]
        self.robot_joint_list = [Sawyer_joint_names, ABB_joint_names]
        self.num_robot = len(self.robot_state_list)
        self.distance_matrix = -np.ones(self.num_robot * self.num_robot)

        ######tesseract environment setup:

        with open("urdf/combined.urdf", 'r') as f:
            combined_urdf = f.read()
        with open("urdf/combined.srdf", 'r') as f:
            combined_srdf = f.read()
        t = tesseract.Tesseract()
        t.init(combined_urdf, combined_srdf, GazeboModelResourceLocator())
        self.t_env = t.getEnvironment()
        #update robot poses based on calibration file
        self.t_env.changeJointOrigin("sawyer_pose", H_Sawyer)
        self.t_env.changeJointOrigin("abb_pose", H_ABB)

        contact_distance = 0.1
        monitored_link_names = self.t_env.getLinkNames()
        self.manager = self.t_env.getDiscreteContactManager()
        self.manager.setActiveCollisionObjects(monitored_link_names)
        self.manager.setContactDistanceThreshold(contact_distance)
Exemplo n.º 41
0
    def distance_check(self, robot_idx):
        with self._lock:
            distance_report = RRN.GetStructureType(
                "edu.rpi.robotics.distance.distance_report")
            distance_report1 = distance_report()
            #update other robot's joints
            for i in range(self.num_robot):
                robot_joints = self.robot_state_list[i].InValue.joint_position
                self.t_env.setState(self.robot_joint_list[i], robot_joints)

            env_state = self.t_env.getCurrentState()
            self.manager.setCollisionObjectsTransform(
                env_state.link_transforms)
            contacts = self.manager.contactTest(2)

            contact_vector = tesseract.flattenResults(contacts)

            distances = np.array([c.distance for c in contact_vector])
            nearest_points = np.array(
                [c.nearest_points for c in contact_vector])
            names = np.array([c.link_names for c in contact_vector])
            # nearest_index=np.argmin(distances)

            min_distance = 9
            min_index = -1
            Closest_Pt = [0., 0., 0.]
            Closest_Pt_env = [0., 0., 0.]
            #initialize
            distance_report1.Closest_Pt = Closest_Pt
            distance_report1.Closest_Pt_env = Closest_Pt_env

            for i in range(len(distances)):

                #only 1 in 2 collision "objects"
                if (names[i][0] in self.robot_link_list[robot_idx]
                        or names[i][1] in self.robot_link_list[robot_idx]
                    ) and distances[i] < min_distance and not (
                        names[i][0] in self.robot_link_list[robot_idx]
                        and names[i][1] in self.robot_link_list[robot_idx]):
                    min_distance = distances[i]
                    min_index = i

            J2C = 0
            if (min_index != -1):
                if names[min_index][0] in self.robot_link_list[
                        robot_idx] and names[min_index][
                            1] in self.robot_link_list[robot_idx]:
                    stop = 1
                    print("stop")
                elif names[min_index][0] in self.robot_link_list[robot_idx]:
                    J2C = self.robot_link_list[robot_idx].index(
                        names[min_index][0])
                    Closest_Pt = nearest_points[min_index][0]
                    Closest_Pt_env = nearest_points[min_index][1]
                    print(names[min_index])
                    print(distances[min_index])
                elif names[min_index][1] in self.robot_link_list[robot_idx]:
                    J2C = self.robot_link_list[robot_idx].index(
                        names[min_index][1])
                    Closest_Pt = nearest_points[min_index][1]
                    Closest_Pt_env = nearest_points[min_index][0]
                    print(names[min_index])
                    print(distances[min_index])

                if robot_idx == 0:
                    J2C = self.Sawyer_link(J2C)

                distance_report1.Closest_Pt = np.float16(
                    Closest_Pt).flatten().tolist()
                distance_report1.Closest_Pt_env = np.float16(
                    Closest_Pt_env).flatten().tolist()
                distance_report1.min_distance = np.float16(
                    distances[min_index])
                distance_report1.J2C = J2C

                return distance_report1

            return distance_report1
Exemplo n.º 42
0
    def populate_file_queue(self):
        location = self.data_source
        dct = defaultdict(list)
        tmp_reference_image = []
        tmp_distorted_image = []
        tmp_scores = []
        with open(location + "data.txt") as f:
            file_contents = f.read()
            lines = file_contents.split('\n')
            lines = [item for item in lines if item != '']
            old_content = '-1'
            for i in range(len(lines)):  # for the full dataset
                if i == 0:
                    continue
                line_data = lines[i].split('\t')

                dfilename = location + str(line_data[3]) + '\\' + str(
                    line_data[0]) + '.' + str(line_data[3]) + '.' + str(
                        line_data[4]) + '.png'
                rfilename = location + str(line_data[0]) + '.png'

                score = np.float16(line_data[6])
                content = int(line_data[1])
                ntype = int(line_data[2])
                nlevel = line_data[4]
                dct[rfilename].append(i - 1)
                tmp_reference_image.append(rfilename)
                tmp_distorted_image.append(dfilename)
                tmp_scores.append(score)


                # print(dfilename)

        # Separate images based on reference image content
        self.dist = tmp_distorted_image
        self.ref = tmp_reference_image
        self.mos = tmp_scores
        index_of_contents = []
        c_length = len(dct.keys())
        for count, key in enumerate(dct.keys()):
            index_of_contents.append(dct[key])
        [train_content, test_content] = train_test_split(c_length, ratio=0.99)
        file_id_list_train = []
        for indexs in train_content:
            file_id_list_train += index_of_contents[indexs]

        file_id_list_test = []
        for indexs in test_content:
            file_id_list_test += index_of_contents[indexs]
        # Global storage of file lists
        self.file_reference_image_train = []
        self.file_distorted_image_train = []
        self.MOS_scores_train = []
        self.file_reference_image_test = []
        self.file_distorted_image_test = []
        self.MOS_scores_test = []

        for file_idxs in file_id_list_train:

            self.file_distorted_image_train.append(
                tmp_distorted_image[file_idxs])
            self.file_reference_image_train.append(
                tmp_reference_image[file_idxs])
            self.MOS_scores_train.append(tmp_scores[file_idxs])

        for file_idxs in file_id_list_test:

            self.file_distorted_image_test.append(
                tmp_distorted_image[file_idxs])
            self.file_reference_image_test.append(
                tmp_reference_image[file_idxs])
            self.MOS_scores_test.append(tmp_scores[file_idxs])
        self.refill_file_queues()
Exemplo n.º 43
0
def run( soltab, tecsoltabOut='tec000', clocksoltabOut='clock000', offsetsoltabOut='phase_offset000', tec3rdsoltabOut='tec3rd000', flagBadChannels=True, flagCut=5., chi2cut=3000., combinePol=False, removePhaseWraps=True, fit3rdorder=False, circular=False, reverse=False, invertOffset=False, nproc=10 ):
    """
    Separate phase solutions into Clock and TEC.
    The Clock and TEC values are stored in the specified output soltab with type 'clock', 'tec', 'tec3rd'.

    Parameters
    ----------
    flagBadChannels : bool, optional
        Detect and remove bad channel before fitting, by default True.

    flagCut : float, optional
        Threshold used when flagging bad channels, by default 5.

    chi2cut : float, optional
        Solutions whose fit chi-squared exceeds this value are flagged, by default 3000.

    combinePol : bool, optional
        Find a combined polarization solution, by default False.

    removePhaseWraps : bool, optional
        Detect and remove phase wraps, by default True.

    fit3rdorder : bool, optional
        Fit a 3rd-order ionospheric component (useful below 40 MHz). By default False.

    circular : bool, optional
        Assume circular polarization with FR not removed. By default False.

    reverse : bool, optional
        Reverse the time axis. By default False.

    invertOffset : bool, optional
        Invert (reverse the sign of) the phase offsets. By default False. Set to True
        if you want to use them with the residuals operation.
    """
    import numpy as np
    from ._fitClockTEC import doFit

    logging.info("Clock/TEC separation on soltab: "+soltab.name)

    # some checks
    solType = soltab.getType()
    if solType != 'phase':
        logging.warning("Soltab type of "+soltab.name+" is: "+solType+" should be phase. Ignoring.")
        return 1

    # Collect station properties
    solset = soltab.getSolset()
    station_dict = solset.getAnt()
    stations = soltab.getAxisValues('ant')
    station_positions = np.zeros((len(stations), 3), dtype=np.float64)
    for i, station_name in enumerate(stations):
        station_positions[i, 0] = station_dict[station_name.encode()][0]
        station_positions[i, 1] = station_dict[station_name.encode()][1]
        station_positions[i, 2] = station_dict[station_name.encode()][2]

    returnAxes=['ant','freq','pol','time']
    for vals, flags, coord, selection in soltab.getValuesIter(returnAxes=returnAxes,weight=True):

        if len(coord['ant']) < 2:
            logging.error('Clock/TEC separation needs at least 2 antennas selected.')
            return 1
        if len(coord['freq']) < 10:
            logging.error('Clock/TEC separation needs at least 10 frequency channels, preferably distributed over a wide range')
            return 1

        freqs=coord['freq']
        stations=coord['ant']
        times=coord['time']

        # get axes index
        axes=[i for i in soltab.getAxesNames() if i in returnAxes]

        # reverse time axes
        if reverse:
            vals = np.swapaxes(np.swapaxes(vals, 0, axes.index('time'))[::-1], 0, axes.index('time'))
            flags = np.swapaxes(np.swapaxes(flags, 0, axes.index('time'))[::-1], 0, axes.index('time'))

        result = doFit(vals, flags == 0, freqs, stations, station_positions, axes,
                       flagBadChannels=flagBadChannels, flagcut=flagCut, chi2cut=chi2cut,
                       combine_pol=combinePol, removePhaseWraps=removePhaseWraps,
                       fit3rdorder=fit3rdorder, circular=circular, n_proc=nproc)
        if fit3rdorder:
            clock,tec,offset,tec3rd=result
            if reverse:
                clock = clock[::-1,:]
                tec = tec[::-1,:]
                tec3rd = tec3rd[::-1,:]
        else:
            clock,tec,offset=result
            if reverse:
                clock = clock[::-1,:]
                tec = tec[::-1,:]
        if invertOffset:
            offset *= -1.0

        weights=tec>-5
        tec[np.logical_not(weights)]=0
        clock[np.logical_not(weights)]=0
        weights=np.float16(weights)

        if combinePol or 'pol' not in soltab.getAxesNames():
            tf_st = solset.makeSoltab('tec', soltabName = tecsoltabOut,
                             axesNames=['time', 'ant'], axesVals=[times, stations],
                             vals=tec[:,:,0],
                             weights=weights[:,:,0])
            tf_st.addHistory('CREATE (by CLOCKTECFIT operation)')
            tf_st = solset.makeSoltab('clock', soltabName = clocksoltabOut,
                             axesNames=['time', 'ant'], axesVals=[times, stations],
                             vals=clock[:,:,0]*1e-9,
                             weights=weights[:,:,0])
            tf_st.addHistory('CREATE (by CLOCKTECFIT operation)')
            tf_st = solset.makeSoltab('phase', soltabName = offsetsoltabOut,
                             axesNames=['ant'], axesVals=[stations],
                             vals=offset[:,0],
                             weights=np.ones_like(offset[:,0],dtype=np.float16))
            tf_st.addHistory('CREATE (by CLOCKTECFIT operation)')
            if fit3rdorder:
                tf_st = solset.makeSoltab('tec3rd', soltabName = tec3rdsoltabOut,
                                     axesNames=['time', 'ant'], axesVals=[times, stations],
                                     vals=tec3rd[:,:,0],
                                     weights=weights[:,:,0])
        else:
            tf_st = solset.makeSoltab('tec', soltabName = tecsoltabOut,
                             axesNames=['time', 'ant','pol'], axesVals=[times, stations, ['XX','YY']],
                             vals=tec,
                             weights=weights)
            tf_st.addHistory('CREATE (by CLOCKTECFIT operation)')
            tf_st = solset.makeSoltab('clock', soltabName = clocksoltabOut,
                             axesNames=['time', 'ant','pol'], axesVals=[times, stations, ['XX','YY']],
                             vals=clock*1e-9,
                             weights=weights)
            tf_st.addHistory('CREATE (by CLOCKTECFIT operation)')
            tf_st = solset.makeSoltab('phase', soltabName = offsetsoltabOut,
                             axesNames=['ant','pol'], axesVals=[stations, ['XX','YY']],
                             vals=offset,
                             weights=np.ones_like(offset,dtype=np.float16))
            tf_st.addHistory('CREATE (by CLOCKTECFIT operation)')
            if fit3rdorder:
                tf_st = solset.makeSoltab('tec3rd', soltabName = tec3rdsoltabOut,
                                     axesNames=['time', 'ant','pol'], axesVals=[times, stations, ['XX','YY']],
                                     vals=tec3rd,
                                     weights=weights)
    return 0
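The time-axis reversal above uses a swapaxes/slice/swapaxes sandwich so it works for any axis position; a standalone sketch of the same trick:

import numpy as np

vals = np.arange(24).reshape(2, 3, 4)  # e.g. (ant, time, pol)
t = 1                                  # index of the time axis
rev = np.swapaxes(np.swapaxes(vals, 0, t)[::-1], 0, t)
assert (rev == vals[:, ::-1, :]).all()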
Exemplo n.º 44
0
def detect_blobs(frame, thresh, meas_last, meas_now, last_heads, colors):

    # Function to find valid fly contours and return centroid coordinates.
    # Returns a new meas_now tuple, with error handling that falls back to the
    # last valid measurement if no good measurements are found.

    # Generate contours with no discrimination; cv2.findContours returns
    # (contours, hierarchy) in OpenCV 4 and (image, contours, hierarchy) in
    # OpenCV 3, so index [-2] picks the contour list in either version
    contours = cv2.findContours(thresh.copy(), cv2.RETR_TREE,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]

    # Discriminate contours based on area as a fraction of the total frame; flies generally fall between 0.0001 and 0.01
    good_contours = []
    list_of_detections = []

    for i, contour in enumerate(contours):
        if ((cv2.contourArea(contour) / (thresh.shape[0]**2)) < 0.01) and (
            (cv2.contourArea(contour) / (thresh.shape[0]**2)) > 0.0001):
            good_contours.append(contour)

    # Second round of discrimination based on whether or not centroid is filled and if it is in masked "non-arena" area
    better_contours = []

    for i, contour in enumerate(good_contours):

        M = cv2.moments(contour)
        if M['m00'] != 0:

            cx = np.float16(M['m10'] / M['m00'])
            cy = np.float16(M['m01'] / M['m00'])

            center_coords = np.asarray(
                [int(frame.shape[0] / 2),
                 int(frame.shape[1] / 2)])
            dist_to_center = np.linalg.norm(center_coords -
                                            np.asarray([cx, cy]))

            if (dist_to_center > (frame.shape[0] / 2)):
                pass
            elif [int(cx), int(cy)] in [[0, 0], [0, frame.shape[0]],
                                        [frame.shape[0], 0], []]:
                pass
            else:
                # Check whether contour is mostly filled or not
                area = cv2.contourArea(contour)
                edge = int(np.sqrt(area))
                h = int(edge / 2)
                perim = cv2.arcLength(contour, True)
                window = thresh[(int(cy) - h):(int(cy) + h),
                                (int(cx) - h):(int(cx) + h)]

                if not np.isnan(np.mean(window)):

                    if (np.mean(window) > 60) and (np.mean(window) < 160):

                        better_contours.append(contour)

                        detection = Detection()
                        detection.add_centroid([cx, cy])

                        try:

                            ellipse = cv2.fitEllipse(contour)

                            MAG = ellipse[1][1] * 0.5
                            mag = ellipse[1][0] * 0.5

                            ellipseAngle = math.radians(ellipse[-1])

                            ellipseAngle = ellipseAngle - (math.pi / 2)
                            ellipseAngle2 = ellipseAngle - (math.pi)

                            x_ang = math.degrees(math.cos(ellipseAngle))
                            y_ang = math.degrees(math.sin(ellipseAngle))
                            fpx1 = int(cx - MAG * math.radians(x_ang))
                            fpy1 = int(cy - MAG * math.radians(y_ang))

                            x_ang2 = math.degrees(math.cos(ellipseAngle2))
                            y_ang2 = math.degrees(math.sin(ellipseAngle2))
                            fpx2 = int(cx - MAG * math.radians(x_ang2))
                            fpy2 = int(cy - MAG * math.radians(y_ang2))

                            # Need to determine which between [fpx1, fpy1], [fpx2, fpy2]
                            # is animal head, append along with cx, cy

                            # Do disoriented patch coords
                            disoriented_patch = cv2.boundingRect(contour)

                            # Make disoriented patch a bit bigger so full bigger animal can fit
                            scale_factor = 3
                            w = int(disoriented_patch[2] * scale_factor)
                            h = int(disoriented_patch[3] * scale_factor)

                            dim = np.max([w, h])

                            x = int((disoriented_patch[0]) -
                                    (dim - disoriented_patch[2]) / 2)
                            y = int((disoriented_patch[1]) -
                                    (dim - disoriented_patch[3]) / 2)

                            # Grab pixels from these dims from color img:
                            patch = frame[y:(y + h), x:(x + h)]

                            # Generate minimum bounding rectangle around the larger contour
                            head_point = get_head_point(
                                patch, [x, y], [cx, cy],
                                [[fpx1, fpy1], [fpx2, fpy2]])

                            detection.add_head(head_point)
                            detection.add_area(area)
                        except TypeError:
                            detection.add_head(None)

                    else:
                        pass

        else:
            pass

        try:
            if (detection.head is None) and (detection.centroid is None):
                pass
            else:
                list_of_detections.append(detection)
        except UnboundLocalError:
            pass

    return list(set(list_of_detections)), len(better_contours)
Exemplo n.º 45
0
 def get_real_half_product(self):
     return np.float16(
         np.float16(ProductSumTest.__REAL_NUMBER) * self.__times)
Exemplo n.º 46
0
    def test_half_fpe(self):
        with np.errstate(all="raise"):
            sx16 = np.array((1e-4, ), dtype=float16)
            bx16 = np.array((1e4, ), dtype=float16)
            sy16 = float16(1e-4)
            by16 = float16(1e4)

            # Underflow errors
            assert_raises_fpe("underflow", lambda a, b: a * b, sx16, sx16)
            assert_raises_fpe("underflow", lambda a, b: a * b, sx16, sy16)
            assert_raises_fpe("underflow", lambda a, b: a * b, sy16, sx16)
            assert_raises_fpe("underflow", lambda a, b: a * b, sy16, sy16)
            assert_raises_fpe("underflow", lambda a, b: a / b, sx16, bx16)
            assert_raises_fpe("underflow", lambda a, b: a / b, sx16, by16)
            assert_raises_fpe("underflow", lambda a, b: a / b, sy16, bx16)
            assert_raises_fpe("underflow", lambda a, b: a / b, sy16, by16)
            assert_raises_fpe("underflow", lambda a, b: a / b,
                              float16(2.0**-14), float16(2**11))
            assert_raises_fpe(
                "underflow",
                lambda a, b: a / b,
                float16(-(2.0**-14)),
                float16(2**11),
            )
            assert_raises_fpe(
                "underflow",
                lambda a, b: a / b,
                float16(2.0**-14 + 2**-24),
                float16(2),
            )
            assert_raises_fpe(
                "underflow",
                lambda a, b: a / b,
                float16(-(2.0**-14) - 2**-24),
                float16(2),
            )
            assert_raises_fpe(
                "underflow",
                lambda a, b: a / b,
                float16(2.0**-14 + 2**-23),
                float16(4),
            )

            # Overflow errors
            assert_raises_fpe("overflow", lambda a, b: a * b, bx16, bx16)
            assert_raises_fpe("overflow", lambda a, b: a * b, bx16, by16)
            assert_raises_fpe("overflow", lambda a, b: a * b, by16, bx16)
            assert_raises_fpe("overflow", lambda a, b: a * b, by16, by16)
            assert_raises_fpe("overflow", lambda a, b: a / b, bx16, sx16)
            assert_raises_fpe("overflow", lambda a, b: a / b, bx16, sy16)
            assert_raises_fpe("overflow", lambda a, b: a / b, by16, sx16)
            assert_raises_fpe("overflow", lambda a, b: a / b, by16, sy16)
            assert_raises_fpe("overflow", lambda a, b: a + b, float16(65504),
                              float16(17))
            assert_raises_fpe("overflow", lambda a, b: a - b, float16(-65504),
                              float16(17))
            assert_raises_fpe("overflow", np.nextafter, float16(65504),
                              float16(np.inf))
            assert_raises_fpe("overflow", np.nextafter, float16(-65504),
                              float16(-np.inf))
            assert_raises_fpe("overflow", np.spacing, float16(65504))

            # Invalid value errors
            assert_raises_fpe("invalid", np.divide, float16(np.inf),
                              float16(np.inf))
            assert_raises_fpe("invalid", np.spacing, float16(np.inf))
            assert_raises_fpe("invalid", np.spacing, float16(np.nan))

            # These should not raise
            float16(65472) + float16(32)
            float16(2**-13) / float16(2)
            float16(2**-14) / float16(2**10)
            np.spacing(float16(-65504))
            np.nextafter(float16(65504), float16(-np.inf))
            np.nextafter(float16(-65504), float16(np.inf))
            np.nextafter(float16(np.inf), float16(0))
            np.nextafter(float16(-np.inf), float16(0))
            np.nextafter(float16(0), float16(np.nan))
            np.nextafter(float16(np.nan), float16(0))
            float16(2**-14) / float16(2**10)
            float16(-(2**-14)) / float16(2**10)
            float16(2**-14 + 2**-23) / float16(2)
            float16(-(2**-14) - 2**-23) / float16(2)
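The boundary values exercised by this test come straight from the half-precision format; np.finfo reports them directly (a quick reference sketch):

import numpy as np

fi = np.finfo(np.float16)
print(fi.max)   # 65504.0, the largest finite value
print(fi.tiny)  # 6.104e-05 (2**-14), the smallest normal value
print(fi.eps)   # 0.000977 (2**-10), the spacing at 1.0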
Exemplo n.º 47
0
# Use this for LUT-MANT generation for float16, with the first 6 bits used to address the LUT
import numpy as np
import math

a = np.float64(1)
print("module LUT2(addr, log);")
print("    input [5:0] addr;")
print("    output reg [15:0] log;")
print("")
print("    always @(addr) begin")
print("        case (addr)")

for i in range(64):
    temp = np.log(a)
    num = bin(np.float16(temp).view('H'))[2:].zfill(16)
    print("            6'b{0:06b} : log = 16'b{1};".format(i, num))
    a += np.exp2(-6)

print("        endcase")
print("    end")
print("endmodule")
Exemplo n.º 48
0
def normalize(utterance):
    utterance = utterance - np.mean(utterance, axis=0, dtype=np.float64)
    return np.float16(utterance)
Exemplo n.º 49
0
def SignNumpy(x):
    return np.float16(2. * np.greater_equal(x, 0) - 1.)  # map values to +1 or -1
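Quick usage check; note that zero maps to +1 because greater_equal is used:

import numpy as np

x = np.array([-2.5, 0.0, 0.7])
print(SignNumpy(x))  # [-1.  1.  1.]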
Exemplo n.º 50
0
 def test_encode_numpy_float(self):
     self.assertEqual(
         json.dumps(np.float16(3.76953125),
                    cls=utils_json.AirflowJsonEncoder), '3.76953125')
Exemplo n.º 51
0
        test_photos = test_photos[0:5]

    if phone.endswith("_orig"):

        # load pre-trained model
        saver = tf.train.Saver()
        saver.restore(sess, "models_orig/" + phone)

        for photo in test_photos:

            # load training image and crop it if necessary

            print("Testing original " + phone.replace("_orig", "") +
                  " model, processing image " + photo)
            image = np.float16(
                misc.imresize(misc.imread(test_dir + photo),
                              res_sizes[phone])) / 255

            image_crop = utils.extract_crop(image, resolution, phone,
                                            res_sizes)
            image_crop_2d = np.reshape(image_crop, [1, IMAGE_SIZE])

            # get enhanced image

            enhanced_2d = sess.run(enhanced, feed_dict={x_: image_crop_2d})
            enhanced_image = np.reshape(enhanced_2d,
                                        [IMAGE_HEIGHT, IMAGE_WIDTH, 3])
            enhanced_image = cv2.resize(enhanced_image, (1280, 720))
            # before_after = np.hstack((image_crop, enhanced_image))
            photo_name = photo.rsplit(".", 1)[0]
Exemplo n.º 52
0
import numpy as np
from bitstring import BitArray


def differentPrecision(num, bits):
    binum = BitArray(float=num, length=bits)
    sign = int(binum[0])
    if bits == 32:
        exponent = binum[1:9].bin
        mantissa = binum[9:].bin
    else:
        exponent = binum[1:12].bin
        mantissa = binum[12:].bin
    return sign, exponent, mantissa


def displayPrecision(num):
    binum = BitArray(float=num, length=64)
    sign = int(binum[0])
    exponent = binum[1:12].bin
    mantissa = binum[12:].bin
    return sign, exponent, mantissa


print("float16:", displayPrecision(np.float16(1 / 3)))
print("float32:", displayPrecision(np.float32(1 / 3)))
print("float64:", displayPrecision(np.float64(1 / 3)))
print('\n')
print("float16 to float32:", displayPrecision(np.float32(np.float16(1 / 3))))
print("float16 to float64:", displayPrecision(np.float64(np.float16(1 / 3))))
print("float32 to float64:", displayPrecision(np.float64(np.float32(1 / 3))))
Exemplo n.º 53
0
np.complex128(3j)
np.complex128(C())
np.complex128(None)
np.complex64("1.2")
np.complex128(b"2j")

np.int8(4)
np.int16(3.4)
np.int32(4)
np.int64(-1)
np.uint8(B())
np.uint32()
np.int32("1")
np.int64(b"2")

np.float16(A())
np.float32(16)
np.float64(3.0)
np.float64(None)
np.float32("1")
np.float16(b"2.5")

if sys.version_info >= (3, 8):
    np.uint64(D())
    np.float32(D())
    np.complex64(D())

np.bytes_(b"hello")
np.bytes_("hello", 'utf-8')
np.bytes_("hello", encoding='utf-8')
np.str_("hello")
Exemplo n.º 54
0
        "hello": "world",
        1: 42,
        2.5: 45
    },
    {
        "hello": set([2, 3]),
        "world": set([42.0]),
        "this": None
    },
    np.int8(3),
    np.int32(4),
    np.int64(5),
    np.uint8(3),
    np.uint32(4),
    np.uint64(5),
    np.float16(1.9),
    np.float32(1.9),
    np.float64(1.9),
    np.zeros([8, 20]),
    np.random.normal(size=[17, 10]),
    np.array(["hi", 3]),
    np.array(["hi", 3], dtype=object),
    np.random.normal(size=[15, 13]).T,
]

if sys.version_info >= (3, 0):
    PRIMITIVE_OBJECTS += [0, np.array([["hi", u"hi"], [1.3, 1]])]
else:
    PRIMITIVE_OBJECTS += [
        long(42),
        long(1 << 62),
Exemplo n.º 55
0
def datasetvormgeven(lijstsequenties):
    """Functie maakt de benodigde lijsten om een zelfde vorm te krijgen
    als de moving mnist dataset die als voorbeeld gebruikt wordt.
    Afbeeldingen wordne in gelezen in grayscale en gecheckt dat de 
    breedte == 788 en de hoogte == 525. Als dit niet zo is worden deze
    afbeeldingen uit de dataset verwijdert."""

    inp = []
    gt = []
    imdata = []
    index = 0
    x = 0

    for element in lijstsequenties:

        tijdelijk = []
        errorinseq = False

        for model in element:
            img = cv2.imread(BASEPATH + '/' + model, cv2.IMREAD_GRAYSCALE)

            h, w = img.shape

            if 525 != h or 788 != w:
                # print("Probleem H of W bij " + model)
                errorinseq = True
                continue

            img = cv2.resize(img, (240, 164), interpolation = cv2.INTER_AREA)

            img = np.float32(img)

            img = cv2.normalize(img, img, 0, 1, norm_type=cv2.NORM_MINMAX)

            img = np.float16(img)

            tijdelijk.append([img])

        if errorinseq:
            # print("Sequence skipped!")
            continue

        if args.add_dummy_data != 0:
            for i in range(args.add_dummy_data):
                dummydata = np.zeros((164, 240), dtype=np.float16)
                tijdelijk.append([dummydata])
                
        for t in tijdelijk:
            imdata.append(t)

        totl = len(tijdelijk)
        inpl = SEQGROOTTE - PREDGROOTTE
        gtl = PREDGROOTTE + args.add_dummy_data

        tijdelijk = []
        tijdelijk.append(index)
        tijdelijk.append(inpl)

        inp.append(tijdelijk)

        index += inpl

        tijdelijk = []
        tijdelijk.append(index)
        tijdelijk.append(gtl)

        gt.append(tijdelijk)

        index += gtl

        x += 1

    print("Aantal sequenties = " + str(x))

    return inp, gt, imdata
Exemplo n.º 56
0
def handle_representation():
    table = pd.read_csv(process_path, index_col=0, encoding='utf-8')
    print('***** data ')

    for i in range(table.shape[0]):
        try:
            if isinstance(table['price'][i], str):
                if '-' in table['price'][i]:
                    a, b = table['price'][i].split('-')
                    a = np.float16(
                        a.replace('-',
                                  '').replace(' ',
                                              '').replace('£',
                                                          '').replace(',', ''))
                    b = np.float16(
                        b.replace('-',
                                  '').replace(' ',
                                              '').replace('£',
                                                          '').replace(',', ''))
                    table['price'][i] = (a + b) / 2.0  # midpoint of the range
                else:
                    table['price'][i] = np.float16(table['price'][i].replace(
                        '£', '').replace(' ', '').replace(',', ''))

            if isinstance(table['average_review_rating'][i], str):
                table['average_review_rating'][i] = np.float16(
                    table['average_review_rating'][i].replace(
                        ' out of 5 stars', '').replace(' ', ''))
                # print('%d finish'%i)

            if isinstance(
                    table['customers_who_bought_this_item_also_bought'][i],
                    str):
                products = re.findall(
                    regex,
                    table['customers_who_bought_this_item_also_bought'][i])
                p = ''
                for k, product in enumerate(products):
                    product_s = product.split('/')[0]
                    if k > 0:
                        p += '|' + product_s
                    else:
                        p += product_s
                table['customers_who_bought_this_item_also_bought'][i] = p
                # print('%d finish'%i)

            if isinstance(
                    table['items_customers_buy_after_viewing_this_item'][i],
                    str):
                products = re.findall(
                    regex,
                    table['items_customers_buy_after_viewing_this_item'][i])
                p = ''
                for k, product in enumerate(products):
                    product_s = product.split('/')[0]
                    if k > 0:
                        p += '|' + product_s
                    else:
                        p += product_s
                # print(i,' : ',p)
                table['items_customers_buy_after_viewing_this_item'][i] = p
                # print('%d finish' % i)

            if isinstance(table['customer_reviews'][i], str):
                s = table['customer_reviews'][i].replace('\n', '')
                s = re.sub(r"\s{2,}", " ", s)
                a = s.split('//')
                p = ''
                for k in range(0, len(a), 4):
                    if k > len(a) - 1 or k + 1 > len(a) - 1:
                        break
                    if k > 0:
                        p += '|%s-%s' % (a[k].replace(
                            '|', ' '), a[k + 1].replace(' ', ''))
                    else:
                        p += '%s-%s' % (a[k].replace(
                            '|', ' '), a[k + 1].replace(' ', ''))
                table['customer_reviews'][i] = p
                # print('%d finish' % i)

            if isinstance(table['customer_questions_and_answers'][i], str):
                s = table['customer_questions_and_answers'][i].replace(
                    '\n', ' ')
                s = re.sub(r"\s{2,}", " ", s).replace('://', '')

                p = ''
                a = s.split('|')
                for k, q_a in enumerate(a):
                    q, a = q_a.split('//')
                    if 'see more' in a:
                        r = re.findall('see more\s*(.*)\s*see less', s)[0]
                    else:
                        r = a
                    if k > 0:
                        p += '|%s//%s' % (q, r)
                    else:
                        p += '%s//%s' % (q, r)
                table['customer_questions_and_answers'][i] = p
                # print('%d finish' % i)

            if isinstance(table['number_of_reviews'][i], str):
                table['number_of_reviews'][i] = np.int16(
                    table['number_of_reviews'][i].replace(',', ''))
                # print('%d finish'%i)
            print('%d finish' % i)
            pass

        except Exception as e:
            print('str(e):\t\t', str(e))
            # print(table['price'][i],' - ',table['average_review_rating'][i])
            traceback.print_exc()
            break

    table.to_csv(r'D:\HKBU_AI_Classs\IT_Project\process.csv')
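A self-contained check of the price-range branch above (the sample string is made up; the DataFrame handling is omitted):

import numpy as np

raw = '£1,200 - £1,800'
a, b = raw.split('-')
clean = lambda s: np.float16(s.replace('£', '').replace(',', '').strip())
print((clean(a) + clean(b)) / 2.0)  # 1500.0, the midpoint of the range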
Exemplo n.º 57
0
 def runtest(self):
     model = RegressionModel.load(os.path.join(self.modelfile, "linear"), "linear")
     betas, stats, resid = model.fit(self.rdd)
     result = stats.map(lambda kv: float16(kv[1])).collect()
     savemat(self.savefile + "tmp.mat", mdict={"tmp": result}, oned_as='column')
Exemplo n.º 58
0
 def runtest(self):
     result = self.rdd.map(lambda kv: float16(kv[1][0])).collect()
     savemat(self.savefile + "tmp.mat", mdict={"tmp": result}, oned_as='column')
Exemplo n.º 59
0
 def normalize(arr: np.ndarray) -> np.ndarray:
     '''Normalize a single-channel image to the range 0-255.'''
     arr = np.float16(arr)
     amin = arr.min()
     rng = arr.max() - amin
     return ((arr - amin) * 255 / rng).astype(np.uint8)
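Usage sketch for normalize (treated here as a free function); the float16 intermediate keeps only about three significant digits, which is enough for 8-bit output:

import numpy as np

img = np.array([[10, 20], [30, 250]], dtype=np.uint8)
print(normalize(img))  # min maps to 0, max maps to 255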
Exemplo n.º 60
0
	def distance_check_global(self,robot_name, joints_list):
		with self._lock:
			robot_idx=self.dict[robot_name]
			distance_report=RRN.GetStructureType("edu.rpi.robotics.distance.distance_report")
			distance_report1=distance_report()

			for i in range(self.num_robot):
				robot_joints=joints_list[i]
				self.t_env.setState(self.robot_joint_list[i], robot_joints)

			env_state = self.t_env.getCurrentState()
			self.manager.setCollisionObjectsTransform(env_state.link_transforms)

			result = ContactResultMap()

			self.manager.contactTest(result, ContactRequest(ContactTestType_ALL))
			result_vector = ContactResultVector()
			collisionFlattenResults(result,result_vector)

			distances = [r.distance for r in result_vector]
			nearest_points=[[r.nearest_points[0],r.nearest_points[1]] for r in result_vector]

			names = [[r.link_names[0],r.link_names[1]] for r in result_vector]
			# nearest_index=np.argmin(distances)

			min_distance=9
			min_index=-1
			Closest_Pt=[0.,0.,0.]
			Closest_Pt_env=[0.,0.,0.]
			#initialize
			distance_report1.Closest_Pt=Closest_Pt
			distance_report1.Closest_Pt_env=Closest_Pt_env

			for i in range(len(distances)):

				#only 1 in 2 collision "objects"
				if (names[i][0] in self.robot_link_list[robot_idx] or names[i][1] in self.robot_link_list[robot_idx]) and distances[i]<min_distance and not (names[i][0] in self.robot_link_list[robot_idx] and names[i][1] in self.robot_link_list[robot_idx]):
					min_distance=distances[i]
					min_index=i


			J2C=0
			if (min_index!=-1):
				if names[min_index][0] in self.robot_link_list[robot_idx] and names[min_index][1] in self.robot_link_list[robot_idx]:
					stop=1
					print("stop")
				elif names[min_index][0] in self.robot_link_list[robot_idx]:
					J2C=self.robot_link_list[robot_idx].index(names[min_index][0])-1
					Closest_Pt=nearest_points[min_index][0]
					Closest_Pt_env=nearest_points[min_index][1]

				elif names[min_index][1] in self.robot_link_list[robot_idx]:
					J2C=self.robot_link_list[robot_idx].index(names[min_index][1])-1
					Closest_Pt=nearest_points[min_index][1]
					Closest_Pt_env=nearest_points[min_index][0]


				if robot_idx==1:
					J2C=self.Sawyer_link(J2C)

				
				distance_report1.Closest_Pt=np.float16(Closest_Pt).flatten().tolist()
				distance_report1.Closest_Pt_env=np.float16(Closest_Pt_env).flatten().tolist()
				distance_report1.min_distance=np.float16(distances[min_index])
				distance_report1.J2C=(J2C if J2C>=0 else 0)	
				
				
				return distance_report1

			return distance_report1