Example #1
def poly_clip(x, y, xleft, xright, ytop, ybottom):
    """Clip a polygon to the given bounding box.

    x and y are 1D arrays describing the coordinates of the vertices.
    xleft, xright, ytop and ybottom specify the borders of the
    bounding box.  Note that a cartesian axis system is used such that
    the following must hold true:

    x_left < x_right
    y_bottom < y_top

    The x and y coordinates of the vertices of the resulting polygon
    are returned.

    """
    x,y = atype([x,y],[np.double,np.double])

    assert len(x) == len(y), "Equal number of x and y coordinates required"
    assert ytop > ybottom
    assert xleft < xright

    # close polygon if necessary
    if x[0] != x[-1] or y[0] != y[-1]:
        x = np.append(x,x[0])
        y = np.append(y,y[0])

    xleft,xright,ytop,ybottom = map(np.double,[xleft,xright,ytop,ybottom])

    workx = np.empty(2*len(x)-1,dtype=np.double)
    worky = np.empty_like(workx)
    M = _lib.poly_clip(len(x),x,y,xleft,xright,ytop,ybottom,workx,worky)
    workx[M] = workx[0]
    worky[M] = worky[0]
    return workx[:M], worky[:M]
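Only the polygon-closing step above relies on np.append; here is a minimal, self-contained sketch of just that step with a made-up open triangle (the C helper _lib.poly_clip and the atype coercion are assumed to come from the surrounding module):

import numpy as np

# Hypothetical open triangle: the last vertex differs from the first.
x = np.array([0.0, 1.0, 0.5])
y = np.array([0.0, 0.0, 1.0])

# Close the polygon the same way poly_clip does before clipping.
if x[0] != x[-1] or y[0] != y[-1]:
    x = np.append(x, x[0])
    y = np.append(y, y[0])

print(x, y)   # both arrays now end with their first vertex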
Example #2
def EvaluatePolicy(s, w_pi, useRBFKernel = False):
  
    # the value of the improved policy
    value = np.zeros((len(s),1))

    # the new policy
    policy = [False] * len(s)

    # iterate through every state, 
    for idx in range(len(s)):

        # State-Action value function for actions 0.0 and 1.0
        if useRBFKernel:
            q0 = np.dot(computePhiRBF(s[idx], 0.0).T, w_pi)
            q1 = np.dot(computePhiRBF(s[idx], 1.0).T, w_pi)
        else:
            q0 = np.dot(np.append(s[idx, 0],0.0), w_pi)
            q1 = np.dot(np.append(s[idx, 0],1.0), w_pi)

        # update the value
        value[idx] = max(q0, q1)

        # update the policy
        policy[idx] = q1 > q0
        
    return (policy, value)
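A hedged, self-contained sketch of the linear (non-RBF) branch above, where the feature vector is simply [state, action] and q(s, a) = [s, a] . w_pi; the states and weights below are made up:

import numpy as np

s = np.array([[0.2], [0.7], [1.5]])   # made-up one-dimensional states, shape (3, 1)
w_pi = np.array([0.5, -0.3])          # made-up weights for the feature vector [state, action]

value = np.zeros((len(s), 1))
policy = [False] * len(s)
for idx in range(len(s)):
    q0 = np.dot(np.append(s[idx, 0], 0.0), w_pi)   # Q(s, a=0)
    q1 = np.dot(np.append(s[idx, 0], 1.0), w_pi)   # Q(s, a=1)
    value[idx] = max(q0, q1)
    policy[idx] = bool(q1 > q0)

print(policy)   # all False here: action 1.0 is never preferred with these weights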
def plot_mpl_fig(): 
    rootdir = '/Users/catherinefielder/Documents/Research_Halos/HaloDetail'
    cs = []
    pops = []
    for subdir, dirs, files in os.walk(rootdir):
        head,tail = os.path.split(subdir)
        haloname = tail
        for file in files:
            if file.endswith('_columnsadded'):
                values = ascii.read(os.path.join(subdir, file), format = 'commented_header') #Get full path and access file
                host_c = values[1]['host_c']  
                cs = np.append(cs, host_c)                     
                pop = len(values['mvir(10)'])
                pops = np.append(pops, pop)
                print(pop)
                plt.loglog(host_c, pop, alpha=0.8, label=haloname)
        print("%s done. On to the next." % haloname)
    #plt.xscale('log')
    #plt.yscale('log')
    plt.xlabel('Host Concentration')
    plt.ylabel('Nsat')
    plt.title('Abundance vs. Host Concentration', ha='center')
    #plt.legend(loc='best')
    spearman = scipy.stats.spearmanr(cs, pops)
    print(spearman)
def get_mean_vmax():
    hostvmaxs = []
    hostvmax25s = []
    hostvmax75s = []
    twentyfifth, fifty, seventyfifth = get_percentile()
    rootdir = "/Users/catherinefielder/Documents/Research_Halos/HaloDetail"
    for subdir, dirs, files in os.walk(rootdir):
        head, tail = os.path.split(subdir)
        haloname = tail
        for file in files:
            if file.endswith("_columnsadded_final"):
                values = ascii.read(
                    os.path.join(subdir, file), format="commented_header"
                )  # Get full path and access file
                hostvmax = values[1]["host_vmax"]
                hostvmaxs = np.append(hostvmaxs, hostvmax)
    twentyfifth = np.percentile(hostvmaxs, 25)
    seventyfifth = np.percentile(hostvmaxs, 75)
    for i in range(0, len(hostvmaxs)):
        if hostvmaxs[i] >= seventyfifth:
            hostvmax75s = np.append(hostvmax75s, hostvmaxs[i])
        elif hostvmaxs[i] < twentyfifth:
            hostvmax25s = np.append(hostvmax25s, hostvmaxs[i])
        else:
            continue
    sumvmax = np.sum(hostvmaxs)
    meanvmax = np.divide(sumvmax, len(hostvmaxs))
    mean75 = np.mean(hostvmax75s)
    mean25 = np.mean(hostvmax25s)
    print("mean")
    print(meanvmax)
    print(mean75)
    print(mean25)
    return meanvmax, mean75, mean25
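The same percentile split can be done without growing arrays one element at a time; a small sketch with made-up vmax values, using boolean masks instead of repeated np.append:

import numpy as np

hostvmaxs = np.array([120., 95., 210., 180., 150., 80., 260., 135.])
twentyfifth = np.percentile(hostvmaxs, 25)
seventyfifth = np.percentile(hostvmaxs, 75)

hostvmax25s = hostvmaxs[hostvmaxs < twentyfifth]     # lowest quartile
hostvmax75s = hostvmaxs[hostvmaxs >= seventyfifth]   # highest quartile

print(hostvmaxs.mean(), hostvmax75s.mean(), hostvmax25s.mean())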
Example #5
def loadParticles(filename):

    file = open(filename)

    particlePools = {}

    for line in file.readlines():
        #print line
        if line[0] == '#':
            continue

        id, x, y, z, r = line.split()

        if not id in particlePools:
            particlePools[id] = Particles()

        pool = particlePools[id]
        
        pool.pos = numpy.append(pool.pos, 
                                [[float(x), float(y), float(z)]],
                                axis=0)

        pool.radii = numpy.append(pool.radii, float(r))


    file.close()

    return particlePools
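The row-append pattern above only works if pool.pos is already an (n, 3) array; a minimal self-contained sketch of that pattern (the Particles container itself is assumed to come from the surrounding module):

import numpy as np

pos = np.empty((0, 3), dtype=float)   # start from an empty (0, 3) array so axis=0 appends work
radii = np.array([])

for x, y, z, r in [(0.0, 0.0, 0.0, 1.0), (1.0, 2.0, 3.0, 0.5)]:
    pos = np.append(pos, [[float(x), float(y), float(z)]], axis=0)
    radii = np.append(radii, float(r))

print(pos.shape, radii)   # pos grew to shape (2, 3)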
Example #6
    def testRandom(self):
        # input is [1, 0, 0, ...] which corresponds to an ConstantQ of constant magnitude 1
        with open(os.path.join(script_dir, 'constantq/CQinput.txt'), 'r') as f:
            # read_data = f.read()
            data = np.array([], dtype='complex64')
            line = f.readline()
            while line != '':
                re = float(line.split('\t')[0])
                im = float(line.split('\t')[1])
                data = np.append(data, re + im * 1j)
                line = f.readline()

        with open(os.path.join(script_dir,'constantq/QMoutput.txt'), 'r') as u:
            QMdata_out = np.array([], dtype='complex64')        
            for line in u:
                re = line.split('+')[0]
                re = float(re[1:])
                im = line.split('+')[1]
                im = float(im[:-3])
                QMdata_out = np.append(QMdata_out, re + im * 1j)
       

        CQdata = ConstantQ()(cvec(data))
        QMdata_out = np.array(QMdata_out, dtype='complex64') 
        

        DifferMean = QMdata_out-CQdata  # difference mean
        DifferMean = ((sum(abs(DifferMean.real))/len(DifferMean))+(sum(abs(DifferMean.imag))/len(DifferMean)))/2 # difference mean
        DiverPerReal = (sum(((QMdata_out.real-CQdata.real)/QMdata_out.real)*100))/len(QMdata_out.real) #divergence mean percentage 
        DiverPerImag = (sum(((QMdata_out.imag-CQdata.imag)/QMdata_out.imag)*100))/len(QMdata_out.imag)  #divergence mean percentage 
        DiverPer = (DiverPerReal + DiverPerImag) / 2

        
        """
Example #7
def iir_bandstops(fstops, fs, order=4):
    """ellip notch filter
    fstops is a list of entries of the form [frequency (Hz), df, df2]                           
    where df is the pass width and df2 is the stop width (narrower                              
    than the pass width). Use caution if passing more than one freq at a time,                  
    because the filter response might behave in ways you don't expect.
    """
    nyq = 0.5 * fs

    # Zeros zd, poles pd, and gain kd for the digital filter
    zd = np.array([])
    pd = np.array([])
    kd = 1

    # Notches
    for fstopData in fstops:
        fstop = fstopData[0]
        df = fstopData[1]
        df2 = fstopData[2]
        low = (fstop - df) / nyq
        high = (fstop + df) / nyq
        low2 = (fstop - df2) / nyq
        high2 = (fstop + df2) / nyq
        z, p, k = iirdesign([low,high], [low2,high2], gpass=1, gstop=6,
                            ftype='ellip', output='zpk')
        zd = np.append(zd,z)
        pd = np.append(pd,p)

    # Set gain to one at 100 Hz...better not notch there                                        
    bPrelim,aPrelim = zpk2tf(zd, pd, 1)
    outFreq, outg0 = freqz(bPrelim, aPrelim, 100/nyq)

    # Return the numerator and denominator of the digital filter                                
    b,a = zpk2tf(zd,pd,k)
    return b, a
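A hedged usage sketch (the snippet assumes numpy plus iirdesign, zpk2tf and freqz from scipy.signal are already imported; the notch frequency and sample rate below are made up):

import numpy as np
from scipy.signal import iirdesign, zpk2tf, freqz, lfilter

fs = 4096.0
fstops = [[60.0, 1.0, 0.1]]            # notch 60 Hz: 1 Hz pass width, 0.1 Hz stop width

b, a = iir_bandstops(fstops, fs)

t = np.arange(0, 1.0, 1.0 / fs)
x = np.sin(2 * np.pi * 60.0 * t) + 0.1 * np.random.randn(t.size)
y = lfilter(b, a, x)                   # the 60 Hz component should be strongly attenuated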
Example #8
def multi_where(vec1, vec2):
    '''Given two vectors, multi_where returns a tuple of indices where those
    two vectors overlap.
    ****THIS FUNCTION HAS NOT BEEN TESTED ON N-DIMENSIONAL ARRAYS*******
    Inputs:
           2 numpy vectors
    Output:
           (xy, yx) where xy is a numpy vector containing the indices of the
           elements in vector 1 that are also in vector 2. yx is a vector
           containing the indices of the elements in vector 2 that are also
           in vector 1.
    Example:
           >> x = np.array([1,2,3,4,5])
           >> y = np.array([3,4,5,6,7])
           >> (xy,yx) = multi_where(x,y)
           >> xy
           array([2,3,4])
           >> yx
           array([0,1,2])
    '''

    OneInTwo = np.array([])
    TwoInOne = np.array([])
    for i in range(vec1.shape[0]):
        if np.where(vec2 == vec1[i])[0].shape[0]:
            OneInTwo = np.append(OneInTwo,i)
            TwoInOne = np.append(TwoInOne, np.where(vec2 == vec1[i])[0][0])

    return (np.int8(OneInTwo), np.int8(TwoInOne))
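A quick check of the docstring example, plus the equivalent one-liner for 1D arrays of unique values (np.intersect1d with return_indices=True, available since NumPy 1.15):

import numpy as np

x = np.array([1, 2, 3, 4, 5])
y = np.array([3, 4, 5, 6, 7])

xy, yx = multi_where(x, y)
print(xy, yx)                                   # [2 3 4] [0 1 2]

_, xi, yi = np.intersect1d(x, y, return_indices=True)
print(xi, yi)                                   # [2 3 4] [0 1 2]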
Example #9
	def get_data(self,name):
		obj = self.read_csv(name)

		date = []
		date.append(obj[0]["Date"])

		high=np.array([],dtype="float32")
		low=np.array([],dtype="float32")
		vol=np.array([],dtype="float32")
		aver=np.array([],dtype="float32")

		before_high=float(obj[0]["High"])
		before_low=float(obj[0]["Low"])
		before_vol=float(obj[0]["Volume"])

		for day in obj[1:]:

			date.append(day["Date"])
			aver=np.append(aver,(float(day["High"])+float(day["Low"]))/2)
			high=np.append(high,(float(day["High"])-before_high)/before_high)
			low=np.append(low,(float(day["Low"])-before_low)/before_low)
			vol=np.append(vol,(float(day["Volume"])-before_vol)/before_vol)

			before_high=float(day["High"])
			before_low=float(day["Low"])
			before_vol=float(day["Volume"])

		output={"start":date[0],"date":date,"high":high,"low":low,"vol":vol,"aver":aver}
		return output
Example #10
def recordDVM(filename='voltdata.npz',sun=False,moon=False,recordLength=np.inf,verbose=True):
    ra = 0
    dec = 0
    raArr = np.ndarray(0)
    decArr = np.ndarray(0)
    lstArr = np.ndarray(0)
    jdArr = np.ndarray(0)
    voltArr = np.ndarray(0)
    
    startTime = time.time()

    while np.less(time.time()-startTime,recordLength):
        if sun:
            raDec = sunPos()
            ra = raDec[0]
            dec = raDec[1]
        startSamp = time.time()
        currVolt = getDVMData()
        currLST = getLST()
        currJulDay = getJulDay()
        raArr = np.append(raArr,ra)
        decArr = np.append(decArr,dec)
        voltArr = np.append(voltArr,currVolt)
        lstArr = np.append(lstArr,currLST)
        jdArr = np.append(jdArr,currJulDay)

        if verbose:
            print('Measuring voltage: ' + str(currVolt) + ' (LST: ' + str(currLST) + '  ' + time.asctime() + ')')
        
        np.savez(filename,ra=raArr,dec=decArr,jd=jdArr,lst=lstArr,volts=voltArr)
        sys.stdout.flush()
        time.sleep(np.max([0,1.0-(time.time()-startSamp)]))
Example #11
    def append_new_point(self, y, x=None):
        self._axis_y_array = np.append(self._axis_y_array, y)
        if x:
            self._axis_x_array = np.append(self._axis_x_array, x)
        else:
            self._axis_x_array = np.arange(len(self._axis_y_array))

        if self.max_plot_points:
            if self._axis_y_array.size > self.max_plot_points:
                self._axis_y_array = np.delete(self._axis_y_array, 0)
                self._axis_x_array = np.delete(self._axis_x_array, 0)

        if self.single_curve is None:
            self.single_curve, = self.axes.plot(
                self._axis_y_array, linewidth=2, marker="s"
            )
        else:
            self.axes.fill(self._axis_y_array, "r", linewidth=2)

        self._axis_y_limits[1] = (
            self._axis_y_array.max() + self._axis_y_array.max() * 0.05
        )
        self.axes.set_ylim(self._axis_y_limits)
        self.single_curve.set_xdata(self._axis_x_array)
        self.single_curve.set_ydata(self._axis_y_array)
        self.axes.relim()
        self.axes.autoscale_view()
        self.fig.canvas.draw()
        self.fig.canvas.flush_events()
        self.axes.grid(True)

        # TODO move y lims as propery
        self.axes.set_ylim(
            (0, self._axis_y_array.max() + self._axis_y_array.max() * 0.05)
        )
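Stripped of the matplotlib plumbing, the rolling-buffer logic above amounts to appending the new point and dropping the oldest one; a self-contained sketch with a made-up stream of values:

import numpy as np

max_plot_points = 5
ys = np.array([])

for y in range(12):                      # made-up incoming stream
    ys = np.append(ys, y)
    if max_plot_points and ys.size > max_plot_points:
        ys = np.delete(ys, 0)            # drop the oldest point

print(ys)   # only the last 5 values remain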
Example #12
def fittedQ(task, explore_agent = random, trials = 1000, 
    regressor = ExtraTreesRegressor, regressor_params = {}, N = 30,
    prev_sample = None):

    Q = None
    n = 0

    def q_prediction(sa):
        return Q.predict(sa) if Q else 0

    state_histories, action_histories, reward_histories = task.run_trials()
    

    #todo add previous sample support
    # if prev_sample:
    #     state_histories = np.concatenate((prev_sample[0], state_histories ))
    #     action_histories = np.concatenate((prev_sample[1], action_histories))
    #     reward_histories = np.concatenate((prev_sample[2], reward_histories))

    # patients = len(state_histories)
    print(state_histories[0])
    print(action_histories[0])
    print(reward_histories[0])

    # unfinished earlier draft of the training loop below; np.zeros() needs a shape
    # while (n < N):
    #     total_turns = sum([len(game) for game in state_histories])
    #     training_data = np.zeros()


    #generate function approximator
    while (n < N) :
        
        #build training set
        training_data = np.zeros((patients*episode_length, state_size+1))
        training_targets = np.zeros(patients*episode_length)
        for patient in range(patients):
            for episode in range(episode_length):
                state_action = np.append(state_histories[patient][episode], action_histories[patient][episode]) 

                next_sa = [np.append(state_histories[patient][episode+1], action) for action in range(num_actions)]
                q = reward_histories[patient][episode] + discount * max([q_prediction(sa) for sa in next_sa])
                
                #convert sa here
                training_data[patient*episode + episode] = state_action
                training_targets[patient*episode + episode] = q

        #train regressor
        Q = regressor(**regressor_params)
        Q.fit(training_data, training_targets)

        n += 1


    #get policy
    def learned_policy(state, rng):
        next_sa = [np.append(state, action) for action in range(num_actions)]
        action = next_sa[np.argmax([q_prediction(sa) for sa in next_sa])][-1]
        return action

    return learned_policy, (state_histories, action_histories, reward_histories), Q
Example #13
def _indtosub_converter(dims, order='F', onebased=True):
    """Converter for changing linear indexing to subscript indexing

    See also
    --------
    Series.indtosub
    """

    _check_order(order)

    def indtosub_inline_onebased(k, dimprod):
        return tuple(int(mod(ceil(float(k)/y) - 1, x) + 1) for x, y in dimprod)

    def indtosub_inline_zerobased(k, dimprod):
        return tuple(int(mod(ceil(float(k+1)/y) - 1, x)) for x, y in dimprod)

    inline_fcn = indtosub_inline_onebased if onebased else indtosub_inline_zerobased

    if size(dims) > 1:
        if order == 'F':
            dimprod = list(zip(dims, append(1, cumprod(dims)[0:-1])))
        else:
            dimprod = list(zip(dims, append(1, cumprod(dims[::-1])[0:-1])[::-1]))
        converter = lambda k: inline_fcn(k, dimprod)
    else:
        converter = lambda k: (k,)

    return converter
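For the zero-based, column-major case the formula above agrees with np.unravel_index; a standalone check with made-up dims (independent of the _check_order helper and the bare numpy names the snippet imports elsewhere):

import numpy as np

dims = (3, 4)
dimprod = list(zip(dims, np.append(1, np.cumprod(dims)[:-1])))

for k in range(int(np.prod(dims))):
    sub = tuple(int(np.mod(np.ceil(float(k + 1) / y) - 1, x)) for x, y in dimprod)
    ref = tuple(int(v) for v in np.unravel_index(k, dims, order='F'))
    print(k, sub, ref)   # sub and ref agree for every k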
Example #14
def opt_queue(mu, K, l, t):
    q = [0] * K
    N = np.zeros([K, K])
    ucb_avg = np.zeros([K, K])
    queue = np.empty((0, K), int)
    for i in range(K):
        mui = np.amax(mu[i, :])
        lam = l[i]
        q[i] = stat_sample(lam, mui, 1, 100)
    queue = np.append(queue, np.array([q]), axis=0)
    c, G, h = schedule(K, l, mu)
    sol = solvers.lp(c, G, h)
    solx = sol["x"]
    hp = []
    for i in range(1, len(solx)):
        hp = hp + [solx[i]]
    result = BVN(hp, K)
    prob = [pit[1] for pit in result]
    values = range(len(prob))
    for s in range(1, t):
        index = weighted_values(values, prob, 1)
        # (q,N,ucb_avg) = one_schedule(result[index][0],mu,K,q,N,ucb_avg)

        one_schedule(result[index][0], mu, K, q, N, ucb_avg, l)
        queue = np.append(queue, np.array([q]), axis=0)

    return queue
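The queue variable above uses a common idiom for accumulating rows of fixed width K; a minimal sketch of just that pattern (the scheduling helpers stat_sample, schedule, BVN, etc. are assumed to live elsewhere):

import numpy as np

K = 3
queue = np.empty((0, K), int)            # no rows yet, but K columns fixed

for s in range(4):
    q = [s, s + 1, s + 2]                # stand-in for the per-step queue lengths
    queue = np.append(queue, np.array([q]), axis=0)

print(queue.shape)   # (4, 3) -- one row per step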
Example #15
def read_fits(ccd, order_frame, soldir, interp=False):
   rm, xpos, target, res, w_c, y1, y2 =  mode_setup_information(ccd.header)

   if target=='upper': 
     target=True
   else:
     target=False


   xarr=np.array([])
   farr=np.array([])
   oarr=np.array([])

   min_order = int(order_frame.data[order_frame.data>0].min())
   max_order = int(order_frame.data[order_frame.data>0].max())
    
   for n_order in np.arange(min_order, max_order):
      try:
         shift_dict, ws = pickle.load(open(soldir+'sol_%i.pkl' % n_order))
      except:
         continue
      x, f = xextract_order(ccd, order_frame, n_order, shift_dict, target=target, interp=interp)
      o=np.ones_like(x)*n_order
      xarr=np.append(xarr,x)
      farr=np.append(farr,f)
      oarr=np.append(oarr,o)
   return xarr,farr,oarr
Example #16
    def pose_callback(self, msg):
        self.cgeopose_ = msg
        self.cpose_ = msg.position
        self.cquat_ = msg.orientation
        pp = geodesy.utm.fromMsg(self.cpose_)
        tnow = rospy.get_time()
                
        if self.current_waypoint <= self.offset:
            print("Arrived at first waypoint, creating fast march explorer.")
            origin = copy.copy(pp)
            origin.easting -= self.X[self.offset, 0]+self.offpos[0]
            origin.northing -= self.X[self.offset, 1]+self.offpos[1]
            self.zero_utm = origin
            self.start_time = tnow
            self.frame_trigger = self.start_time + self.dt
            self.samples = np.array([[0, self.X[0,0], self.X[0,1], self.Y[0]]])
            for i in range(1, self.offset+1):
                self.samples = np.append(self.samples, [[0, self.X[i,0], self.X[i,1], self.Y[i]]], axis=0)
            self.current_waypoint = self.offset+1

        elif self.current_waypoint == self.total_waypoints:
            print("Arrived at final waypoint, saving data.")
            fh = open('timed_log_trial2.p', 'wb')
            pickle.dump(self.samples, fh)
            pickle.dump(self.poses, fh)
            pickle.dump(self.targets, fh)
            fh.close()
            self.current_waypoint+=1
        elif self.current_waypoint < self.total_waypoints:
            clocalpos = self.get_local_coords(pp)
            self.samples = np.append(self.samples, [[tnow-self.start_time, clocalpos[0], clocalpos[1], self.Y[self.current_waypoint]]], axis=0)
            self.current_waypoint+=1
        else:
            print("Extra waypoint reached!")
        print("Waypoint {0} reached at t={1}, x={2}, y={3}, obs={4}".format(self.current_waypoint, *self.samples[-1,:]))
def arraySlidingWindow(result_array, sliding_window_size, filter_ratio):
	array_length = np.size(result_array)
	buffer_array = np.zeros((1), dtype = int)

	for index in range(0, array_length - sliding_window_size):
		window_score = np.sum(result_array[index: index + sliding_window_size])
		if window_score > (sliding_window_size * filter_ratio):
			buffer_array = np.append(buffer_array, 1)
		else:
			buffer_array = np.append(buffer_array, 0)

	buffer_array= np.delete(buffer_array, 0)
	# print buffer_array
	length = np.size(buffer_array)
	flag_array = np.zeros((length), dtype = int)
	pre_value = 0
	for buffer_index , value in enumerate(buffer_array):
		if (pre_value - value) == -1:
			flag_array[buffer_index] = 1
		elif(pre_value - value) == 1:
			flag_array[buffer_index] = -1
		else:
			pass
		pre_value = value
	return flag_array
def stftFiltering(x, fs, w, N, H, filter):
# apply a filter to a sound by using the STFT
# x: input sound, w: analysis window, N: FFT size, H: hop size
# filter: magnitude response of filter with frequency-magnitude pairs (in dB)
# returns y: output sound
	M = w.size                                     # size of analysis window
	hM1 = int(math.floor((M+1)/2))                 # half analysis window size by rounding
	hM2 = int(math.floor(M/2))                     # half analysis window size by floor
	x = np.append(np.zeros(hM2),x)                 # add zeros at beginning to center first window at sample 0
	x = np.append(x,np.zeros(hM1))                 # add zeros at the end to analyze last sample
	pin = hM1                                      # initialize sound pointer in middle of analysis window       
	pend = x.size-hM1                              # last sample to start a frame
	w = w / sum(w)                                 # normalize analysis window
	y = np.zeros(x.size)                           # initialize output array
	while pin<=pend:                               # while sound pointer is smaller than last sample      
	#-----analysis-----  
		x1 = x[pin-hM1:pin+hM2]                      # select one frame of input sound
		mX, pX = DFT.dftAnal(x1, w, N)               # compute dft
	#------transformation-----
		mY = mX + filter                             # filter input magnitude spectrum
	#-----synthesis-----
		y1 = DFT.dftSynth(mY, pX, M)                # compute idft
		y[pin-hM1:pin+hM2] += H*y1                  # overlap-add to generate output sound
		pin += H                                    # advance sound pointer
	y = np.delete(y, range(hM2))                  # delete half of first window which was added in stftAnal
	y = np.delete(y, range(y.size-hM1, y.size))   # add zeros at the end to analyze last sample
	return y
Example #19
    def compress_pdf(self, data):
        """ Compress the PDF

        """
        simple_spline = np.array([], dtype=np.float16)
        index_array = np.array([], dtype=np.uint8)
        for i in range(1, data.shape[1]):
            spline = spl(data[:, 0], data[:, i], ext=1)
            int_zw = spline(self.zlim)
            index_over = np.where(int_zw > self.thresh)[0]

            if len(index_over) < 2:
                print(len(index_over))
                raise ValueError('The grid spacing is too big. This can happen for instance if the PDF resembles a delta function.')

            try:
                index_array = np.append(index_array,
                                        np.array([index_over[0], index_over[-1]], dtype=np.float16))
            except IndexError as e:
                print("Decrease the grid spacing! This can happen if the PDFs are a bit undersmoothed.")
                print(e)
            #which points are above the threshold?
            data_over = np.array(int_zw[index_over[0]:index_over[-1]], dtype=np.float16)
            simple_spline = np.append(simple_spline, data_over)
        return simple_spline, index_array
Example #20
def tomatrix(results, train=True, count = True):

    N = len(results[0][0])+1

    if train:
        if count:
            tuplelist = []
            for i in range(len(results)):
                tuplelist.append(
                    tuple(list(np.append(results[i][0], results[i][1]))))
            Count = Counter(tuplelist).most_common()
            tooutput = np.empty((len(Count),  N+1))

            for i in range(len(Count)):
                tooutput[i, :] = np.append(np.array(Count[i][0]), Count[i][1])

            return tooutput.astype(int)

        else:
            tooutput_ = np.empty((len(results),N))
            for i in range(len(results)):
                tooutput_[i,:] = np.append(results[i][0],results[i][1])
            return tooutput_

    else:
        tooutput = np.empty((len(results), 50+N-1))

        for i in range(len(results)):
            tooutput[i, :] = np.hstack((results[i][1], results[i][0]))

        return tooutput.astype(int)
Example #21
    def _get_variables(self, variables, profiles, profiles_depth,
                       time, x, y, z, block):
        """Wrapper around reader-specific function get_variables()

        Performs some common operations which should not be duplicated:
        - monitor time spent by this reader
        - convert any numpy arrays to masked arrays
        """

        logging.debug('Fetching variables from ' + self.name)
        if profiles is not None and block is True:
            # If profiles are requested for any parameters, we
            # add two fake points at the end of array to make sure that the
            # requested block has the depth range required for profiles
            x = np.append(x, [x[-1], x[-1]])
            y = np.append(y, [y[-1], y[-1]])
            z = np.append(z, [profiles_depth[0], profiles_depth[1]])
        env = self.get_variables(variables, time, x, y, z, block)

        # Convert any numpy arrays to masked arrays
        for var in env.keys():
            if isinstance(env[var], np.ndarray):
                env[var] = np.ma.masked_array(env[var], mask=False)

        # Make sure x and y are floats (and not e.g. int64)
        if 'x' in env.keys():
            env['x'] = np.array(env['x'], dtype=float)
            env['y'] = np.array(env['y'], dtype=float)

        return env
    def prop_ring(self):
        """
        Test properties for a ring, modelled as a thin walled something
        """

        radius = 1.
        # make sure the simple test cases go well
        x = np.linspace(0,radius,100000)
        y = np.sqrt(radius*radius - x*x)
        x = np.append(-x[::-1], x)
        y_up = np.append(y[::-1], y)
        tw1 = np.ndarray((len(x),3), order='F')
        tw1[:,0] = x
        tw1[:,1] = y_up
        tw1[:,2] = 0.01

        tw2 = np.ndarray((len(x),3), order='F')
        y_low = np.append(-y[::-1], -y)
        tw2[:,0] = x
        tw2[:,1] = y_low
        tw2[:,2] = 0.01

        # tw1 and tw2 need to be of the same size, give all zeros
        upper_bound = sp.zeros((4,2), order='F')
        lower_bound = sp.zeros((4,2), order='F')

        st_arr, EA, EIxx, EIyy = properties(upper_bound, lower_bound,
                    tw1=tw1, tw2=tw2, rho=1., rho_tw=1., E=1., E_tw=1.)

        headers = HawcPy.ModelData().st_column_header_list
        print('\nRING PROPERTIES')
        for index, item in enumerate(headers):
            tmp = item + ' :'
            print(tmp.rjust(8), st_arr[index])
def BED_extract(path, nfft):
  list_data = numpy.array([])
  list_label = numpy.array([])
  
  """
  dic = {'W':[1,0],'L':[0,1],'E':[0,1],'A':[0,1],'F':[1,0],'T':[0,1],'N':[0.5,0.5]}
  """
  dic = {'W':[0,1],'L':[0,1],'E':[0,1],'A':[0,1],'F':[1,0],'T':[0,1],'N':[0.5,0.5]}
  

  for root, dir, files in os.walk(path):

    rootpath = os.path.join(os.path.abspath(path), root)

    for file in files:
      if os.path.splitext(file)[1].lower()=='.wav':
        filepath = os.path.join(rootpath, file)

        SR, X = wavfile.read(filepath)

        _, _, spec = mfcc(X, fs=SR, nfft=(nfft*2))

        list_data = numpy.append(list_data, numpy.mean(spec, axis=0)[:nfft]/numpy.max(spec))
        list_label = numpy.append(list_label, dic[file[5]])

  list_data = numpy.reshape(list_data, (len(list_data) // nfft, nfft))
  list_label = numpy.reshape(list_label, (len(list_label) // label_length, label_length))

  return list_data, list_label
Example #24
def cap(guess_vector):
    """
    This takes the Euler equations, and sets them equal to zero for an f-solve
    Remember that Keq was found by taking the derivative of the sum of the 
        utility functions, with respect to k in each time period, and that 
        leq was the same, but because l only shows up in 1 period, it has a
        much smaller term.

    ### Paramaters ###
    guess_vector: The first half is the intial guess for the kapital, and
        the second half is the intial guess for the labor
    """
    #equations for keq
    ks = np.zeros(periods)
    ks[1:] = guess_vector[:periods-1]
    ls  = guess_vector[periods-1:]
    kk  = ks[:-1]
    kk1 = ks[1:]
    kk2 = np.zeros(periods-1)
    kk2[:-1] = ks[2:]
    lk  = ls[:-1]
    lk1 = ls[1:]
    #equation for leq
    ll = np.copy(ls)
    kl = np.copy(ks)
    kl1 = np.zeros(periods)
    kl1[:-1] = kl[1:]
    w = wage(ks, ls)
    r = rate(ks, ls)
    keq = ((lk*w+(1.+r-delta)*kk - kk1)**-gamma - (beta*(1+r-delta)*(lk1*w+(1+r-delta)*kk1-kk2)**-gamma))
    leq = ((w*(ll*w + (1+r-delta)*kl-kl1)**-gamma)-(1-ll)**-sigma)
    error = np.append(keq, leq)

    return error
Example #25
def Mie_ab(m,x):
#  http://pymiescatt.readthedocs.io/en/latest/forward.html#Mie_ab
  mx = m*x
  nmax = np.round(2+x+4*(x**(1/3)))
  nmx = np.round(max(nmax,np.abs(mx))+16)
  n = np.arange(1,nmax+1)
  nu = n + 0.5

  sx = np.sqrt(0.5*np.pi*x)
  px = sx*jv(nu,x)

  p1x = np.append(np.sin(x), px[0:int(nmax)-1])
  chx = -sx*yv(nu,x)

  ch1x = np.append(np.cos(x), chx[0:int(nmax)-1])
  gsx = px-(0+1j)*chx
  gs1x = p1x-(0+1j)*ch1x

  # B&H Equation 4.89
  Dn = np.zeros(int(nmx),dtype=complex)
  for i in range(int(nmx)-1,1,-1):
    Dn[i-1] = (i/mx)-(1/(Dn[i]+i/mx))

  D = Dn[1:int(nmax)+1] # Dn(mx), drop terms beyond nMax
  da = D/m+n/x
  db = m*D+n/x

  an = (da*px-p1x)/(da*gsx-gs1x)
  bn = (db*px-p1x)/(db*gsx-gs1x)

  return an, bn
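A hedged usage sketch (jv and yv are the Bessel functions from scipy.special that the snippet relies on; the refractive index and size parameter below are made up):

import numpy as np
from scipy.special import jv, yv

m = 1.33 + 0.01j      # complex refractive index, water-like
x = 5.0               # size parameter 2*pi*r/lambda

an, bn = Mie_ab(m, x)
print(an.shape, bn.shape)   # one coefficient per expansion order n = 1..nmax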
Example #26
    def find_offset_old(self,datafile, nonlinmin, nonlinmax, exclude, threshold):
        '''find_offset is used to determine the systematic offset present
        in the experimental setup that causes data to not be symmetric
        about zero input angle. It reads in the output of laserBench and
        returns the offset (in degrees)'''
        
        input_a, output_a = np.loadtxt(datafile,usecols=(0,1),unpack=True)
        
        for e in exclude:
            did = np.where(input_a == e)
            output_a = np.delete(output_a, did)
            input_a = np.delete(input_a, did)

        pidx = np.where(input_a > nonlinmax)
        nidx = np.where(input_a < nonlinmin)
        
        in_a = np.append(input_a[nidx],input_a[pidx])
        out_a = np.append(-1*output_a[nidx],output_a[pidx])
        error = np.zeros(in_a.size)+1

        b = 1000.
        offset = 0.
        while abs(b) > threshold:
            m, b = ADE.fit_line(in_a,out_a,error)
            offset += b
            in_a += b

        return offset
Example #27
def read_power(file, datadir='data/'):
    """ 
    29-apr-2009/dintrans: coded
    t,dat=read_power(name_power_file)
    Read a power spectra file like 'data/poweru.dat'
    """ 
    filename = path.join(datadir, file)
    infile = open(filename, 'r')
    lines = infile.readlines()
    infile.close()
#
#  find the number of blocks (t,power) that should be read
#
    dim=read_dim(datadir=datadir)
    nblock=int(len(lines)/int(N.ceil(dim.nxgrid/2/8.)+1))
#
    with open(filename, 'r') as infile:
        t=N.zeros(1, dtype='Float32')
        data=N.zeros(1, dtype='Float32')
        for i in range(nblock):
            st=infile.readline()
            t=N.append(t, float(st))
            for ii in range(int(N.ceil(dim.nxgrid/2/8.))):
                st=infile.readline()
                data=N.append(data, N.asarray(st.split()).astype('f'))

    t=t[1:] ; data=data[1:]
    nt=len(t) ; nk=int(len(data)/nt)
    data=data.reshape(nt, nk)
    return t, data
Example #28
 def ReadGeoPolygonLst(self, polygonLst ):
     """
     Read GeoPolygon List from a txt file
     longitude latitude
     """
     f = open(polygonLst, 'r');
     NumbC=0;
     newpolygon=False;
     for lines in f.readlines():
         lines=lines.split();
         if newpolygon==True:
             lon=lines[0];
             if lon=='>':
                 newpolygon=False;
                 self.append(geopolygon)
                 continue;
             else:
                 lon=float(lines[0]);
                 lat=float(lines[1]);
                 geopolygon.lonArr=np.append(geopolygon.lonArr, lon);
                 geopolygon.latArr=np.append(geopolygon.latArr, lat);
         a=lines[0];
         b=lines[1];
         if a=='#' and b!='@P':
             continue;
         if b=='@P':
             NumbC=NumbC+1;
             newpolygon=True;
             geopolygon=GeoPolygon();
             continue;
     f.close()
     print('End of reading', NumbC, 'geological polygons')
     return
Example #29
def FindBigStuff(data,xsd =3,sd_method = 'Quian'):
    
    #s = np.std(data,0) * xsd
    
    #print s
    spikelist = np.array([0,0,0])[None,...]
    m,n = data.shape
    s = np.zeros(n)
    for i in range(n):
        
        x = data[:,i]
        if sd_method == 'Quian':
            s[i] = xsd * np.median(np.abs(x)) / 0.6745
        elif sd_method == 'STD':
            s[i] = np.std(x) * xsd
        taux = np.diff(np.where(abs(x)>s[i],1,0))
        times = np.nonzero(taux==1)[0]
        times2 = np.nonzero(taux==-1)[0]
        if len(times) !=0:
            if len(times)-1 == len(times2):
                times2 = np.append(times2,m)
            elif len(times) == len(times2)-1:
                times = np.append(0,times)
            chs = np.ones(times.shape)*i
            aux = np.append(chs[...,None],times[...,None],1)   
            aux = np.append(aux,times2[...,None],1)  
            spikelist = np.append(spikelist,aux,0)
    return np.delete(spikelist, (0), axis=0),s
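A self-contained usage sketch with a made-up two-channel recording, one obvious spike per channel; each returned row is [channel, start index, end index] of a threshold crossing:

import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(0.0, 1.0, size=(1000, 2))
data[200, 0] = 25.0          # inject a large spike on channel 0
data[600, 1] = -25.0         # and a negative one on channel 1

spikelist, thresholds = FindBigStuff(data, xsd=3, sd_method='STD')
print(spikelist)
print(thresholds)            # per-channel detection thresholds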
Example #30
def valid_test_Ngram(filepath, words2index, N, test=False):
    results = []
    if test == False:
        with open(filepath) as f:
            i = 1
            for line in f:
                lsplit = line.split()
                if lsplit[0] == 'Q':
                    topredict = np.array([words2index[x] for x in lsplit[1:]])
                if lsplit[0] == 'C':
                    l = np.append(
                        np.repeat(words2index['<s>'], N-1), [words2index[x] for x in lsplit[1:-1]])
                    lastNgram = l[-N+1:]
                    results.append((lastNgram, topredict))
    else:
        with open(filepath) as f:
            i = 1
            for line in f:
                lsplit = line.split()
                if lsplit[0] == 'Q':
                    topredict = np.array([words2index[x] for x in lsplit[1:]])
                if lsplit[0] == 'C':
                    l = np.append(
                        np.repeat(words2index['<s>'], N-1), [words2index[x] for x in lsplit[1:-1]])
                    lastNgram = l[-N+1:]
                    results.append((lastNgram, topredict))
    return results
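The only np.append here builds the padded context: N-1 start symbols are prepended and the last N-1 token ids form the final N-gram context. A small standalone sketch with a made-up vocabulary:

import numpy as np

words2index = {'<s>': 0, 'the': 1, 'cat': 2, 'sat': 3}
N = 3
context_tokens = ['the', 'cat', 'sat']

l = np.append(np.repeat(words2index['<s>'], N - 1),
              [words2index[x] for x in context_tokens])
print(l)            # [0 0 1 2 3]
print(l[-N + 1:])   # [2 3] -- the last N-1 token ids fed to the model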
 def init(self):
     self.obs = self.obs_init.copy()
     self.t = 0
     return np.append(self.obs, self.t)
Example #32
def getdata_onehot(testdatafile):
    ### READ in test dataset
    """ Reads the test data file and extracts allele subtype,
            peptide length, and measurement type. Returns these information
            along with the peptide sequence and target values.
    """
    print("Test peptide name: ", testdatafile)
    import os, re
    test_set = os.path.join("./DATA", "test_data", testdatafile)
    print("test_set name: ", test_set)
    test_data = pd.read_csv(test_set, delim_whitespace=True)
    #test_data = pd.read_csv('./DATA/test_data/A0202',sep="\t")
    '''
    [77 rows x 16 columns]
    >>> test_data.columns
    Index(['Date', 'IEDB', 'Allele', 'Peptide_length', 'Measurement_type',
           'Peptide_seq', 'Measurement_value', 'NetMHCpan', 'SMM', 'ANN', 'ARB',
           'SMMPMBEC', 'IEDB_Consensus', 'NetMHCcons', 'PickPocket', 'mhcflurry'],
          dtype='object')
    '''
    import re
    peptide = re.search(r'[A-Z]\*\d{2}:\d{2}', test_data['Allele'][0]).group()
    peptide_length = len(test_data['Peptide_seq'][0])
    measurement_type = test_data['Measurement_type'][0]

    if measurement_type.lower() == 'binary':
        test_data['Measurement_value'] = np.where(
            test_data.Measurement_value == 1.0, 1, 0)
    else:
        test_data['Measurement_value'] = np.where(
            test_data.Measurement_value < 500.0, 1, 0)

    test_label = test_data.Measurement_value

    ### end of reading test dataset

    ### NOW, READ training dataset
    """ Reads the training data file and returns the sequences of peptides
        and target values
    """
    train_set = './DATA/train_data/proteins.txt'
    df = pd.read_csv(train_set, delim_whitespace=True, header=0)
    '''
    [141224 rows x 3 columns]>
    >>> df.columns
    Index(['Peptide', 'HLA', 'BindingCategory'], dtype='object')
    '''
    #df.columns = ['sequence', 'HLA', 'target']
    # build training matrix
    #df.shape #(141224, 3)
    df = df[df.HLA == peptide]
    #df.shape #(14736, 3)
    df = df[df['Peptide'].map(len) == peptide_length]
    # df.shape #(10549, 3)
    # remove any peptide with  unknown variables
    df = df[df.Peptide.str.contains('X') == False]
    df = df[df.Peptide.str.contains('B') == False]
    #df.shape  #(10547, 3)
    # remap target values to 1's and 0's
    df['BindingCategory'] = np.where(df.BindingCategory == 1, 1, 0)
    ###
    """ Reads the specified train and test files and return the
            relevant design and target matrix for the learning pipeline.
    """
    # map the training peptide sequences to their integer index
    featureMatrix = np.empty((0, peptide_length, len(allSequences)), int)
    for num in range(len(df.Peptide)):
        featureMatrix = np.append(featureMatrix,
                                  [Pept_OneHotMap(df.Peptide.iloc[num])],
                                  axis=0)

    # map the test peptide sequences to their integer index
    testMatrix = np.empty((0, peptide_length, len(allSequences)), int)
    for num in range(len(test_data.Peptide_seq)):
        testMatrix = np.append(
            testMatrix, [Pept_OneHotMap(test_data.Peptide_seq.iloc[num])],
            axis=0)
    ###
    trainlen = len(featureMatrix)
    testlen = len(testMatrix)
    ss1 = list(range(trainlen))
    rnd.shuffle(ss1)
    valsize = 20  #Validation set size is 20 for 3 validations dataset
    X_val1 = featureMatrix[ss1[0:valsize]]
    Y_val1 = df['BindingCategory'].iloc[ss1[0:valsize]]
    X_val2 = featureMatrix[ss1[valsize:(2 * valsize)]]
    Y_val2 = df['BindingCategory'].iloc[ss1[valsize:(2 * valsize)]]
    X_val3 = featureMatrix[ss1[(2 * valsize):(3 * valsize)]]
    Y_val3 = df['BindingCategory'].iloc[ss1[(2 * valsize):(3 * valsize)]]
    labelmatrix = df.BindingCategory
    featureMatrix = np.delete(featureMatrix, ss1[0:(3 * valsize)], axis=0)
    labelmatrix = labelmatrix.drop(labelmatrix.index[ss1[0:(3 * valsize)]])
    # combine training and test datasets
    datasets = {}
    datasets['X_train'] = featureMatrix
    datasets['Y_train'] = labelmatrix.values  #df.BindingCategory.as_matrix()
    datasets['X_test'] = testMatrix
    datasets['Y_test'] = test_data.Measurement_value.values
    datasets['X_val1'] = X_val1
    datasets['Y_val1'] = Y_val1.values
    datasets['X_val2'] = X_val2
    datasets['Y_val2'] = Y_val2.values
    datasets['X_val3'] = X_val3
    datasets['Y_val3'] = Y_val3.values
    return datasets
def calc_dataset(dataset, img_files):
    confusion = []

    for x in range(len(yolo.names)):
        confus = []
        for y in range(len(yolo.names)):
            confus.append(0)
        confusion.append(confus)

    class_predictions = []

    class_totals = []

    progress = 0

    for j in range(len(img_files)):

        totals = []

        for i in range(len(yolo.names)):
            totals.append(0)

        if (j / len(img_files) > progress):
            sys.stdout.write("50%" if progress == 0.5 else " = ")
            sys.stdout.flush()
            progress += 0.1

        class_predictions = []

        class_totals = []

        gc.collect()
        lower_index = j

        img = imread(
            img_files[lower_index].replace("/data/acp15tdw",
                                           "/home/thomas/experiments"), 0)

        height, width = img.shape

        area = height * width
        size_cat = "xl"

        if area < area_thresholds[0]:
            size_cat = "xs"
        elif area < area_thresholds[1]:
            size_cat = "s"
        elif area < area_thresholds[2]:
            size_cat = "l"

        img_file = img_files[lower_index]

        v_imgs, v_labels, v_obj_detection = load_files([img_file])

        v_imgs = (np.array(v_imgs) / 127.5) - 1

        v_labels = np.array(v_labels) / cfg.grid_shape[0]

        v_obj_detection = np.array(v_obj_detection)

        cfg.object_detection_threshold = confidence_threshold

        widget_q = np.sum(v_labels[..., 4])

        busy_cat = "crowded"

        if widget_q < quantity_thresholds[0]:
            busy_cat = "desolate"
        elif widget_q < quantity_thresholds[1]:
            busy_cat = "few"
        elif widget_q < quantity_thresholds[2]:
            busy_cat = "many"
        if len(v_labels) == 0:
            continue

        if one_class:
            v_obj_detection = np.zeros_like(v_obj_detection)

        v_labels_classes = np.append(np.expand_dims(v_obj_detection, axis=-1),
                                     v_labels,
                                     axis=-1)

        res, correct, iou = sess.run(
            [yolo.output, yolo.matches, yolo.best_iou],
            feed_dict={
                yolo.train_bounding_boxes: v_labels,
                yolo.train_object_recognition: v_obj_detection,
                yolo.x: v_imgs,
                yolo.anchors: anchors,
                yolo.iou_threshold: iou_threshold,
                yolo.object_detection_threshold: confidence_threshold
            })

        boxes = yolo.convert_net_to_bb(res, filter_top=True)

        if one_class:
            boxes[..., 0] = 0

        real_boxes = np.reshape(v_labels_classes, [boxes.shape[0], -1, 6])

        boxes, real_ious = yolo.calculate_max_iou(boxes, real_boxes)

        labels = boxes[0]

        if one_class:
            labels[..., 0] = 0

        precision = 0
        recall = 0

        truth_labels = np.reshape(v_labels, [-1, v_labels.shape[2]])

        ls = labels.tolist()

        for rc in range(len(yolo.names)):

            pred_boxes = 0
            correct_boxes = 0
            matched_boxes = 0
            truth_boxes = 0
            for b in range(len(ls)):
                box = ls[b]

                if box[0] == rc and box[5] > confidence_threshold:
                    pred_boxes += 1
                    if box[6] > 0.3:
                        correct_boxes += 1

            r_boxes = real_boxes[0]

            r_boxes = r_boxes[r_boxes[..., 5] > 0]

            for i in range(len(r_boxes)):
                r_box = r_boxes[i]
                max_iou = real_ious[0][i]
                if r_box[0] == rc:
                    truth_boxes += 1
                    if max_iou > 0.3:
                        matched_boxes += 1

            sens_string = ""

            if pred_boxes > 0:
                sens_string += img_file + "," + dataset + "," + yolo.names[rc] + ",precision," + \
                              str(correct_boxes/pred_boxes) + "," + str(correct_boxes) + "," + str(pred_boxes) + "\n"
            if truth_boxes > 0:
                sens_string += img_file + "," + dataset + "," + yolo.names[rc] + ",recall," + \
                              str(matched_boxes/truth_boxes) + "," + str(matched_boxes) + "," + str(truth_boxes) + "\n"
            if len(sens_string) > 0:
                with open("validation.csv", "a") as file:
                    file.write(sens_string + "\n")

        img, h, w, = res.shape[:3]

        img -= 1
        h -= 1
        w -= 1

        o_img, o_h, o_w, = res.shape[:3]

        img = o_img - 1

        while img >= 0:
            h = o_h - 1
            while h >= 0:
                w = o_w - 1
                while w >= 0:
                    lab = res[img, h, w]
                    if v_labels[img, h, w, 4] > 0:
                        clazz = np.argmax(lab[25:])
                        if one_class:
                            clazz = 0
                        c_clazz = v_obj_detection[img, h, w]
                        confusion[clazz][c_clazz] += 1
                        totals[c_clazz] += 1
                    w = w - 1
                h = h - 1
            img = img - 1

        #v_obj_detection = np.zeros_like(v_obj_detection)

        for rc in range(len(yolo.names)):
            if (len(class_predictions) < rc + 1):
                class_predictions.append([])
            if (len(class_totals) < rc + 1):
                class_totals.append(0)

            cl_equals = np.where(
                res[..., 4] > confidence_threshold,
                np.equal(v_obj_detection,
                         np.zeros_like(v_obj_detection) + rc), 0)

            cl_quantity = np.sum(cl_equals.astype(np.int32))

            if cl_quantity > 0:
                class_totals[rc] += cl_quantity

                for ic in range(cfg.grid_shape[0]):
                    for jc in range(cfg.grid_shape[1]):

                        label = labels[(jc * cfg.grid_shape[0]) + ic]

                        if label[5] > confidence_threshold and int(
                                label[0]) == rc:
                            class_predictions[rc].append(
                                [label[5], label[6] > iou_threshold, label[6]])

        del v_imgs
        del v_labels
        del v_obj_detection

        for rc in range(len(class_predictions)):

            if (class_totals[rc] == 0):
                class_totals[rc] = 1

            class_predictions[rc] = sorted(class_predictions[rc],
                                           key=lambda box: -box[0])
            correct_n = 0

            for box in range(len(class_predictions[rc])):
                correct_n += class_predictions[rc][box][1]

            pred_count = len(class_predictions[rc])

            if pred_count == 0:
                pred_count = 1

            total_count = totals[rc]

            if total_count == 0:
                continue
                total_count = 1

    confusion_s = ""

    for x in range(len(yolo.names)):
        for y in range(len(yolo.names)):
            confusion_s += yolo.names[x] + "," + yolo.names[y] + "," + str(
                confusion[x][y]) + "," + dataset + "," + str(
                    class_totals[y]) + "\n"

    with open("confusion.csv", "a") as file:
        file.write(confusion_s + "\n")

    print(totals)

    gc.collect()
Example #34
def predict(args, model, tokenizer, prefix=""):

    predict_task_names = (args.task_name,)
    predict_outputs_dirs = (args.output_dir,)

    results = {}
    for predict_task, predict_output_dir in zip(predict_task_names, predict_outputs_dirs):
        predict_dataset = load_and_cache_examples(args, predict_task, tokenizer, data_type='predict')
        if not os.path.exists(predict_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(predict_output_dir)
        per_gpu_predict_batch_size = args.per_gpu_eval_batch_size
        args.predict_batch_size = per_gpu_predict_batch_size * max(1, args.n_gpu)
        predict_sampler = SequentialSampler(predict_dataset)# if args.local_rank == -1 else DistributedSampler(predict_dataset)
        predict_dataloader = DataLoader(predict_dataset, sampler=predict_sampler, batch_size=args.predict_batch_size,
                                     collate_fn=collate_fn)

        # predict!
        logger.info("***** Running predict {} *****".format(prefix))
        logger.info("  Num examples = %d", len(predict_dataset))
        logger.info("  Batch size = %d", args.predict_batch_size)
        predict_loss = 0.0
        nb_predict_steps = 0
        preds = None
        out_label_ids = None
        pbar = ProgressBar(n_total=len(predict_dataloader), desc="predict")
        for step, batch in enumerate(predict_dataloader):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                inputs = {'input_ids': batch[0],
                          'attention_mask': batch[1],
                          'labels': batch[3]}
                inputs['token_type_ids'] = batch[2]
                outputs = model(**inputs)
                tmp_predict_loss, logits = outputs[:2]
                predict_loss += tmp_predict_loss.mean().item()
            nb_predict_steps += 1
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs['labels'].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
            pbar(step)
        print(' ')
        if 'cuda' in str(args.device):
            torch.cuda.empty_cache()
        predict_loss = predict_loss / nb_predict_steps
        preds = np.argmax(preds, axis=1)
        logger.info('Predictions: {}'.format(preds))
        preds_out_path = os.path.join(args.output_dir, 'preds_out.txt')
        with open(preds_out_path, 'w', encoding='utf-8') as f:
            for i in range(len(preds)):
                
                f.write('predicted: %s - true: %s\n' % (preds[i], out_label_ids[i]))
        result = metrics(predict_task, preds, out_label_ids)
        logger.info('Computed metrics: {}'.format(result))
        results.update(result)
        logger.info("***** predict results {} *****".format(prefix))
        for key in sorted(result.keys()):
            logger.info("  %s = %s", key, str(result[key]))
    return results
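The preds/out_label_ids accumulation above is a generic pattern: stack each batch's outputs along axis 0 and take the argmax at the end. A self-contained sketch with made-up per-batch logits:

import numpy as np

preds = None
for batch_logits in (np.random.rand(4, 3) for _ in range(5)):   # 5 made-up batches of 4 examples
    if preds is None:
        preds = batch_logits
    else:
        preds = np.append(preds, batch_logits, axis=0)

print(preds.shape)               # (20, 3)
print(np.argmax(preds, axis=1))  # one predicted class per example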
Example #35
    def add_edges(
        self,
        source_docs: Sequence[Union['Document', str]],
        dest_docs: Sequence[Union['Document', str]],
        edge_features: Optional[Sequence[Optional[Dict]]] = None,
    ):
        """
        Add edges to the graph connecting docs from `source_docs` with docs from `dest_docs`

        :param source_docs: Iterable of docs containing the source nodes
        :param dest_docs: Iterable of docs containing the destination nodes
        :param edge_features: Optional features dictionary to be added to the new created edges
        """
        from scipy.sparse import coo_matrix

        assert len(source_docs) == len(dest_docs), (
            'the number of source documents must match the number of '
            'destination documents '
        )
        assert edge_features is None or len(source_docs) == len(edge_features)

        is_documents_source = isinstance(source_docs[0], Document)
        is_documents_dest = isinstance(dest_docs[0], Document)

        for k, (doc1, doc2) in enumerate(zip(source_docs, dest_docs)):
            doc1_id = doc1.id if is_documents_source else doc1
            doc2_id = doc2.id if is_documents_dest else doc2

            if is_documents_source:
                self.add_single_node(doc1)
            else:
                assert (
                    doc1_id in self.nodes
                ), 'trying to add an edge from a node not in the graph'
            if is_documents_dest:
                self.add_single_node(doc2)
            else:
                assert (
                    doc2_id in self.nodes
                ), 'trying to add an edge from a node not in the graph'

            edge_key = self._get_edge_key(doc1_id, doc2_id)

            if edge_features is not None:
                self.edge_features[edge_key] = edge_features[k]
            else:
                if edge_key not in self.edge_features:
                    self.edge_features[edge_key] = None

        # manipulate the adjacency matrix in a single shot
        current_adjacency = self.adjacency
        source_node_offsets = np.array(
            [
                self._nodes._index_map[source.id if is_documents_source else source]
                for source in source_docs
            ]
        )
        target_node_offsets = np.array(
            [
                self._nodes._index_map[target.id if is_documents_dest else target]
                for target in dest_docs
            ]
        )

        if current_adjacency is None:
            row = source_node_offsets
            col = target_node_offsets
            data = np.ones(len(source_node_offsets), dtype=int)
        else:
            row = np.append(current_adjacency.row, source_node_offsets)
            col = np.append(current_adjacency.col, target_node_offsets)
            data = np.append(
                current_adjacency.data, np.ones(len(source_node_offsets), dtype=int)
            )

        NdArray(
            self._pb_body.graph.adjacency,
        ).value = coo_matrix((data, (row, col)))
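The adjacency update boils down to extending a COO matrix by appending to its row, col and data arrays and rebuilding it; a standalone sketch with a made-up 4-node graph:

import numpy as np
from scipy.sparse import coo_matrix

current = coo_matrix((np.array([1, 1]), (np.array([0, 1]), np.array([1, 2]))), shape=(4, 4))

new_sources = np.array([2, 3])
new_targets = np.array([3, 0])

row = np.append(current.row, new_sources)
col = np.append(current.col, new_targets)
data = np.append(current.data, np.ones(len(new_sources), dtype=int))

updated = coo_matrix((data, (row, col)), shape=(4, 4))
print(updated.toarray())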
Example #36
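Example #36 is a module-level script; a hedged sketch of the definitions the loop below assumes (rule number, lattice size, step count and initial state are made up here):

import array
import itertools
import numpy as np
import matplotlib.pyplot as plt

rulen = 110                               # elementary CA rule number
rule = np.binary_repr(rulen, width=8)     # 8-bit rule table, rule[7 - code] gives the new cell
CA_size = 101                             # number of cells
steps = 100                               # number of time steps
x = np.zeros(CA_size, dtype=int)
x[CA_size // 2] = 1                       # single seed cell
nhoods = np.zeros(CA_size, dtype=int)     # neighbourhood codes, recomputed every step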
for j in range(steps):
    for i in range(CA_size):
        if i != CA_size-1:
            ii = i+1
        else:
            ii = 0
        if j == 0:
           nhoods[i] = x[i-1]*4+x[i]*2+x[ii]
        else:
           nhoods[i] = x[j,i-1]*4+x[j,i]*2+x[j,ii]
    new_x = array.array("i",itertools.repeat(0,CA_size))
#apply rule
    for i in range(CA_size):
        new_x[i] = int(rule[7-nhoods[i]])
    if j == 0:
        x = np.append([x],[new_x], axis = 0)
    else:
        x = np.append(x,[new_x], axis = 0)

#write output as portable bitmap (PBM)
#first line is "P1 width height" followed by
#height lines of 0's and 1's of length width

# Lets plot
fig, ax = plt.subplots()

image = x
ax.imshow(image, cmap=plt.cm.gist_yarg, interpolation='nearest')
ax.set_title('ECA Rule #'+str(rulen))

# Move left and bottom spines outward by 10 points
Example #37
def SED(freq, freqs, fluxes, errs, models='pow', figname=None):
    """Fit SED models to an individual source and return the model params and errors along with the expected flux at a given frequency, for each input model.
    Lists must be the same length and contain at least two elements, all with the same units (ideally MHz and Jy).

    Arguments:
    ----------
    freq : float
        The frequency at which to calculate the flux.
    freqs : list
        A list of frequencies in the same units.
    fluxes : list
        A list of fluxes in the same units.
    errs : list
        A list of flux uncertainties in the same units.

    Keyword arguments:
    ------------------
    models : string or list
        A single model or list of models to fit (e.g. ['pow','FFA','SSA']).
    figname : string
        Write a figure of the radio spectra and model to file, using this filename. Use None to not write to file.

    Returns:
    --------
    fit_models : list
        A list of fitted models.
    names : 2D list
        A list of lists of names of fitted parameters, for each input model.
    params : 2D list
        A list of lists of fitted parameters, for each input model.
    errors : 2D list
        A list of lists of uncertainties on the fitted parameters, for each input model.
    fitted_fluxes : list
        A list of fitted fluxes at the input frequency, for each input model.
    rcs : list
        A list of reduced chi squared values, for each input model.
    BICs : list
        A list of Bayesian Information Criteria (BIC) values, for each input model."""

    #initial guesses of different params
    S_max = max(fluxes)
    nu_max = freqs[fluxes == S_max][0]
    alpha = -0.8
    beta = 1 - 2 * alpha
    nu_br = np.mean(freqs)
    p = 0.5

    #initial guesses of different models
    params = {
        'pow': [S_max, alpha],
        'powcibreak': [S_max, alpha, nu_br],
        'powjpbreak': [S_max, alpha, nu_br],
        'curve': [S_max, nu_max, 1, alpha],
        'ssa': [S_max, beta, nu_max],
        'ssacibreak': [S_max, beta, nu_max, nu_br],
        'ssajpbreak': [S_max, beta, nu_max, nu_br],
        'ffa': [S_max, alpha, nu_max],
        'bicffa': [S_max, alpha, p, nu_max],
        'bicffacibreak': [S_max, alpha, p, nu_max, nu_br],
        'bicffajpbreak': [S_max, alpha, p, nu_max, nu_br]
    }

    #different SED models from functions above
    funcs = {
        'pow': powlaw,
        'powcibreak': pow_CIbreak,
        'powjpbreak': pow_JPbreak,
        'curve': curve,
        'ssa': SSA,
        'ssacibreak': SSA_CIbreak,
        'ssajpbreak': SSA_JPbreak,
        'ffa': FFA,
        'bicffa': Bic98_FFA,
        'bicffacibreak': Bic98_FFA_CIbreak,
        'bicffajpbreak': Bic98_FFA_JPbreak
    }

    #matplotlib colours
    colours = {
        'pow': 'black',
        'powcibreak': 'b',
        'powjpbreak': 'violet',
        'curve': 'r',
        'ssa': 'g',
        'ssacibreak': 'r',
        'ssajpbreak': 'g',
        'ffa': 'orange',
        'bicffa': 'r',
        'bicffacibreak': 'b',
        'bicffajpbreak': 'r'
    }

    #labels
    labels = {
        'pow': 'Power law',
        'powcibreak': 'Power law\n + CI break',
        'powjpbreak': 'Power law\n + JP break',
        'curve': 'Tschager+03 Curve',
        'ssa': 'Single SSA',
        'ssacibreak': 'Single SSA\n + CI break',
        'ssajpbreak': 'Single SSA\n + JP break',
        'ffa': 'Single FFA',
        'bicffa': 'Bicknell+98 FFA',
        'bicffacibreak': 'Bicknell+98 FFA\n + CI break',
        'bicffajpbreak': 'Bicknell+98 FFA\n + JP break'
    }

    #store used models, fitted parameters and errors, fitted fluxes, reduced chi squared and BIC
    fit_models,fit_params,fit_param_errors,fitted_fluxes,rcs,BICs = [],[],[],[],[],np.array([])

    #convert single model to list
    if type(models) is str:
        models = [models]

    for model in models:
        model = model.lower()

        #fit model if DOF >= 1
        if len(freqs) >= len(params[model]) + 1:
            try:
                #perform a least squares fit
                popt, pcov = opt.curve_fit(funcs[model],
                                           freqs,
                                           fluxes,
                                           p0=params[model],
                                           sigma=errs,
                                           maxfev=10000)

                #add all fit info to lists
                fit_models.append(model)
                fit_params.append(popt)
                fit_param_errors.append(np.sqrt(np.diag(pcov)))
                RCS, bic = fit_info(fluxes, funcs[model](freqs, *popt), errs,
                                    len(popt))
                rcs.append(RCS)
                BICs = np.append(BICs, bic)
                fitted_fluxes.append(funcs[model](freq, *popt))
            except (ValueError, RuntimeError) as e:
                print("Couldn't find good fit for {0} model.".format(model))
                print(e)
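A minimal usage sketch (not part of the original source): the fitting routine's own name and return statement are not shown above, so fit_sed below is a hypothetical stand-in for it, the flux values are invented, and the return tuple is assumed to match the seven items listed in the docstring.

import numpy as np

freqs = np.array([76., 84., 92., 99., 107., 115., 122., 130.])   # MHz
fluxes = np.array([1.8, 1.9, 2.0, 2.0, 1.9, 1.8, 1.7, 1.6])      # Jy
errs = 0.1 * fluxes
# fit a plain power law and a single FFA model, evaluating both at 150 MHz
fit_models, names, params, errors, fitted_fluxes, rcs, BICs = fit_sed(
    150.0, freqs, fluxes, errs, models=['pow', 'ffa'], figname=None)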
Example #38
0
# x_test = x_test.reshape((1, x_test.shape[0], 1))
# x_test = np.reshape(x_test, (x_test.shape[0], 1, x_test.shape[1], 1))
# test = x_test.shape[1]
#Load Model
model = tf.keras.models.load_model('C:/Projects/Stock Tools/Model/stock_prediction.h5')

print('predicting model')
days_predicted = len(dataset_test)
predictions_list = np.zeros(shape=(1,1))
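# autoregressive rollout: predict one step, append the prediction to the end of the
# input window, drop the oldest sample, and repeat for every day to be predicted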
for x in range(days_predicted):
    x_test2 = array(x_test)
    x_test3 = x_test2.reshape((1, x_test2.shape[0], 1))
    predictions = model.predict(x_test3)
    next_val = predictions[0][0]
    x_test = x_test[1:]
    x_test = np.append(x_test, next_val)
    predictions_list = np.append(predictions_list, predictions)

predictions_list = predictions_list[1:]
predictions_list = predictions_list.reshape(predictions_list.shape[0],1)
predictions = scaler.inverse_transform(predictions_list)
dataset_train = scaler.inverse_transform(dataset_train)
offsetx =len(dataset_train)
offsetx_list = [offsetx]
for i in range(1, len(predictions)):
    offsetx_list.append(offsetx_list[i-1] + 1)


print('plotting results')
b=plt.figure(2)
plt.plot(offsetx_list, predictions, color="blue",
Example #39
0
def scale_frequencies(hfreq, hmag, freqScaling, freqStretching,
                      timbrePreservation, fs):
    """
    Scales the frequencies of the harmonics of a sound.

    :param hfreq: frequencies of input harmonics
    :param hmag: magnitudes of input harmonics
    :param freqScaling: scaling factors, in time-value pairs (a value of 1 means no scaling)
    :param freqStretching: stretching factors, in time-value pairs (a value of 1 means no stretching)
    :param timbrePreservation: 0 = no timbre preservation, 1 = timbre preservation
    :param fs: sampling rate of input sound
    :returns: yhfreq, yhmag: frequencies and magnitudes of output harmonics
    """
    if freqScaling.size % 2 != 0:  # raise exception if array not even length
        raise ValueError("Frequency scaling array does not have an even size")

    if freqStretching.size % 2 != 0:  # raise exception if array not even length
        raise ValueError(
            "Frequency stretching array does not have an even size")

    L = hfreq.shape[0]  # number of frames
    # create interpolation object with the scaling values
    freqScalingEnv = np.interp(np.arange(L),
                               L * freqScaling[::2] / freqScaling[-2],
                               freqScaling[1::2])
    # create interpolation object with the stretching values
    freqStretchingEnv = np.interp(np.arange(L),
                                  L * freqStretching[::2] / freqStretching[-2],
                                  freqStretching[1::2])
    yhfreq = np.zeros_like(hfreq)  # create empty output matrix
    yhmag = np.zeros_like(hmag)  # create empty output matrix
    for l in range(L):  # go through all frames
        ind_valid = np.where(
            hfreq[l, :] != 0)[0]  # check if there are frequency values
        if ind_valid.size == 0:  # if no values go to next frame
            continue
        if (timbrePreservation
                == 1) & (ind_valid.size > 1):  # create spectral envelope
            # values of harmonic locations to be considered for interpolation
            x_vals = np.append(np.append(0, hfreq[l, ind_valid]), fs / 2)
            # values of harmonic magnitudes to be considered for interpolation
            y_vals = np.append(np.append(hmag[l, 0], hmag[l, ind_valid]),
                               hmag[l, -1])
            specEnvelope = interp1d(x_vals,
                                    y_vals,
                                    kind='linear',
                                    bounds_error=False,
                                    fill_value=-100)
        yhfreq[l, ind_valid] = hfreq[l, ind_valid] * freqScalingEnv[
            l]  # scale frequencies
        yhfreq[l, ind_valid] = yhfreq[l, ind_valid] * (
            freqStretchingEnv[l]**ind_valid)  # stretch frequencies
        if (timbrePreservation
                == 1) & (ind_valid.size > 1):  # if timbre preservation
            yhmag[l, ind_valid] = specEnvelope(
                yhfreq[l, ind_valid])  # change amplitudes to maintain timbre
        else:
            yhmag[l,
                  ind_valid] = hmag[l,
                                    ind_valid]  # use same amplitudes as input
    return yhfreq, yhmag
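A rough usage sketch (not from the original source): the harmonic data below are invented, and scale_frequencies is assumed to be in scope together with numpy and scipy.interpolate.interp1d. It scales every harmonic up by a factor of 1.5, with no stretching and no timbre preservation.

import numpy as np

fs = 44100.0
L, nH = 4, 3                                              # 4 frames, 3 harmonics
hfreq = 220.0 * np.arange(1, nH + 1) * np.ones((L, nH))  # flat harmonic series at 220 Hz
hmag = -20.0 * np.ones((L, nH))                           # magnitudes in dB
freqScaling = np.array([0.0, 1.5, 1.0, 1.5])      # time-value pairs: scale by 1.5 throughout
freqStretching = np.array([0.0, 1.0, 1.0, 1.0])   # value of 1 means no stretching
yhfreq, yhmag = scale_frequencies(hfreq, hmag, freqScaling, freqStretching, 0, fs)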
Example #40
0
# 3D numpy array: num songs x MAX_NOTES x NUM_PARAM
all_songs_data = np.empty((1, MAX_NOTES, NUM_PARAM), np.float64)

for f in glob.glob(data_path + '*.mid'):
  song_notes = get_notes(f)

  temp_data = np.empty((1, 1, NUM_PARAM), np.float64)
  notes_data = np.zeros((1, MAX_NOTES, NUM_PARAM)) - 1
  
  for elem in song_notes:
    # Convert notes into a 2D numpy array
    # The 1st dimension is 5 pitches and 1 duration
    # The 2nd dimension is the number of notes
    raw_notes = np.array([[elem]]) 
    temp_data = np.append(temp_data, raw_notes, axis=1)
  
  temp_data = temp_data[:, 1:, :]
  notes_data[0, :temp_data.shape[1],:temp_data.shape[2]] = temp_data
#   print('notes_data = ', notes_data)
  
  all_songs_data = np.append(all_songs_data, notes_data, axis=0)

  
all_songs_data = all_songs_data[1:,:,:]

all_songs_data.shape

preprocessed_path = "/content/gdrive/My Drive/Applied Deep Learning/Outputs/"

import pickle
Example #41
0
def SfM (tracked_points):
	# Create the U matrix and V
	U, V = None, None
	for lines in tracked_points:
		U_line = lines[:,0].reshape(1, -1)
		V_line = lines[:,1].reshape(1, -1)
		if U is None: U = U_line
		else: U = np.append(U, U_line, axis=0)
		if V is None: V = V_line
		else: V = np.append(V, V_line, axis=0)
	a = np.empty((U.shape[0], 1))
	for l in range(U.shape[0]):
		a[l] = np.sum(U[l])
	b = np.empty((V.shape[0], 1))
	for l in range(V.shape[0]):
		b[l] = np.sum(V[l])
	U_=U-a
	V_=V-b
	# then append U and V to create W
	#W is the registered measurenment matrix
	W = np.append(U_, V_, axis=0)

	if W.shape[0] < W.shape[1]:
		W = W.transpose()
	
	# Find the factorization of the matrix W
	# Where W = u.s.v
	u, s, v = np.linalg.svd(W, full_matrices=False)
	# get sub matriz of u,s,v
	u3 = u[:,0:3]
	s3 = s[0:3]
	v3 = v[0:3,:]
	# Transform the vector s3 into a 3x3 diagonal matrix
	s3_ = np.power(s3, 1/2)
	sqr_s3_ = np.zeros((3,3))
	for i in range(3): sqr_s3_[i,i] = s3_[i]
	# Find the R' and S' matrix that W'=R'.S'
	R = np.dot(u3, sqr_s3_)
	S = np.dot(sqr_s3_, v3)
	print(R.shape, S.shape)

	# Find L in :
	# ALAt = Id  			# (N, 3) (3, 3) (3, N) = (N, N) 
	# ALAtA = IdA  			# (N, 3) (3, 3) (3, N) (N, 3) = (N, N) (N, 3) ----> (N, 3) (3, 3) (3, 3) = (N, 3)
	# AL = IdA(AtA)⁻1		# (N, 3) (3, 3) = (N, 3) (3, 3) ----> (N, 3) (3, 3) = (N, 3) 
	Rt = R.transpose()
	a_sqr = np.dot(Rt, R)
	Id = np.identity(R.shape[0])
	Id = np.dot(Id, R)
	Id = np.dot(Id, np.linalg.inv(a_sqr))
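	# least-squares solve of R @ L ~= R @ inv(R.T @ R) for the 3x3 metric correction L (cf. the derivation sketched above)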
	L, res, rank, s = np.linalg.lstsq(R, Id)
	print(L.shape)

	#Find Q in: L=QQt
	Q = np.linalg.cholesky(L)
	print(Q.shape)
	
	#Find the Rotation and Shape Matrix
	R_final = np.dot(R, Q)
	S_final = np.dot(np.linalg.inv(Q), S)
	print(R_final.shape, S_final.shape)
Example #42
0
    minee_list = []
    for i in range(rep):
        minee_list.append(
            MINEE(torch.Tensor(X[i]),
                  torch.Tensor(Y[i]),
                  batch_size=batch_size,
                  ref_batch_factor=ref_batch_factor,
                  lr=lr))

    dXY_list = np.zeros((rep, 0))
    dX_list = np.zeros((rep, 0))
    dY_list = np.zeros((rep, 0))

    for k in tqdm(range(80000)):
        dXY_list = np.append(dXY_list, np.zeros((rep, 1)), axis=1)
        dX_list = np.append(dX_list, np.zeros((rep, 1)), axis=1)
        dY_list = np.append(dY_list, np.zeros((rep, 1)), axis=1)
        for i in range(rep):
            minee_list[i].step()
            dXY_list[i,
                     -1], dX_list[i,
                                  -1], dY_list[i,
                                               -1] = minee_list[i].forward()
    mi_ma_rate = 0.01  # rate of moving average
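    # mi_list below holds the raw estimate dXY - dX - dY at every step; it is then
    # smoothed with an exponential moving average using mi_ma_rate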
    mi_list = (dXY_list - dX_list - dY_list).copy()
    for i in range(1, dXY_list.shape[1]):
        mi_list[:,
                i] = (1 - mi_ma_rate) * mi_list[:, i -
                                                1] + mi_ma_rate * mi_list[:, i]
    MI_list.append(mi_list)
Example #43
0
    def __init__(self, num_state_qubits, min_state_value, max_state_value,
                 breakpoints, slopes, offsets, f_min, f_max, c_approx,
                 i_state=None, i_objective=None):
        r"""
        Args:
            num_state_qubits (int): number of qubits to represent the state
            min_state_value (float): lower bound of values to be represented by state qubits
            max_state_value (float): upper bound of values to be represented by state qubits
            breakpoints (Union(list, numpy.ndarray)): breakpoints of piecewise linear function
            slopes (Union(list, numpy.ndarray)): slopes of linear segments
            offsets (Union(list, numpy.ndarray)): offset of linear segments
            f_min (float): minimal value of resulting function
                           (required for normalization of amplitude)
            f_max (float): maximal value of resulting function
                           (required for normalization of amplitude)
            c_approx (float): approximating factor (linear segments are approximated by
                              contracting rotation
                              around pi/4, where sin\^2() is locally linear)
            i_state (int): indices of qubits that represent the state
            i_objective (int): index of target qubit to apply the rotation to
        """
        super().__init__(num_state_qubits + 1)

        self.num_state_qubits = num_state_qubits
        self.min_state_value = min_state_value
        self.max_state_value = max_state_value

        # sort breakpoints
        i_sort = np.argsort(breakpoints)
        breakpoints = np.array(breakpoints)[i_sort]
        slopes = np.array(slopes)[i_sort]
        offsets = np.array(offsets)[i_sort]

        # drop breakpoints and corresponding values below min_state_value or above max_state_value
        for i in reversed(range(len(breakpoints))):
            if breakpoints[i] <= (self.min_state_value - 1e-6) or \
                    breakpoints[i] >= (self.max_state_value + 1e-6):
                breakpoints = np.delete(breakpoints, i)
                slopes = np.delete(slopes, i)
                offsets = np.delete(offsets, i)

        # make sure the minimal value is included in the breakpoints
        min_value_included = False
        for bp in breakpoints:
            if np.isclose(bp, min_state_value):
                min_value_included = True
                break
        if not min_value_included:
            breakpoints = np.append(min_state_value, breakpoints)
            slopes = np.append(0, slopes)
            offsets = np.append(0, offsets)

        # store parameters
        self._breakpoints = breakpoints
        self._slopes = slopes
        self._offsets = offsets
        self._f_min = f_min
        self._f_max = f_max
        self._c_approx = c_approx

        # get and store qubit indices
        self.i_state = None
        if i_state is not None:
            self.i_state = i_state
        else:
            self.i_state = list(range(num_state_qubits))

        self.i_objective = None
        if i_objective is not None:
            self.i_objective = i_objective
        else:
            self.i_objective = num_state_qubits

        # map breakpoints, slopes, and offsets such that they fit {0, ..., 2^n-1}
        lb = min_state_value
        ub = max_state_value
        self._mapped_breakpoints = []
        self._mapped_slopes = []
        self._mapped_offsets = []
        for i, _ in enumerate(breakpoints):
            mapped_breakpoint = (breakpoints[i] - lb) / (ub - lb) * (2**num_state_qubits - 1)
            if mapped_breakpoint <= 2**num_state_qubits - 1:
                self._mapped_breakpoints += [mapped_breakpoint]

                # factor (ub - lb) / (2^n - 1) is for the scaling of x to [l,u]
                # note that the +l for mapping to [l,u] is already included in
                # the offsets given as parameters
                self._mapped_slopes += [slopes[i] * (ub - lb) / (2**num_state_qubits - 1)]
                self._mapped_offsets += [offsets[i]]
        self._mapped_breakpoints = np.array(self._mapped_breakpoints)
        self._mapped_slopes = np.array(self._mapped_slopes)
        self._mapped_offsets = np.array(self._mapped_offsets)

        # approximate linear behavior by scaling and contracting around pi/4
        if len(self._mapped_breakpoints):  # pylint: disable=len-as-condition
            self._slope_angles = np.zeros(len(breakpoints))
            self._offset_angles = np.pi / 4 * (1 - c_approx) * np.ones(len(breakpoints))
            for i in range(len(breakpoints)):
                self._slope_angles[i] = \
                    np.pi * c_approx * self._mapped_slopes[i] / 2 / (f_max - f_min)
                self._offset_angles[i] += \
                    np.pi * c_approx * (self._mapped_offsets[i] - f_min) / 2 / (f_max - f_min)

            # multiply by 2 since Y-rotation uses theta/2 as angle
            self._slope_angles = 2 * self._slope_angles
            self._offset_angles = 2 * self._offset_angles

            # create piecewise linear Y rotation
            self._pwl_ry = PwlRot(
                self._mapped_breakpoints,
                self._slope_angles,
                self._offset_angles,
                num_state_qubits,
                i_state=i_state,
                i_target=i_objective
            )

        else:
            self.offset_angle = 0
            self.slope_angle = 0

            # create piecewise linear Y rotation
            self._pwl_ry = None
Example #44
0
m = 3* X_data.shape[0] // 10
trainX, trainY = X_data[m:], Y_data[m:]

trainX = (trainX- np.mean(trainX, axis=0))/ np.std(trainX, axis=0)

# carve out a random contiguous block as the test set; the remaining rows stay in the training set
trainX = trainX[:1000]
trainY = trainY[:1000]
n = trainX.shape[0]
test_size = (1-validation_split)*n
random = np.random.randint(0, test_size)
start, end = int(random), int(random+test_size)
testX = trainX[start:end]
testY = trainY[start:end]
trainX = np.append(trainX[:start], trainX[end:], axis=0)
trainY = np.append(trainY[:start], trainY[end:], axis=0)

# Create the model
x = tf.placeholder(tf.float32, [None, NUM_FEATURES])
y_ = tf.placeholder(tf.float32, [None, 1])

# Build the graph for the deep net
weights_h1 = tf.Variable(tf.truncated_normal([NUM_FEATURES,num_neuron], stddev=0.001)) 
biases_h1 = tf.Variable(tf.zeros([num_neuron]))
h1 = tf.nn.relu(tf.matmul(x, weights_h1) + biases_h1)
h1 = tf.nn.dropout(h1, dropout)

weights_h2 = tf.Variable(tf.truncated_normal([num_neuron,20], stddev=0.001)) 
biases_h2 = tf.Variable(tf.zeros([20]))
h2 = tf.nn.relu(tf.matmul(h1, weights_h2) + biases_h2)
Example #45
0
    def create_first_guess_file(*args):
        """build a first guess file based on a list of pressure levels and an fov.nc file

        This method requires an fov.nc file as generated by create_fov_file. It uses the
        Virtual Radiosonde code to get GFS data and then modifies it to create an fg.nc file.

        :param args:
        :return:
        """

        log.info(
            "Generating first guess file from GFS data and fov file positioning information"
        )

        # we need access to the fov file and an input pressure list in order to run the
        # virtual radiosonde, so if we don't have those, just stop
        if options.fov_base is None or options.plevels_input is None:
            log.warn(
                "Unable to create first guess file without input fov file and input pressure levels."
            )
            return

        log.info("Loading lon/lat and times from FOV file")

        # open the fov file and pull the lon / lat and time information
        fov_file = nc.Dataset(clean_path(options.fov_base), 'r')
        lon_data = fov_file.variables[OUT_FOV_LON_VAR_NAME][:]
        lat_data = fov_file.variables[OUT_FOV_LAT_VAR_NAME][:]
        time_data = fov_file.variables[
            OUT_FOV_TIME_OFFSET_VAR_NAME][:] + fov_file.variables[
                OUT_FOV_BASE_TIME_VAR_NAME][0]
        fov_file.close()

        # build the list of time / lon / lat dictionaries
        dt_times = []
        # convert the epoch seconds format to datetimes
        for epoch_seconds in time_data:
            dt_times.append(datetime.fromtimestamp(epoch_seconds))
        # make the list of dictionaries representing each point
        desired_points = []
        for index in range(0, lon_data.size):
            desired_points.append({
                VR_INPUT_DATETIME_KEY: dt_times[index],
                VR_INPUT_LAT_KEY: lat_data[index],
                VR_INPUT_LON_KEY: lon_data[index]
            })
        num_obs = len(desired_points)

        log.info("Loading pressure levels from file")

        #print("Number of selected points: " + str(num_obs))

        # open the pressure levels file and get the list of pressure levels
        plvls_file = nc.Dataset(clean_path(options.plevels_input), 'r')
        plvls_data = numpy.sort(
            plvls_file.variables[INPUT_PRESSURE_LEVELS_VAR_NAME][:])[::-1]
        num_plvls = plvls_data.size
        plvls_file.close()

        log.info("Running Virtual Radiosonde code")

        # call the virtual radiosonde to get data to start with
        stamp = datetime.now().strftime('%s')
        cache_dir = os.path.join('/tmp/vr/', stamp)
        if not os.path.exists(cache_dir):
            os.mkdir(cache_dir)
        # confirmed that the interpolation kwarg is only for temporal interpolation (spatial interpolation is always bilinear)
        # TODO, may want to have a user knob to control temporal interpolation type
        src = radiosonde.VirtualRadiosondeNarrator(on_dread=True,
                                                   levels=plvls_data,
                                                   cache=cache_dir,
                                                   channels=CHANNELS_TEMP)
        results = list(src(desired_points))

        #print("original pressures: " + str(plvls_data))

        #print("results:    " + str(results[0].keys()))
        #print("tdry shape: " + str(results[0][VR_TEMPERATURE_KEY].shape))
        #print("pres shape: " + str(results[0]['pres'].shape))

        log.info("Creating fg.nc file")

        # create the first guess file
        # TODO, check existence for dir and file
        out_fg_file = nc.Dataset(os.path.join(options.output,
                                              OUT_FG_FILE_NAME),
                                 'w',
                                 format="NETCDF3_CLASSIC")

        # build the dimensions for the first guess file
        num_emiss_consts = SURFACE_EMISSIVITY_COEFFICIENTS.size
        state_vector_size = num_plvls * 4 + 1 + num_emiss_consts
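        # state vector layout (see the description further below): four profiles of
        # num_plvls values each (temperature, water vapor, CO2, ozone), one surface
        # temperature, and num_emiss_consts surface emissivity coefficients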
        out_fg_file.createDimension(OUT_FG_NUM_STATEVAR_DIM_NAME,
                                    size=state_vector_size)
        out_fg_file.createDimension(OUT_FG_OBS_NUM_DIM_NAME, size=num_obs)
        out_fg_file.createDimension(OUT_FG_STATEVAR_DIMS_DIM_NAME, size=6)
        out_fg_file.createDimension(
            OUT_FG_NUM_SELECTED_STATEVAR_DIM_NAME, size=state_vector_size
        )  # TODO, don't know how these are selected yet

        # build the various first guess file variables using the virtual radiosonde data

        # create the first guess state vector
        # this is built up of several different things:
        """
        Temperature in [K] <- num_plvls values
		Water vapor in [log(q)] where q is psecific humidity in [kg/kg] <- num_plvls values
		Carbon dioxide [ ppmv ] <- num_plvls values (may be constant repeated or from GFS data)
		Ozone  in [log(q)] where q is psecific humidity in [kg/kg] <- num_plvls values (may be constant repeated or from GFS data)
		Surface temperature in [K] <- one value (probably from GFS data?)
		Surface emissivity principal component coefficients in logit space <- 5 values, constants from Paolo
        """

        # allocate some space to hold the values for the parts of the state vector
        temperature = numpy.ones(
            (num_obs, num_plvls),
            dtype=numpy.float32) * numpy.nan  # this is the temp profile
        pressure = numpy.ones(
            (num_obs, num_plvls),
            dtype=numpy.float32) * numpy.nan  # this is the pressure profile
        water_vapor = numpy.ones(
            (num_obs, num_plvls), dtype=numpy.float32) * numpy.nan
        c02_value = numpy.ones(
            (num_obs, num_plvls),
            dtype=numpy.float32) * C02_CONST_STARTING_PT_IN_PPMV
        ozone = numpy.ones(
            (num_obs, num_plvls), dtype=numpy.float32) * numpy.nan
        surface_temp = numpy.ones(
            (num_obs, 1), dtype=numpy.float32) * numpy.nan
        surface_pres = numpy.ones(
            (num_obs, 1), dtype=numpy.float32) * numpy.nan

        # pull the appropriate values out of the virtual radiosonde data
        for index in range(0, len(results)):
            current_pt = results[index]
            temperature[index] = current_pt[
                VR_TEMPERATURE_KEY] + CELSIUS_TO_KELVIN_ADD_CONST  # vr is in C, we need K
            pressure[index] = current_pt[VR_PRESSURE_KEY]
            # water vapor is the log of specific water vapor
            water_vapor_temp = relative_humidity_to_specific_humidity(
                current_pt['rh'] / 100.0, temperature[index])
            water_vapor_temp[
                water_vapor_temp <
                WATER_VAPOR_MINIMUM] = WATER_VAPOR_MINIMUM  # make sure we have a minimum so we get valid results from the log
            water_vapor[index] = numpy.log(water_vapor_temp)
            # ozone is the log of the specific humidity
            temp_ozone_mr = current_pt[VR_OZONE_MR_KEY]
            ozone[index] = numpy.log(
                temp_ozone_mr / (temp_ozone_mr + 1)
            )  # convert from mixing ratio to specific humidity and take the log
            # Note: for very small mixing ratios of ozone this conversion may not make much of a difference
            surface_temp[index] = current_pt[
                VR_SURFACE_TEMPERATURE_KEY]  # surface temperature is already in K
            surface_pres[index] = current_pt[VR_SEA_SURFACE_PRESSURE_KEY]

        # create the base state vector
        state_vector_data = numpy.append(temperature, water_vapor, axis=1)
        state_vector_data = numpy.append(state_vector_data, c02_value, axis=1)
        state_vector_data = numpy.append(state_vector_data, ozone, axis=1)
        state_vector_data = numpy.append(state_vector_data,
                                         surface_temp,
                                         axis=1)
        temp_coeffs = numpy.reshape(
            numpy.tile(SURFACE_EMISSIVITY_COEFFICIENTS, num_obs),
            (num_obs, num_emiss_consts))
        state_vector_data = numpy.append(state_vector_data,
                                         temp_coeffs,
                                         axis=1)

        # create the pressure version of the state vector
        press_vector_data = numpy.append(pressure, pressure, axis=1)
        press_vector_data = numpy.append(press_vector_data, pressure, axis=1)
        press_vector_data = numpy.append(press_vector_data, pressure, axis=1)
        press_vector_data = numpy.append(press_vector_data,
                                         surface_pres,
                                         axis=1)
        press_vector_data = numpy.append(press_vector_data,
                                         surface_pres,
                                         axis=1)
        press_vector_data = numpy.append(press_vector_data,
                                         surface_pres,
                                         axis=1)
        press_vector_data = numpy.append(press_vector_data,
                                         surface_pres,
                                         axis=1)
        press_vector_data = numpy.append(press_vector_data,
                                         surface_pres,
                                         axis=1)
        press_vector_data = numpy.append(press_vector_data,
                                         surface_pres,
                                         axis=1)

        # create xa and x0
        temp_var = out_fg_file.createVariable(
            OUT_FG_LIN_POINT_VAR_NAME, 'f8',
            (OUT_FG_OBS_NUM_DIM_NAME, OUT_FG_NUM_STATEVAR_DIM_NAME))
        temp_var[0:num_obs, 0:state_vector_size] = state_vector_data
        temp_var = out_fg_file.createVariable(
            OUT_FG_FIRST_GUESS_STATE_VEC_VAR_NAME, 'f8',
            (OUT_FG_OBS_NUM_DIM_NAME, OUT_FG_NUM_STATEVAR_DIM_NAME))
        temp_var[0:num_obs, 0:state_vector_size] = state_vector_data

        # create p
        temp_var = out_fg_file.createVariable(
            OUT_FG_PRESSURE_GRID_VAR_NAME, 'f8',
            (OUT_FG_OBS_NUM_DIM_NAME, OUT_FG_NUM_STATEVAR_DIM_NAME))
        temp_var[0:num_obs, 0:state_vector_size] = press_vector_data

        # create xdim
        temp_var = out_fg_file.createVariable(
            OUT_FG_STATE_VECTOR_DIMS_VAR_NAME, 'f8',
            (OUT_FG_STATEVAR_DIMS_DIM_NAME))
        temp_var[0:6] = numpy.array(
            [num_plvls, num_plvls, num_plvls, num_plvls, 1, num_emiss_consts])

        # create varindx
        temp_selected_indx = numpy.array(range(
            0, state_vector_size))  # TODO, don't know how these are selected
        temp_var = out_fg_file.createVariable(
            OUT_FG_SEL_STATE_VECTOR_IDX_VAR_NAME, 'f8',
            (OUT_FG_NUM_SELECTED_STATEVAR_DIM_NAME))
        temp_var[
            0:
            state_vector_size] = temp_selected_indx + 1  # use matlab indexing

        # create selxa and selx0
        temp_var = out_fg_file.createVariable(
            OUT_FG_SEL_LIN_POINT_VAR_NAME, 'f8',
            (OUT_FG_OBS_NUM_DIM_NAME, OUT_FG_NUM_SELECTED_STATEVAR_DIM_NAME))
        temp_var[0:num_obs, 0:state_vector_size] = state_vector_data
        temp_var = out_fg_file.createVariable(
            OUT_FG_SEL_FG_STATE_VEC_VAR_NAME, 'f8',
            (OUT_FG_OBS_NUM_DIM_NAME, OUT_FG_NUM_SELECTED_STATEVAR_DIM_NAME))
        temp_var[0:num_obs, 0:state_vector_size] = state_vector_data

        # create selp
        temp_var = out_fg_file.createVariable(
            OUT_FG_SEL_PRESSURE_GRID_VAR_NAME, 'f8',
            (OUT_FG_OBS_NUM_DIM_NAME, OUT_FG_NUM_SELECTED_STATEVAR_DIM_NAME))
        temp_var[0:num_obs, 0:state_vector_size] = press_vector_data

        # close the finished file
        out_fg_file.close()

        log.info("Finished saving fg.nc to file")
def create_npy_data(train_imgs_path, is_train):
    # empty matrix to hold patches
    patches_training_imgs_2d = np.empty(shape=[0, patch_size, patch_size],
                                        dtype='int16')
    patches_training_gtruth_2d = np.empty(
        shape=[0, patch_size, patch_size, num_classes], dtype='int16')

    # 76 pancreatitis plus 20 pancreas volumes as training data, 19 as test data.
    images_train_dir = sorted(os.listdir(train_imgs_path))
    train_gts_dir = sorted(os.listdir(train_gts_path))

    #print(images_train_dir)
    #print(train_gts_dir)

    start_time = time.time()

    j = 0
    print('-' * 30)
    print('Creating training2d_patches...')
    print('-' * 30)

    # for each volume do:
    for img_dir_name, gt_dir_name in zip(images_train_dir, train_gts_dir):
        patches_training_imgs_2d_temp = np.empty(
            shape=[0, patch_size, patch_size], dtype='int16')
        patches_training_gtruth_2d_temp = np.empty(
            shape=[0, patch_size, patch_size, num_classes], dtype='int16')
        print('Processing: volume {0} / {1} volume images'.format(
            j + 1, len(images_train_dir)))

        # volume
        img_name = img_dir_name
        img_name = os.path.join(train_imgs_path, img_dir_name)

        # groundtruth
        img_seg_name = os.path.join(train_gts_path, gt_dir_name)

        # load volume, gt
        img = nib.load(img_name).get_data()
        img_data = img
        img_data = np.squeeze(img_data)
        # 0-1 normalization
        img_data = (img_data - np.min(img_data)) / (np.max(img_data) -
                                                    np.min(img_data))

        img_gtruth = nib.load(img_seg_name).get_data()
        img_gtruth_data = img_gtruth
        img_gtruth_data = np.squeeze(img_gtruth_data)

        print(img_data.shape, img_gtruth_data.shape, '8888888888888888')
        print(img_dir_name, gt_dir_name)
        # for each slice do
        for slice in range(img_gtruth_data.shape[2]):
            patches_training_imgs_2d_slice_temp = np.empty(
                shape=[0, patch_size, patch_size], dtype='int16')
            patches_training_gtruth_2d_slice_temp = np.empty(
                shape=[0, patch_size, patch_size, num_classes], dtype='int16')
            if np.count_nonzero(
                    img_gtruth_data[:, :, slice]) > 900:  # keep slices with more than 900 nonzero ground-truth pixels (pancreatitis: over 100)

                # extract patches of the jth volum image
                imgs_patches, gt_patches = extract_2d_patches(img_data[:,:,slice], \
                                                              img_gtruth_data[:,:,slice])

                # update database
                patches_training_imgs_2d_slice_temp = np.append(
                    patches_training_imgs_2d_slice_temp, imgs_patches, axis=0)
                patches_training_gtruth_2d_slice_temp = np.append(
                    patches_training_gtruth_2d_slice_temp, gt_patches, axis=0)

            patches_training_imgs_2d_temp = np.append(
                patches_training_imgs_2d_temp,
                patches_training_imgs_2d_slice_temp,
                axis=0)
            patches_training_gtruth_2d_temp = np.append(
                patches_training_gtruth_2d_temp,
                patches_training_gtruth_2d_slice_temp,
                axis=0)

        patches_training_imgs_2d = np.append(patches_training_imgs_2d,
                                             patches_training_imgs_2d_temp,
                                             axis=0)
        patches_training_gtruth_2d = np.append(patches_training_gtruth_2d,
                                               patches_training_gtruth_2d_temp,
                                               axis=0)
        j += 1
        X = patches_training_imgs_2d.shape
        Y = patches_training_gtruth_2d.shape
        print('shape im: [{0} , {1} , {2}]'.format(X[0], X[1], X[2]))
        print('shape gt: [{0} , {1} , {2}, {3}]'.format(
            Y[0], Y[1], Y[2], Y[3]))

    # convert to single precision
    patches_training_imgs_2d = patches_training_imgs_2d.astype('float32')
    patches_training_imgs_2d = np.expand_dims(patches_training_imgs_2d, axis=3)

    end_time = time.time()
    print("Elapsed time was %g seconds" % (end_time - start_time))

    X = patches_training_imgs_2d.shape
    Y = patches_training_gtruth_2d.shape

    print('-' * 30)
    print('Training set detail...')
    print('-' * 30)
    print('shape im: [{0} , {1} , {2}, {3}]'.format(X[0], X[1], X[2], X[3]))
    print('shape gt: [{0} , {1} , {2}, {3}]'.format(Y[0], Y[1], Y[2], Y[3]))

    S = patches_training_imgs_2d.shape
    print('Done: {0} 2d patches added from {1} volume images'.format(S[0], j))
    print('Loading done.')

    print('Saving to .npy files...')

    # save train or validation
    if is_train:
        np.save('C_imdbs_2d_patch_AP/patches_training_imgs_2d.npy',
                patches_training_imgs_2d)
        np.save('C_imdbs_2d_patch_AP/patches_training_gtruth_2d.npy',
                patches_training_gtruth_2d)
    else:
        np.save('C_imdbs_2d_patch_AP/patches_val_imgs_2d.npy',
                patches_training_imgs_2d)
        np.save('C_imdbs_2d_patch_AP/patches_val_gtruth_2d.npy',
                patches_training_gtruth_2d)
    print('Saving to .npy files done.')
Example #47
0
print(mesh)

for spin in new_bs.bands.keys():

    ebands = new_bs.bands[spin]

    ebands -= new_bs.efermi - mu

    plane_bands = []

    sorted_energies = []

    new_sorted_energies = []

    dis_array = np.array([
        plane_dist(np.append(np.array(slice_array), 0.0), i) for i in kpoints
    ])

    for i, value in enumerate(slice_array):
        if not value == 0:
            if i == 0:
                sortd_indx = np.argsort(dis_array)
                plane_mesh = [mesh[2], mesh[1]]
            if i == 1:
                sortd_indx = np.argsort(dis_array)
                plane_mesh = [mesh[2], mesh[0]]

            if i == 2:
                sortd_indx = np.argsort(dis_array)
                plane_mesh = [mesh[0], mesh[1]]
Example #48
0
def evaluate(args,
             model,
             tokenizer,
             labels,
             pad_token_label_id,
             mode,
             prefix=""):
    eval_dataset = load_and_cache_examples(args,
                                           tokenizer,
                                           labels,
                                           pad_token_label_id,
                                           mode=mode)

    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(
        eval_dataset) if args.local_rank == -1 else DistributedSampler(
            eval_dataset)
    eval_dataloader = DataLoader(eval_dataset,
                                 sampler=eval_sampler,
                                 batch_size=args.eval_batch_size)

    # multi-gpu evaluate
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Eval!
    logger.info("***** Running evaluation %s *****", prefix)
    logger.info("  Num examples = %d", len(eval_dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None
    out_label_ids = None
    model.eval()
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        batch = tuple(t.to(args.device) for t in batch)

        with torch.no_grad():
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "labels": batch[3]
            }
            if args.model_type != "distilbert":
                inputs["token_type_ids"] = (
                    batch[2] if args.model_type in ["bert", "xlnet"] else None
                )  # XLM and RoBERTa don"t use segment_ids
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs[:2]

            if args.n_gpu > 1:
                tmp_eval_loss = tmp_eval_loss.mean(
                )  # mean() to average on multi-gpu parallel evaluating

            eval_loss += tmp_eval_loss.item()
        nb_eval_steps += 1
        if preds is None:
            preds = logits.detach().cpu().numpy()
            out_label_ids = inputs["labels"].detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            out_label_ids = np.append(out_label_ids,
                                      inputs["labels"].detach().cpu().numpy(),
                                      axis=0)

    eval_loss = eval_loss / nb_eval_steps
    preds = np.argmax(preds, axis=2)

    label_map = {i: label for i, label in enumerate(labels)}

    out_label_list = [[] for _ in range(out_label_ids.shape[0])]
    preds_list = [[] for _ in range(out_label_ids.shape[0])]

    for i in range(out_label_ids.shape[0]):
        for j in range(out_label_ids.shape[1]):
            if out_label_ids[i, j] != pad_token_label_id:
                out_label_list[i].append(label_map[out_label_ids[i][j]])
                preds_list[i].append(label_map[preds[i][j]])

    results = {
        "loss": eval_loss,
        "precision": precision_score(out_label_list, preds_list),
        "recall": recall_score(out_label_list, preds_list),
        "f1": f1_score(out_label_list, preds_list),
    }

    logger.info("***** Eval results %s *****", prefix)
    for key in sorted(results.keys()):
        logger.info("  %s = %s", key, str(results[key]))

    return results, preds_list
Example #49
0
    return tr_sampler,d_sampler,tv_sampler,te_sampler


# In[27]:


#Loads and appends all folds all at once
trainfolds = []    # Train set
testfolds = []    # Test set (LEARNED)
testfolds_U = []    # Test set (UNLEARNED)

col_select = np.array([])

# This is a hack to test smaller windows
for i in range (spw*nmuscles,200):
    col_select = np.append(col_select,i)
    
for i in range (0,spw*nmuscles,nmuscles):
    for muscle in features_select:
        col_select = np.append(col_select,muscle -1 + i)
    cols=np.arange(0,spw*nmuscles+1)

if exclude_features & (not include_only_features): #delete gonio
    for j in range(fold_offset,fold_offset + nfold):
        print("Loading fold " + str(j))
        traindata = pd.read_table(os.path.join(cwd, prefix_train + str(j)+'.csv'),sep=',',header=None,dtype=np.float32,usecols=[i for i in cols if i not in col_select.astype(int)])
        trainfolds.append(traindata)
        testdata = pd.read_table(os.path.join(cwd, prefix_test + str(j)+'.csv'),sep=',',header=None,dtype=np.float32, usecols=[i for i in cols if i not in col_select.astype(int)])
        testfolds.append(testdata) 
elif include_only_features & (not exclude_features): #only gonio
    for j in range(fold_offset, fold_offset + nfold):
Example #50
0
    def create_fov_file(*args):
        """generate an fov file from an input SHIS data file

        This option generates a properly formatted fov.nc file from a Scanning HIS data file.

        Examples:
         python -m shis2mirto.shis2mirto create_fov_file
        """

        log.info("Generating FOV file from SHIS data")

        # we need SHIS input and a base FOV file to generate input
        if (options.shis_input is None) or (options.wnum_input is None):
            log.warn("Incomplete input, unable to generate FOV file")
            return

        # pull some things from the options
        center_angle = options.center_fov_angle
        angle_range = options.fov_angle_range
        shis_file = nc.Dataset(clean_path(options.shis_input), 'r')
        wn_base_file = nc.Dataset(clean_path(options.wnum_input), 'r')

        desired_wnums = numpy.sort(
            wn_base_file.variables[INPUT_WAVE_NUMBER_VAR_NAME][:])
        log.debug("desired wave numbers: " + str(desired_wnums))

        # look through the wave numbers in the shis file and figure out the appropriately matching
        # indexes to the wave numbers we want
        found_indexes = numpy.ones(desired_wnums.shape, dtype='int') * -1
        temp_wnums = shis_file.variables[SHIS_WAVE_NUMBER_VAR_NAME][:]
        current_target = 0
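        # walk the (assumed ascending) SHIS wave numbers and, for each desired wave
        # number, record the index of whichever of the two bracketing values is closer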
        for index in range(0, temp_wnums.size - 1):
            if current_target < found_indexes.size:
                desired = desired_wnums[current_target]
                if desired == temp_wnums[index]:
                    found_indexes[current_target] = index
                    current_target += 1
                elif desired == temp_wnums[index + 1]:
                    found_indexes[current_target] = index + 1
                    current_target += 1
                elif (desired > temp_wnums[index]) and (desired <
                                                        temp_wnums[index + 1]):

                    if (desired - temp_wnums[index]) < (temp_wnums[index + 1] -
                                                        desired):
                        found_indexes[current_target] = index
                        current_target += 1
                    else:
                        found_indexes[current_target] = index + 1
                        current_target += 1

                    # TODO need to check the tolerance of this fit

        # if we were unable to find a matching wave number for any of the desired
        # wave numbers, stop now
        if numpy.min(found_indexes) < 0:
            log.warn("Unable to find desired wave numbers in SHIS file")
            return

        # figure out where the acceptable fov angles fall
        temp_angles = shis_file.variables[SHIS_FOV_ANGLE_VAR_NAME][:]
        angle_mask = (temp_angles >= (center_angle - angle_range)) & (
            temp_angles <= (center_angle + angle_range))
        num_obs = numpy.sum(angle_mask)

        # find the global variables for our output fov file
        num_channels = temp_wnums.size
        num_selected_channels = found_indexes.size

        log.debug("radiances shape:  " +
                  str(shis_file.variables[SHIS_RADIANCE_VAR_NAME].shape))
        log.debug("num obs:          " + str(num_obs))
        log.debug("num channels:     " + str(num_channels))
        log.debug("num sel channels: " + str(num_selected_channels))

        # build the output file
        # TODO, check existence for dir and file
        out_fov_file = nc.Dataset(os.path.join(options.output,
                                               OUT_FOV_FILE_NAME),
                                  'w',
                                  format="NETCDF3_CLASSIC")

        # create the global dimensions we're going to need
        out_fov_file.createDimension(OUT_FOV_OBS_NUM_DIM_NAME, size=num_obs)
        out_fov_file.createDimension(OUT_FOV_NUM_CHANNELS_DIM_NAME,
                                     size=num_channels)
        out_fov_file.createDimension(OUT_FOV_NUM_SELECTED_CHANNELS_DIM_NAME,
                                     size=num_selected_channels)

        # put in the longitude and latitude variables
        temp_lon = shis_file.variables[SHIS_LON_VAR_NAME][angle_mask]
        temp_var = out_fov_file.createVariable(OUT_FOV_LON_VAR_NAME, 'f8',
                                               (OUT_FOV_OBS_NUM_DIM_NAME))
        temp_var[0:num_obs] = temp_lon
        temp_lat = shis_file.variables[SHIS_LAT_VAR_NAME][angle_mask]
        temp_var = out_fov_file.createVariable(OUT_FOV_LAT_VAR_NAME, 'f8',
                                               (OUT_FOV_OBS_NUM_DIM_NAME))
        temp_var[0:num_obs] = temp_lat

        # copy the various time variables
        temp_base_time = shis_file.variables[SHIS_BASE_TIME_VAR_NAME][0]
        print("base time: " + str(temp_base_time))
        temp_var = out_fov_file.createVariable(OUT_FOV_BASE_TIME_VAR_NAME,
                                               'f8')
        temp_var.assignValue(temp_base_time)
        temp_time_offset = shis_file.variables[SHIS_TIME_OFFSET_VAR_NAME][
            angle_mask]
        temp_var = out_fov_file.createVariable(OUT_FOV_TIME_OFFSET_VAR_NAME,
                                               'f8',
                                               (OUT_FOV_OBS_NUM_DIM_NAME))
        temp_var[0:num_obs] = temp_time_offset

        # also need the time in the matlab datenum format
        # "TimeFracDay == is the equivalent of the matlab datenum function, 1 corresponds to Jan-1-0000 "
        total_epoc_secs = temp_time_offset + temp_base_time
        matlab_times = numpy.zeros(total_epoc_secs.size, dtype=numpy.float32)
        for index in range(0, total_epoc_secs.size):
            matlab_times[index] = datetime_to_matlab_datenum(
                datetime.fromtimestamp(total_epoc_secs[index]))
        temp_var = out_fov_file.createVariable(
            OUT_FOV_MATLAB_DATENUM_TIME_VAR_NAME, 'f8',
            OUT_FOV_OBS_NUM_DIM_NAME)
        temp_var[0:num_obs] = matlab_times

        # put in the fov angles
        temp_fov_angles = temp_angles[angle_mask]
        temp_var = out_fov_file.createVariable(OUT_FOV_FOV_ANGLE_VAR_NAME,
                                               'f8',
                                               (OUT_FOV_OBS_NUM_DIM_NAME))
        temp_var[0:num_obs] = temp_fov_angles

        # get the full list of radiances
        rad_shape = shis_file.variables[SHIS_RADIANCE_VAR_NAME].shape
        # go through and pull out the appropriate observations
        all_obs_radiances = None
        for record_index in range(0, rad_shape[0]):
            if angle_mask[record_index]:
                if all_obs_radiances is None:
                    all_obs_radiances = shis_file.variables[
                        SHIS_RADIANCE_VAR_NAME][record_index]
                else:
                    all_obs_radiances = numpy.append(
                        all_obs_radiances,
                        shis_file.variables['radiance'][record_index])
        all_obs_radiances = numpy.reshape(all_obs_radiances,
                                          (num_obs, num_channels))
        """ TODO, why doesn't this strategy work?
        temp_angle_mask_3d = numpy.reshape(numpy.tile(numpy.reshape(angle_mask, (1,angle_mask.size)), rad_shape[0]), rad_shape) # make a mask for the radiances corresponding to the good fov angles
        print("3d angle mask shape:  " + str(temp_angle_mask_3d.shape))
        all_obs_radiances = numpy.reshape(shis_file.variables[SHIS_RADIANCE_VAR_NAME][temp_angle_mask_3d], (num_obs, num_channels))
        """
        temp_var = out_fov_file.createVariable(
            OUT_FOV_RADIANCE_VAR_NAME, 'f8',
            (OUT_FOV_OBS_NUM_DIM_NAME, 'channels'))
        temp_var[0:num_obs, 0:num_channels] = all_obs_radiances

        # get the full list of wave numbers
        temp_all_wavenums = shis_file.variables[SHIS_WAVE_NUMBER_VAR_NAME][:]
        temp_var = out_fov_file.createVariable(OUT_FOV_WAVE_NUMBER_VAR_NAME,
                                               'f8',
                                               (OUT_FOV_NUM_CHANNELS_DIM_NAME))
        temp_var[0:num_channels] = temp_all_wavenums

        # get the selected wave numbers
        selected_wavenums = temp_all_wavenums[found_indexes]
        temp_var = out_fov_file.createVariable(
            OUT_FOV_SELECTED_WAVE_NUMBER_VAR_NAME, 'f8',
            (OUT_FOV_NUM_SELECTED_CHANNELS_DIM_NAME))
        temp_var[0:num_selected_channels] = selected_wavenums

        # get the indexes of the selected channels
        temp_var = out_fov_file.createVariable(
            OUT_FOV_SELECTED_CHANNEL_IDX_VAR_NAME, 'f8',
            (OUT_FOV_NUM_SELECTED_CHANNELS_DIM_NAME))
        temp_var[
            0:
            num_selected_channels] = found_indexes + 1  # we will use matlab indexing here

        # get just the selected radiances
        selected_radiances = None
        for obs_number in range(0, num_obs):
            if selected_radiances is None:
                selected_radiances = all_obs_radiances[obs_number][
                    found_indexes]
            else:
                selected_radiances = numpy.append(
                    selected_radiances,
                    all_obs_radiances[obs_number][found_indexes])
        selected_radiances = numpy.reshape(selected_radiances,
                                           (num_obs, num_selected_channels))
        temp_var = out_fov_file.createVariable(
            OUT_FOV_SELECTED_RADIANCE_VAR_NAME, 'f8',
            (OUT_FOV_OBS_NUM_DIM_NAME, OUT_FOV_NUM_SELECTED_CHANNELS_DIM_NAME))
        temp_var[0:num_obs, 0:num_selected_channels] = selected_radiances

        # close the file
        out_fov_file.close()

        log.info("Finished saving fov.nc to file")
type(df)

samples = np.array(df.loc[:, :])
type(samples)

sample_matrix = np.asmatrix(df.loc[:, :])
type(sample_matrix)

samples = samples.T

print(samples)

mean_vector1 = []
for i in range(len(samples)):
    mean_vector1 = np.append(mean_vector1, np.mean(samples[i, :]))

print('Mean Vector:\n', mean_vector1)

covariance_mat = np.cov(samples)
cov1 = np.cov(samples[0]) + np.cov(samples[1]) + np.cov(samples[2])

# eigenvectors and eigenvalues for the from the covariance matrix
eigen_val, eigen_vec = np.linalg.eig(covariance_mat)
print("eig_val_cov\n", eigen_val)
print("eig_vec_cov\n", eigen_vec)

# Make a list of (eigenvalue, eigenvector) tuples
eigen_pairs = [(np.abs(eigen_val[i]), eigen_vec[:, i])
               for i in range(len(eigen_val))]
print(eigen_pairs)
Example #52
0
def evaluate(generator,
             model,
             iou_threshold=0.5,
             score_threshold=0.5,
             max_detections=3000,
             save_path=None,
             save_log=False):
    """ Evaluate a given dataset using a given model.
    # Arguments
        generator       : The generator that represents the dataset to evaluate.
        model           : The model to evaluate.
        iou_threshold   : The threshold used to consider when a detection is positive or negative.
        score_threshold : The score confidence threshold to use for detections.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save images with visualized detections to.
    # Returns
        A dict mapping class names to mAP scores.
    """
    # gather all detections and annotations
    all_detections = _get_detections(generator,
                                     model,
                                     score_threshold=score_threshold,
                                     max_detections=max_detections)
    all_annotations = _get_annotations(generator)
    #all_detections1= _get_detections(generator, model, score_threshold=0.5, max_detections=max_detections)
    average_precisions = {}
    if save_path is None:
        try:
            os.mkdir('Validation_Results/')
        except:
            pass
        finally:
            save_path = 'Validation_Results/'

    # all_detections = pickle.load(open('all_detections.pkl', 'rb'))
    # all_annotations = pickle.load(open('all_annotations.pkl', 'rb'))
    # pickle.dump(all_detections, open('all_detections.pkl', 'wb'))
    # pickle.dump(all_annotations, open('all_annotations.pkl', 'wb'))

    # process detections and annotations
    for label in range(generator.num_classes()):
        if not generator.has_label(label):
            continue

        false_positives = np.zeros((0, ))
        false_positives_i = np.zeros((0, ))
        true_positives = np.zeros((0, ))
        true_positives_i = np.zeros((0, ))
        false_negatives = np.zeros((0, ))
        false_negatives_i = np.zeros((0, ))
        false_positivesx = np.zeros((0, ))
        false_positives_ix = np.zeros((0, ))
        true_positivesx = np.zeros((0, ))
        true_positives_ix = np.zeros((0, ))
        false_negativesx = np.zeros((0, ))
        false_negatives_ix = np.zeros((0, ))
        scores = np.zeros((0, ))
        num_annotations = 0.0

        for i in range(generator.size()):
            detections = all_detections[i][label]
            #detections1          = all_detections1[i][label]
            annotations = all_annotations[i][label]
            num_annotations += annotations.shape[0]
            detected_annotations = []
            detected_annotations1 = []
            #image1 = generator.load_image(i)[:,:,3].astype(np.uint8)
            #image1 = cv2.cvtColor(image1,cv2.COLOR_GRAY2BGR)
            #draw_annotations(image1, generator.load_annotations(i))

            for d in detections:
                image_boxes1 = d
                image_scores1 = d[4]
                scores = np.append(scores, d[4])

                if annotations.shape[0] == 0:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
                    continue

                overlaps = compute_overlap(np.expand_dims(d, axis=0),
                                           annotations)
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap = overlaps[0, assigned_annotation]

                if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                    false_positives = np.append(false_positives, 0)
                    true_positives = np.append(true_positives, 1)
                    detected_annotations.append(assigned_annotation)

                else:
                    if image_scores1 >= iou_threshold:
                        false_positives = np.append(false_positives, 1)
                        true_positives = np.append(true_positives, 0)

        # no annotations -> AP for this class is 0 (is this correct?)
        if num_annotations == 0:
            average_precisions[label] = 0, 0
            continue

        # sort by score
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]

        # compute false positives and true positives
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)
        print(true_positives)
        print(false_positives)
        # compute recall and precision
        """
#    nodes[inc,0] = files[inc][16:-20]

    ordre = np.argsort( nodes[:,0])
    nodes[:,0] = nodes[ordre,0]
    nodes[:,1] = nodes[ordre,1]
    nodes[:,2] = nodes[ordre,2]
    nodes[:,3] = nodes[ordre,3]
    return nodes



if __name__ == "__main__":

    path, path_s, s_name, file_time = PARAMS()
    nodes = get_nodes( path, path_s, s_name)

    if os.path.isfile( file_time):
        time = read_time( file_time)
        nodes = np.append( nodes, time)
    else:
        print(' ====> NO TIME FILE FOUND <=====')

    if not os.path.exists(path + path_s):
        os.makedirs(path + path_s)

    np.savetxt( path + path_s + s_name + '.txt', nodes )



Example #54
0
def data_augmentation(trainData, trainLabel, trainValids, segms=None):
    trainSegms = segms
    tremNum = cfg.nr_aug - 1
    gotData = trainData.copy()
    trainData = np.append(
        trainData, [trainData[0] for i in range(tremNum * len(trainData))],
        axis=0)
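    # trainData was just padded with copies of its first sample to reserve slots for the
    # augmented images generated below; segms, labels and valids get the same treatment next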
    if trainSegms is not None:
        gotSegm = trainSegms.copy()
        trainSegms = np.append(
            trainSegms,
            [trainSegms[0] for i in range(tremNum * len(trainSegms))],
            axis=0)
    trainLabel = np.append(
        trainLabel, [trainLabel[0] for i in range(tremNum * len(trainLabel))],
        axis=0)
    trainValids = np.append(
        trainValids,
        [trainValids[0] for i in range(tremNum * len(trainValids))],
        axis=0)
    counter = len(gotData)
    for lab in range(len(gotData)):
        ori_img = gotData[lab].transpose(1, 2, 0)
        if trainSegms is not None:
            ori_segm = gotSegm[lab].copy()
        annot = trainLabel[lab].copy()
        annot_valid = trainValids[lab].copy()
        height, width = ori_img.shape[0], ori_img.shape[1]
        center = (width / 2., height / 2.)
        n = cfg.nr_skeleton

        # affrat = random.uniform(0.75, 1.25)
        affrat = random.uniform(0.7, 1.35)
        halfl_w = min(width - center[0], (width - center[0]) / 1.25 * affrat)
        halfl_h = min(height - center[1], (height - center[1]) / 1.25 * affrat)
        # img = cv2.resize(ori_img[int(center[0] - halfl_w) : int(center[0] + halfl_w + 1), int(center[1] - halfl_h) : int(center[1] + halfl_h + 1)], (width, height))
        img = cv2.resize(
            ori_img[int(center[1] - halfl_h):int(center[1] + halfl_h + 1),
                    int(center[0] - halfl_w):int(center[0] + halfl_w + 1)],
            (width, height))
        if trainSegms is not None:
            segm = cv2.resize(
                ori_segm[int(center[1] - halfl_h):int(center[1] + halfl_h + 1),
                         int(center[0] - halfl_w):int(center[0] + halfl_w +
                                                      1)], (width, height))
        for i in range(n):
            annot[i << 1] = (annot[i << 1] - center[0]) / halfl_w * (
                width - center[0]) + center[0]
            annot[i << 1 | 1] = (annot[i << 1 | 1] - center[1]) / halfl_h * (
                height - center[1]) + center[1]
            annot_valid[i] *= ((annot[i << 1] >= 0) & (annot[i << 1] < width) &
                               (annot[i << 1 | 1] >= 0) &
                               (annot[i << 1 | 1] < height))

        trainData[lab] = img.transpose(2, 0, 1)
        if trainSegms is not None:
            trainSegms[lab] = segm
        trainLabel[lab] = annot
        trainValids[lab] = annot_valid

        # flip augmentation
        newimg = cv2.flip(img, 1)
        if trainSegms is not None:
            newsegm = cv2.flip(segm, 1)
        cod = []
        allc = []
        for i in range(n):
            x, y = annot[i << 1], annot[i << 1 | 1]
            if x >= 0:
                x = width - 1 - x
            cod.append((x, y))
        if trainSegms is not None:
            trainSegms[counter] = newsegm
        trainData[counter] = newimg.transpose(2, 0, 1)

        # **** the joint index depends on the dataset ****
        for (q, w) in cfg.symmetry:
            cod[q], cod[w] = cod[w], cod[q]
        for i in range(n):
            allc.append(cod[i][0])
            allc.append(cod[i][1])
        trainLabel[counter] = np.array(allc)
        allc_valid = annot_valid.copy()
        for (q, w) in cfg.symmetry:
            allc_valid[q], allc_valid[w] = allc_valid[w], allc_valid[q]
        trainValids[counter] = np.array(allc_valid)
        counter += 1

        # rotated augmentation
        for times in range(tremNum - 1):
            angle = random.uniform(0, 45)
            if random.randint(0, 1):
                angle *= -1
            rotMat = cv2.getRotationMatrix2D(center, angle, 1.0)
            newimg = cv2.warpAffine(img, rotMat, (width, height))
            if trainSegms is not None:
                newsegm = cv2.warpAffine(segm, rotMat, (width, height))

            allc = []
            allc_valid = []
            for i in range(n):
                x, y = annot[i << 1], annot[i << 1 | 1]
                coor = np.array([x, y])
                if x >= 0 and y >= 0:
                    R = rotMat[:, :2]
                    W = np.array([rotMat[0][2], rotMat[1][2]])
                    coor = np.dot(R, coor) + W
                allc.append(coor[0])
                allc.append(coor[1])
                allc_valid.append(annot_valid[i] *
                                  ((coor[0] >= 0) & (coor[0] < width) &
                                   (coor[1] >= 0) & (coor[1] < height)))

            newimg = newimg.transpose(2, 0, 1)
            trainData[counter] = newimg
            if trainSegms is not None:
                trainSegms[counter] = newsegm
            trainLabel[counter] = np.array(allc)
            trainValids[counter] = np.array(allc_valid)
            counter += 1
    if trainSegms is not None:
        return trainData, trainLabel, trainSegms
    else:
        return trainData, trainLabel, trainValids
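A minimal usage sketch for data_augmentation above, assuming it sits in a module where numpy, cv2 and random are already imported and where the module-level cfg it reads can be supplied as below; the shapes, joint count and symmetry pairs are illustrative only.

from types import SimpleNamespace
import numpy as np

# hypothetical config standing in for the global `cfg` the function reads
cfg = SimpleNamespace(nr_aug=4, nr_skeleton=17,
                      symmetry=[(1, 2), (3, 4), (5, 6)])

imgs = np.zeros((2, 3, 256, 192), dtype=np.float32)            # N x C x H x W
labels = np.zeros((2, 2 * cfg.nr_skeleton), dtype=np.float32)  # x/y per joint
valids = np.ones((2, cfg.nr_skeleton), dtype=np.float32)

aug_imgs, aug_labels, aug_valids = data_augmentation(imgs, labels, valids)
# every input image yields nr_aug samples: cropped original, flip, rotations
assert len(aug_imgs) == 2 * cfg.nr_aug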
Example #55
0
def evaluate(
    generator,
    model,
    iou_threshold=0.1,
    score_threshold=0.05,
    max_detections=100,
    save_path=None,
):
    """ Evaluate a given dataset using a given model.

    # Arguments
        generator       : The generator that represents the dataset to evaluate.
        model           : The model to evaluate.
        iou_threshold   : The threshold used to consider when a detection is positive or negative.
        score_threshold : The score confidence threshold to use for detections.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save images with visualized detections to.
    # Returns
        A dict mapping class names to mAP scores.
    """
    # gather all detections and annotations
    all_detections, all_inferences = _get_detections(
        generator,
        model,
        score_threshold=score_threshold,
        max_detections=max_detections,
        save_path=save_path,
    )
    all_annotations = _get_annotations(generator)
    average_precisions = {}

    # all_detections = pickle.load(open('all_detections.pkl', 'rb'))
    # all_annotations = pickle.load(open('all_annotations.pkl', 'rb'))
    # pickle.dump(all_detections, open('all_detections.pkl', 'wb'))
    # pickle.dump(all_annotations, open('all_annotations.pkl', 'wb'))

    # process detections and annotations
    for label in range(generator.num_classes()):
        if not generator.has_label(label):
            continue

        false_positives = np.zeros((0, ))
        true_positives = np.zeros((0, ))
        scores = np.zeros((0, ))
        num_annotations = 0.0

        for i in range(generator.size()):
            detections = all_detections[i][label]
            annotations = all_annotations[i][label]
            num_annotations += annotations.shape[0]
            detected_annotations = []

            for d in detections:
                scores = np.append(scores, d[4])

                if annotations.shape[0] == 0:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
                    continue

                overlaps = compute_overlap(np.expand_dims(d, axis=0),
                                           annotations)
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap = overlaps[0, assigned_annotation]

                if (max_overlap >= iou_threshold
                        and assigned_annotation not in detected_annotations):
                    false_positives = np.append(false_positives, 0)
                    true_positives = np.append(true_positives, 1)
                    detected_annotations.append(assigned_annotation)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)

        # no annotations -> AP for this class is 0 (is this correct?)
        if num_annotations == 0:
            average_precisions[label] = 0, 0
            continue

        # sort by score
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]

        # compute false positives and true positives
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)

        # compute recall and precision
        recall = true_positives / num_annotations
        precision = true_positives / np.maximum(
            true_positives + false_positives,
            np.finfo(np.float64).eps)

        # compute average precision
        average_precision = _compute_ap(recall, precision)
        average_precisions[label] = average_precision, num_annotations

    # inference time
    inference_time = np.sum(all_inferences) / generator.size()

    return average_precisions, inference_time
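The helper _compute_ap is not shown in this example; a common choice is the VOC-style all-point interpolation of the precision-recall curve, sketched here for reference (a sketch, not necessarily the exact helper this code calls):

import numpy as np

def compute_ap_sketch(recall, precision):
    # pad the curve so it starts at recall 0 and ends at recall 1
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([0.0], precision, [0.0]))
    # make precision monotonically non-increasing from right to left
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # sum rectangle areas wherever recall changes
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])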
Example #56
0
def schl_alloc(Nznew, nscnew, shn, gam0, M, rho, PS0, V1, V1sc):

	# --------------------------------------------------------------------
	# inputs:
	# Nznew - number of moles in 1st phase
	# nscnew - no. moles 2nd phase
	# shn - no. shells
	# gam0 - activity coefficients
	# M - molar masses (g/mol)
	# rho - densities (g/m3)
	# PS0 - phase separation lookup table: column 0 sv mole fraction, column 1
	#	Gibbs free energy difference, columns 2 and 3 sv mole fraction of
	#	the new and existing phase
	# V1 - volumes of 1st phase shells (m^3)
	# V1sc - volumes of 2nd phase shells (m^3)
	# --------------------------------------------------------------------
	# sv mole fraction in all shells
	xsv0 = Nznew[0,:]/(np.sum(Nznew[:,:],axis=0))		

	# shells without LLPS
	is0 = nscnew[0,:]==0.0 # index
	for ic in range(1,M[:,0].shape[0]): # component loop
		is1 = (nscnew[ic,:]==0.0)
		is0 = is0+is1  	

	# mole fraction of sv in shells without LLPS
	xsv = Nznew[0,is0]/(np.sum(Nznew[:,is0],axis=0))	
	
	# difference in Gibbs free energy between 1 phase and phase separated 
	# system (if positive, phase separation may be favourable)
	delG=np.interp(xsv,PS0[:,0],PS0[:,1])		
	
	# return if no phase separation needed or separation has already 
	# happened
	if np.sum(delG>0)==0 or np.sum(is0==0)>0:	
		return Nznew, nscnew, V1, V1sc
	
	# sv mole fraction in shells where phase separation thermodynamically 
	# favourable
	xsv=xsv[delG>0.0]
	
	# shell index where partitioning could occur
	is0 = (xsv0==xsv)

	# semi-volatile (water) mole fractions of 2 phases
	xsv1=np.zeros((1,xsv.shape[0]))
	xsv2 = np.zeros((1,xsv.shape[0]))
	xsv1[:,:] = np.interp(xsv,PS0[:,0],PS0[:,2]) # new phase
	xsv2[:,:] = np.interp(xsv,PS0[:,0],PS0[:,3]) # existing phase	

	# the Gamma value at these mole fractions
	Gam1 = np.zeros((1,xsv.shape[0]))
	Gam2 = np.zeros((1,xsv.shape[0]))
	Gam1[:,:] = np.interp(xsv1,gam0[2,:],gam0[1,:])
	Gam2[:,:] = np.interp(xsv2,gam0[2,:],gam0[1,:])
	
	
	# return if diffusion does not promote separation
	
	ind1 = xsv1<xsv2
	ind1 = ind1*((Gam1+Gam2)/2.0>0.0)
	ind2 = xsv1>xsv2
	ind2 = ind2*((Gam1+Gam2)/2.0<0.0)
	if np.sum(ind1+ind2)==0:
		return Nznew, nscnew, V1, V1sc	
	
	# shell index where partitioning does occur
	is0 = np.squeeze((ind1+ind2)>0)

	# otherwise do partitioning between phases:	
		
	# partition current moles between phases 
	# required mole fraction in 1st phase (1st row) and 
	# 2nd phase (2nd row) of partitioning shell
	xreq = np.append(xsv2, xsv1,axis=0)
	
	a = xreq[1,:] # mole fraction in 2nd phase
	b = 0 # original moles sv in 2nd phase	
	d = 0 # original moles nv in 2nd phase
	f = xreq[0,:] # mole fraction in 1st phase
	
	g = Nznew[0,is0] # original moles sv in bulk (1 phase system)
	h = Nznew[1,is0] # original moles nv in bulk (1 phase system)
	
	
	ind = (xsv2==0.0)
	if np.sum(ind)>0:
		# number of moles sv to transfer
		c = 0.0
		# number of moles of non-volatile to transfer
		if xsv1==1.0: # move all nv out
			e=h	
		# rearrange mole fraction sv in phase 1 eq. to find 
		# no. moles nv needed in phase 1 and subtract from 
		# existing number
		else: 
			e = h-g*((1.0-xsv1)/xsv1)

	else:
		# number of moles sv to transfer
		c = (g+f*(-g-h+b/a-b-d))/(1.0-f/a)
		# number of moles of non-volatile to transfer
		e = b/a+c/a-b-c-d
		
	nnvmove = e
	nsvmove = c
	
	Nznew[0,is0] = Nznew[0,is0]-nsvmove
	Nznew[1,is0] = Nznew[1,is0]-nnvmove
	
	nscnew[0,is0] = nscnew[0,is0]+nsvmove
	nscnew[1,is0] = nscnew[1,is0]+nnvmove
	
	# molar volume of components (m3/mol)
	MV=M/rho
	# new shell volumes
	V1[0,:]=np.sum(Nznew[:,:]*MV, axis=0)	 
	V1sc[0,:]=np.sum(nscnew[:,:]*MV, axis=0)
	
	#print 'schl'
	#print xsv	
	#print xsv1
	#print xsv2	
	#print (Gam1+Gam2)/2.0	
	#print V1
	#print V1sc	
	#print Nznew[0,:]/np.sum(Nznew[:,:],0)
	#print nscnew[0,:]/np.sum(nscnew[:,:],0)
	#return

	return Nznew, nscnew, V1, V1sc
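A quick standalone check of the transfer algebra above (not part of the model): with b = d = 0, as in the code, the expressions reduce to c = (g - f*(g + h))/(1 - f/a) and e = c/a - c, and the post-transfer mole fractions should hit the target values a and f exactly.

import numpy as np

a, f = 0.8, 0.3   # target sv mole fraction in the 2nd and 1st phase
g, h = 1.0, 1.5   # original moles of sv and nv in the single-phase shell

c = (g + f * (-g - h)) / (1.0 - f / a)   # moles of sv to transfer
e = c / a - c                            # moles of nv to transfer

assert np.isclose(c / (c + e), a)                    # 2nd phase at target a
assert np.isclose((g - c) / ((g - c) + (h - e)), f)  # 1st phase at target f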
Example #57
0
def yinProb(yinBuffer, prior, yinBufferSize, minTau0, maxTau0):

    minTau = 2
    maxTau = yinBufferSize

    # adapt period range, if necessary
    if minTau0 > 0 and minTau0 < maxTau0: minTau = minTau0
    if maxTau0 > 0 and maxTau0 < yinBufferSize and maxTau0 > minTau: maxTau = maxTau0

    minWeight = 0.01
    thresholds = np.array([], dtype=np.float32)
    distribution = np.array([], dtype=np.float32)
    peakProb = np.zeros((yinBufferSize,), dtype=np.float64)

    nThreshold = 100
    nThresholdInt = nThreshold

    for i in range(nThresholdInt):

        thresholds = np.append(thresholds, np.double(0.01 + i*0.01))

        if prior == 0:
            distribution = np.append(distribution, uniformDist[i])
        elif prior == 1:
            distribution = np.append(distribution, betaDist1[i])
        elif prior == 2:
            distribution = np.append(distribution, betaDist2[i])
        elif prior == 3:
            distribution = np.append(distribution, betaDist3[i])
        elif prior == 4:
            distribution = np.append(distribution, betaDist4[i])
        elif prior == 5:
            distribution = np.append(distribution, single10[i])
        elif prior == 6:
            distribution = np.append(distribution, single15[i])
        elif prior == 7:
            distribution = np.append(distribution, single20[i])
        else:
            distribution = np.append(distribution, uniformDist[i])

    tau = minTau
    minInd = 0
    minVal = 42.0
    sumProb = 0.0

    while tau+1 < maxTau:
        # yinBuffer < 1 && ...
        if yinBuffer[tau] < thresholds[len(thresholds)-1] and yinBuffer[tau+1] < yinBuffer[tau]:
            # search for all dip points
            while tau + 1 < maxTau and yinBuffer[tau+1] < yinBuffer[tau]:
                tau += 1
            # tau is now local minimum,
            # because it's the turning point from yinBuffer[tau+1] < yinBuffer[tau] to yinBuffer[tau+1] >= yinBuffer[tau]
            if yinBuffer[tau] < minVal and tau > 2:
                minVal = yinBuffer[tau]  # minimum d'
                minInd = tau
            currThreshInd = nThresholdInt-1
            # formula (4): the threshold is on the y-axis; the probability P is the cumulative
            # sum of the distribution over all thresholds with d'(tau) < threshold
            while thresholds[currThreshInd] > yinBuffer[tau] and currThreshInd > -1:
                peakProb[tau] += distribution[currThreshInd]
                currThreshInd -= 1

            sumProb += peakProb[tau]
            tau += 1
        else:
            tau += 1

    if peakProb[minInd] > 1:
        print("WARNING: yin has prob > 1 ??? I'm returning all zeros instead.")
        return np.zeros((yinBufferSize,), dtype=np.float64)

    nonPeakProb = 1.0
    if sumProb > 0:
        for i in range(minTau, maxTau):
            # normalization, the max prob will be peakProb[minInd]
            peakProb[i] = peakProb[i] / sumProb * peakProb[minInd]
            nonPeakProb -= peakProb[i]
    if minInd > 0:
        # adds nonPeakProb only for the prob with minimum d(tau)
        # because here we have a small threshold s, for all tau d'(tau) > s
        # we choose tau as the index of global minimum of d'
        peakProb[minInd] += nonPeakProb * minWeight

    return peakProb
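A minimal usage sketch for yinProb, assuming the prior arrays it indexes (uniformDist, betaDist1, ...) are module-level arrays of length 100 that each sum to one; with a flat prior, a single clear dip in a synthetic difference function should dominate the returned probabilities.

import numpy as np

uniformDist = np.full(100, 1.0 / 100)   # flat prior over the 100 thresholds

yin = np.ones(512, dtype=np.float64)    # synthetic d'(tau): flat with one dip
yin[99], yin[100], yin[101] = 0.5, 0.05, 0.5

probs = yinProb(yin, 0, 512, 0, 0)      # prior 0 selects the uniform prior
print(np.argmax(probs))                 # expected: 100, the dip location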
Example #58
0
# start from empty float arrays so np.append below accumulates a numeric history
loss_array = np.array([])
mse_array = np.array([])
for epoch_idx in range(num_epochs):
    print('epochs : ' + str(epoch_idx))
    history = model.fit(x_train,
                        y_train,
                        epochs=1,
                        batch_size=batch_size,
                        verbose=2,
                        shuffle=False,
                        validation_data=(x_test, y_test),
                        callbacks=[early_stoppng, tb_hist])
    model.reset_states()
    # whether training state carries over between epochs is configured above
    # a stateful LSTM needs reset_states() after every fit (and after evaluate)
    loss_array = np.append(loss_array, history.history['loss'])
    mse_array = np.append(mse_array, history.history['mean_squared_error'])
#    loss_list.append(history.history['loss'])
#    mse_list.append(history.history['mean_squared_error'])

mse, _ = model.evaluate(x_train, y_train, batch_size=batch_size)
print('mse : ', mse)
model.reset_states()

y_predict = model.predict(x_test, batch_size=batch_size)
print(y_predict[0:5])

# RMSE
from sklearn.metrics import mean_squared_error
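
The snippet is cut off at this point; a minimal sketch of the RMSE step it appears to be setting up, assuming y_test is the ground truth matching y_predict:

def rmse(y_true, y_pred):
    return np.sqrt(mean_squared_error(y_true, y_pred))

print('RMSE : ', rmse(y_test, y_predict))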

Example #59
0
        # print nval
        # print indices_of_ones
        # print x_score
        # print y_score
        # print current_score

    current_iteration_location = np.argmax(score)

    current_row = init_array[current_iteration_location]
    number_of_ones = (current_row == 1).sum()
    indices_of_ones = np.where(current_row == True)[0]

    placed_locations[current_iteration_location] = True
    final_sensor_location.append(current_iteration_location)
    covered_locations[indices_of_ones] = True
    covered_indices = np.append(covered_indices, indices_of_ones)
    covered_indices = np.unique(covered_indices)

    uncovered_indices = np.array(
        list(set(uncovered_indices) - set(covered_indices)))

    pairwise_queue_detected_by_current = []
    for item in pairwise_event_queue:
        if current_row[item[0]] != current_row[item[1]]:
            pairwise_queue_detected_by_current.append(item)

    pairwise_event_queue = [
        combination for combination in pairwise_event_queue
        if combination not in pairwise_queue_detected_by_current
    ]
Example #60
0
    def _sim_prediction(self, theta, theta_t, Y, scores, h, t_params,
                        simulations):
        """ Simulates a h-step ahead mean prediction

        Parameters
        ----------
        theta : np.array
            The past predicted values

        theta_t : np.array
            The past local linear trend

        Y : np.array
            The past data

        scores : np.array
            The past scores

        h : int
            How many steps ahead for the prediction

        t_params : np.array
            A vector of (transformed) latent variables

        simulations : int
            How many simulations to perform

        Returns
        ----------
        Matrix of simulations
        """

        model_scale, model_shape, model_skewness = self._get_scale_and_shape(
            t_params)

        sim_vector = np.zeros([simulations, h])

        for n in range(0, simulations):

            Y_exp = Y.copy()
            theta_exp = theta.copy()
            theta_t_exp = theta_t.copy()
            scores_exp = scores.copy()

            #(TODO: vectorize the inner construction here)
            for t in range(0, h):
                new_value1 = theta_t_exp[-1] + theta_exp[
                    -1] + t_params[0] * scores_exp[-1]
                new_value2 = theta_t_exp[-1] + t_params[1] * scores_exp[-1]

                if self.model_name2 == "Exponential GAS":
                    rnd_value = self.family.draw_variable(
                        1.0 / self.link(new_value1), model_scale, model_shape,
                        model_skewness, 1)[0]
                else:
                    rnd_value = self.family.draw_variable(
                        self.link(new_value1), model_scale, model_shape,
                        model_skewness, 1)[0]

                Y_exp = np.append(Y_exp, [rnd_value])
                theta_exp = np.append(theta_exp,
                                      [new_value1])  # For indexing consistency
                theta_t_exp = np.append(theta_t_exp, [new_value2])
                scores_exp = np.append(scores_exp, scores[np.random.randint(
                    scores.shape[0])])  # expectation of score is zero

            sim_vector[n] = Y_exp[-h:]

        return np.transpose(sim_vector)
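A short sketch of how a simulation matrix like the one returned above is typically summarised into a point forecast and an interval; sims below is a placeholder standing in for the (h x simulations) array this method returns.

import numpy as np

sims = np.random.randn(10, 1000)                       # placeholder matrix
mean_forecast = sims.mean(axis=1)                      # h-step point forecast
lower, upper = np.percentile(sims, [5, 95], axis=1)    # 90% simulation band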