Example No. 1
def hinge_loss(X, y, theta, reg_beta=0.0):
    """Computes hinge loss and gradient.

    See square_loss for arguments and return value.
    """
    k, n = X.shape
    # margin is (k, 1)
    margin = y * X.dot(theta)
    loss = (np.sum(np.maximum(np.zeros_like(margin), 1 - margin)) / k +
            np.dot(theta.T, theta) * reg_beta / 2)

    dtheta = np.zeros_like(theta)
    # yx is (k, n) where the elementwise multiplication by y is broadcast across
    # the whole X.
    yx = y * X
    # We're going to select columns of yx, and each column turns into a vector.
    # Precompute the margin_selector vector which has for each j whether the
    # margin for that j was < 1.
    # Note: still keeping an explicit loop over n since I don't expect the
    # number of features to be very large. It's possible to fully vectorize this
    # but that would make the computation even more obscure. I'll do that if
    # performance becomes an issue with this version.
    margin_selector = (margin < 1).ravel()
    for j in range(n):
        # Sum up the contributions to the jth theta element gradient from all
        # input samples.
        dtheta[j, 0] = (np.sum(np.where(margin_selector, -yx[:, j], 0)) / k +
                        reg_beta * theta[j, 0])
    return loss.flat[0], dtheta
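A minimal way to exercise hinge_loss (a usage sketch, not part of the original project; the synthetic data, shapes and reg_beta value below are assumptions consistent with the comments above):

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))                                              # k=100 samples, n=3 features
y = np.where(X[:, :1] + 0.1 * rng.normal(size=(100, 1)) > 0, 1.0, -1.0)   # (k, 1) labels in {-1, +1}
theta = np.zeros((3, 1))                                                   # (n, 1) parameter vector
loss, dtheta = hinge_loss(X, y, theta, reg_beta=0.1)
print(loss, dtheta.shape)                                                  # scalar loss and a (3, 1) gradient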
Example No. 2
    def backward_pass(self, accum_grad):
        _, timesteps, _ = accum_grad.shape

        # Variables where we save the accumulated gradient w.r.t each parameter
        grad_U = np.zeros_like(self.U)
        grad_V = np.zeros_like(self.V)
        grad_W = np.zeros_like(self.W)
        # The gradient w.r.t the layer input.
        # Will be passed on to the previous layer in the network
        accum_grad_next = np.zeros_like(accum_grad)

        # Back Propagation Through Time
        for t in reversed(range(timesteps)):
            # Update gradient w.r.t V at time step t
            grad_V += accum_grad[:, t].T.dot(self.states[:, t])
            # Calculate the gradient w.r.t the state input
            grad_wrt_state = accum_grad[:, t].dot(self.V) * self.activation.gradient(self.state_input[:, t])
            # Gradient w.r.t the layer input
            accum_grad_next[:, t] = grad_wrt_state.dot(self.U)
            # Update gradient w.r.t W and U by backprop. from time step t for at most
            # self.bptt_trunc number of time steps
            for t_ in reversed(np.arange(max(0, t - self.bptt_trunc), t+1)):
                grad_U += grad_wrt_state.T.dot(self.layer_input[:, t_])
                grad_W += grad_wrt_state.T.dot(self.states[:, t_-1])
                # Calculate gradient w.r.t previous state
                grad_wrt_state = grad_wrt_state.dot(self.W) * self.activation.gradient(self.state_input[:, t_-1])

        # Update weights
        self.U = self.U_opt.update(self.U, grad_U)
        self.V = self.V_opt.update(self.V, grad_V)
        self.W = self.W_opt.update(self.W, grad_W)

        return accum_grad_next
Example No. 3
def test_matrix_assemble(dim):
    eps = 1000*DOLFIN_EPS

    (u, uu), (v, vv), (U, UU), dPP, bc = _create_dp_problem(dim)

    # Scalar assemble
    mat = assemble(u*v*U*dPP)

    # Create a numpy matrix based on the local size of the vector
    # and populate it with values from local vector
    loc_range = u.vector().local_range()
    vec_mat = np.zeros_like(mat.array())
    vec_mat[range(loc_range[1] - loc_range[0]),
            range(loc_range[0], loc_range[1])] = u.vector().get_local()

    assert np.sum(np.absolute(mat.array() - vec_mat)) < eps

    # Vector assemble
    mat = assemble((uu[0]*vv[0]*UU[0] + uu[1]*vv[1]*UU[1])*dPP)

    # Create a numpy matrix based on the local size of the vector
    # and populate it with values from local vector
    loc_range = uu.vector().local_range()
    vec_mat = np.zeros_like(mat.array())
    vec_mat[range(loc_range[1] - loc_range[0]),
            range(loc_range[0], loc_range[1])] = uu.vector().get_local()

    assert np.sum(np.absolute(mat.array() - vec_mat)) < eps
Example No. 4
def dateRange(first, last):

    # Type check, float --> int
    if isinstance(first[0], float):
        temp = np.zeros_like(first, dtype='int')
        for i in xrange(temp.size):
            temp[i] = first[i]
        first = tuple(temp)

    if isinstance(last[0], float):
        temp = np.zeros_like(last, dtype='int')
        for i in xrange(temp.size):
            temp[i] = last[i]
        last = tuple(temp)

    # Initialize date dictionary
    dateList = {}

    # Populate dictionary
    first = dt.datetime(*first[:6])
    last = dt.datetime(*last[:6])
    n = (last + dt.timedelta(days=1) - first).days
    dateList['year'] = np.array([(first + dt.timedelta(days=i)).year for i in xrange(n)])
    dateList['month'] = np.array([(first + dt.timedelta(days=i)).month for i in xrange(n)])
    dateList['day'] = np.array([(first + dt.timedelta(days=i)).day for i in xrange(n)])

    return dateList
Example No. 5
def reg(psf_model, parms):
    """
    Regularization and derivative.
    """
    eps = parms.eps
    if (eps is None):
        return np.zeros_like(psf_model)

    psf_shape = psf_model.shape
    d = np.zeros_like(psf_model)
    r = np.zeros_like(psf_model)
    for i in range(psf_shape[0]):
        for j in range(psf_shape[1]): 
            if i > 0:
                r[i, j] += (psf_model[i, j] - psf_model[i - 1, j]) ** 2.
                d[i, j] += 2. * (psf_model[i, j] - psf_model[i - 1, j]) 
            if j > 0:
                r[i, j] += (psf_model[i, j] - psf_model[i, j - 1]) ** 2.
                d[i, j] += 2. * (psf_model[i, j] - psf_model[i, j - 1]) 
            if i < psf_shape[0] - 1:
                r[i, j] += (psf_model[i, j] - psf_model[i + 1, j]) ** 2.
                d[i, j] += 2. * (psf_model[i, j] - psf_model[i + 1, j]) 
            if j < psf_shape[1] - 1:
                r[i, j] += (psf_model[i, j] - psf_model[i, j + 1]) ** 2.
                d[i, j] += 2. * (psf_model[i, j] - psf_model[i, j + 1]) 
    r *= eps
    d *= eps
    return r, d
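For reference, the same nearest-neighbour squared-difference penalty can be written without the explicit double loop. This is a hedged, self-contained sketch (not from the original project); it assumes a 2-D psf_model and a scalar eps, and mirrors the loop above term by term:

import numpy as np

def reg_vectorized(psf_model, eps):
    r = np.zeros_like(psf_model)
    d = np.zeros_like(psf_model)
    # differences between vertically adjacent pixels
    dv = psf_model[1:, :] - psf_model[:-1, :]
    r[1:, :] += dv ** 2
    d[1:, :] += 2. * dv
    r[:-1, :] += dv ** 2
    d[:-1, :] -= 2. * dv
    # differences between horizontally adjacent pixels
    dh = psf_model[:, 1:] - psf_model[:, :-1]
    r[:, 1:] += dh ** 2
    d[:, 1:] += 2. * dh
    r[:, :-1] += dh ** 2
    d[:, :-1] -= 2. * dh
    return r * eps, d * eps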
Example No. 6
def qspline1d_eval(cj, newx, dx=1.0, x0=0):
    """Evaluate a quadratic spline at the new set of points.

    `dx` is the old sample spacing while `x0` was the old origin. In other
    words, the old sample points (knot points) for which the `cj` represent
    spline coefficients were at the equally spaced points::

      oldx = x0 + j*dx  j=0...N-1, with N=len(cj)

    Edges are handled using mirror-symmetric boundary conditions.

    """
    newx = (asarray(newx) - x0) / dx
    res = zeros_like(newx)
    if res.size == 0:
        return res
    N = len(cj)
    cond1 = newx < 0
    cond2 = newx > (N - 1)
    cond3 = ~(cond1 | cond2)
    # handle general mirror-symmetry
    res[cond1] = qspline1d_eval(cj, -newx[cond1])
    res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
    newx = newx[cond3]
    if newx.size == 0:
        return res
    result = zeros_like(newx)
    jlower = floor(newx - 1.5).astype(int) + 1
    for i in range(3):
        thisj = jlower + i
        indj = thisj.clip(0, N - 1)  # handle edge cases
        result += cj[indj] * quadratic(newx - thisj)
    res[cond3] = result
    return res
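If this is the SciPy helper it appears to be, a typical call pairs it with scipy.signal.qspline1d to fit the coefficients first (a usage sketch, assuming the standard scipy.signal API):

import numpy as np
from scipy.signal import qspline1d, qspline1d_eval

x = np.arange(10)                    # knots at x0=0 with spacing dx=1
y = np.sin(x)
cj = qspline1d(y)                    # quadratic-spline coefficients at the knots
newx = np.linspace(0, 9, 91)
y_fine = qspline1d_eval(cj, newx)    # evaluate the spline between the knots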
Example No. 7
def viterbi_decode(score, transition_params):
  """Decode the highest scoring sequence of tags outside of TensorFlow.

  This should only be used at test time.

  Args:
    score: A [seq_len, num_tags] matrix of unary potentials.
    transition_params: A [num_tags, num_tags] matrix of binary potentials.

  Returns:
    viterbi: A [seq_len] list of integers containing the highest scoring tag
        indices.
    viterbi_score: A float containing the score for the Viterbi sequence.
  """
  trellis = np.zeros_like(score)
  backpointers = np.zeros_like(score, dtype=np.int32)
  trellis[0] = score[0]

  for t in range(1, score.shape[0]):
    v = np.expand_dims(trellis[t - 1], 1) + transition_params
    trellis[t] = score[t] + np.max(v, 0)
    backpointers[t] = np.argmax(v, 0)

  viterbi = [np.argmax(trellis[-1])]
  for bp in reversed(backpointers[1:]):
    viterbi.append(bp[viterbi[-1]])
  viterbi.reverse()

  viterbi_score = np.max(trellis[-1])
  return viterbi, viterbi_score
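A quick way to call this decoder (synthetic potentials, not from the original project):

import numpy as np

rng = np.random.default_rng(0)
score = rng.normal(size=(5, 3))              # 5 time steps, 3 tags
transition_params = rng.normal(size=(3, 3))  # tag-to-tag transition scores
tags, best_score = viterbi_decode(score, transition_params)
print(tags, best_score)                      # a list of 5 tag indices and its total score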
Example No. 8
 def __init__(self,n_hidden,n_input,n_out,fnc = 'sigmoid',loss_fnc= softmax,batchsize = 10,epochs = 1,learning_rate = 0.1,reg = 0.0,momentum = 0.0):
             
     self.nn = {}
     self.nn['batchsize'] = batchsize
     self.nn['epochs'] = epochs
     self.nn['learning_rate'] = learning_rate
     self.nn['reg'] = reg
     self.nn['momentum'] = momentum
     self.nn['loss_fnc'] = loss_fnc
     self.nn['w1'] = np.random.randn(n_hidden*n_input).reshape(n_input,n_hidden)/math.sqrt(n_hidden*n_input)
     self.nn['b1'] = np.zeros(n_hidden).reshape(n_hidden)
     
     self.nn['w2'] = np.random.random(n_hidden*n_out).reshape(n_hidden,n_out)/math.sqrt(n_hidden*n_out)
     self.nn['b2'] = np.zeros(n_out).reshape(n_out)
     
     self.nn['dw1'] = np.zeros_like(self.nn['w1'])
     self.nn['db1'] = np.zeros_like(self.nn['b1'])
     
     self.nn['dw2'] = np.zeros_like(self.nn['w2'])
     self.nn['db2'] = np.zeros_like(self.nn['b2'])
     
     self.nn['p_dw1'] = self.nn['dw1']
     self.nn['p_db1'] = self.nn['db1']
     
     self.nn['p_dw2'] = self.nn['dw2']
     self.nn['p_db2'] = self.nn['db2']
Example No. 9
def totalvalue(cash_ini,orderform,valueform):
    
    trades = pd.read_csv(orderform,header=None,sep=',')
    trades = trades.dropna(axis = 1, how='all')
    trades.columns = ['Year','Month','Day','Symbol','Order','Share']
    dateall = []
    for i in np.arange(len(trades.Year)):
        dateall.append(dt.datetime(trades['Year'][i],trades['Month'][i],trades['Day'][i],16))
    dateall = pd.to_datetime(dateall)
    trades=trades.drop(['Year','Month','Day'],axis=1)
    trades['Date']=dateall
    trades.set_index('Date',inplace=True)
    
    ls_symbols = []
    for symbol in trades.Symbol:
        if symbol not in ls_symbols:
            ls_symbols.append(symbol)
            
    startdate = dateall[0]
    enddate = dateall[-1]
    dt_timeofday = dt.timedelta(hours=16)
    ldt_timestamps = du.getNYSEdays(startdate,enddate+dt_timeofday,dt_timeofday)
    ls_keys = 'close'
    c_dataobj = da.DataAccess('Yahoo')
    price = c_dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
    orders = price*np.NaN
    orders = orders.fillna(0)
    for i in np.arange(len(trades.index)):
        ind = trades.index[i]
        if trades.ix[i,'Order']=='Buy':
            orders.loc[ind,trades.ix[i,'Symbol']]+=trades.ix[i,'Share']
        else:
            orders.loc[ind,trades.ix[i,'Symbol']]+=-trades.ix[i,'Share']
    #    keys = ['price','orders']
    #    trading_table = pd.concat([ldf_data,orders],keys=keys,axis=1)
    cash = np.zeros(np.size(price[ls_symbols[0]]),dtype=np.float)
    cash[0] = cash_ini
    # updating the cash value
    for i in np.arange(len(orders.index)):
        if i == 0: 
            cash[i] = cash[i] - pd.Series.sum(price.ix[i,:]*orders.ix[i,:])
        else:
            cash[i] = cash[i-1] - pd.Series.sum(price.ix[i,:]*orders.ix[i,:])
    # updating ownership
    ownership = orders*np.NaN
    for i in np.arange(len(orders.index)):
        ownership.ix[i,:]=orders.ix[:i+1,:].sum(axis=0) 
        
    # updating total portofolio value
    value = np.zeros_like(cash)
    for i in np.arange(len(ownership.index)):
        value[i] = pd.Series.sum(price.ix[i,:]*ownership.ix[i,:]) 
    keys = ['price','orders','ownership']
    trading_table = pd.concat([price,orders,ownership],keys = keys, axis=1)
    trading_table[('value','CASH')]=cash
    trading_table[('value','STOCK')]=value
    total = np.zeros_like(cash)
    total = cash + value
    trading_table[('value','TOTAL')]=total
    trading_table[('value','TOTAL')].to_csv(valueform)
Example No. 10
def get_image_from_kspace(k_real, k_imag):
    """
    Return image from real and imaginary k-space values
    :param k_real:
    :param k_imag:
    :return:
    """
    k_space = (np.zeros_like(k_real) + 0j).astype('complex64')
    ret_img = np.zeros_like(k_real)

    if len(k_real.shape) == 2:
        # Create k-space from real and imaginary part
        k_space.real = k_real
        k_space.imag = k_imag
        ret_img = np.abs(ifft2c(k_space)).astype(np.float32)

    else:
        for i in range(0, k_real.shape[0]):
            # Create k-space from image, assuming this is the fully-sampled k-space.
            k_space[i,:,:] = (np.zeros_like(k_real[i,:,:]) + 0j).astype('complex64')
            k_space[i,:,:].real = k_real[i,:,:]
            k_space[i,:,:].imag = k_imag[i,:,:]

            # Reconstruct image
            ret_img[i, :, :] = np.abs(ifft2c(k_space[i, :, :])).astype(np.float32)

    return ret_img
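The 2-D branch above can also be written without preallocating, since NumPy builds the complex array directly from its real and imaginary parts. A hedged sketch follows; ifft2c is the project's own helper, and the centred inverse-FFT definition below is only an assumption made so the sketch is self-contained:

import numpy as np

def ifft2c(k):
    # assumed definition of the project's centred inverse 2-D FFT helper
    return np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(k)))

def get_image_from_kspace_2d(k_real, k_imag):
    k_space = (k_real + 1j * k_imag).astype('complex64')
    return np.abs(ifft2c(k_space)).astype(np.float32)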
Example No. 11
    def finalize(self):
        """Calculates the flux, inverse variance and resolution for this spectrum.

        Uses the accumulated data from all += operations so far but does not prevent
        further accumulation.  This is the expensive step in coaddition so we make
        it something that you have to call explicitly.  If you forget to do this,
        the flux,ivar,resolution attributes will be None.

        If the coadded resolution matrix is not invertible, a warning message is
        printed and the returned flux vector is zero (but ivar and resolution are
        still valid).
        """
        # Convert to a dense matrix if necessary.
        if scipy.sparse.issparse(self.Cinv):
            self.Cinv = self.Cinv.todense()
        # What pixels are we using?
        mask = (np.diag(self.Cinv) > 0)
        keep = np.arange(len(self.Cinv_f))[mask]
        keep_t = keep[:,np.newaxis]
        # Initialize the results to zero.
        self.flux = np.zeros_like(self.Cinv_f)
        self.ivar = np.zeros_like(self.Cinv_f)
        R = np.zeros_like(self.Cinv)
        # Calculate the deconvolved flux,ivar and resolution for ivar > 0 pixels.
        self.ivar[mask],R[keep_t,keep] = decorrelate(self.Cinv[keep_t,keep])
        try:
            R_it = scipy.linalg.inv(R[keep_t,keep].T)
            self.flux[mask] = R_it.dot(self.Cinv_f[mask])/self.ivar[mask]
        except np.linalg.linalg.LinAlgError:
            self.log.warning('resolution matrix is singular so no coadded fluxes available.')
        # Convert R from a dense matrix to a sparse one.
        self.resolution = desispec.resolution.Resolution(R)
Exemplo n.º 12
0
def num_sdss_rand_both_catalogs(hemi, grid):
    d_rand = load_sdss_rand_both_catalogs(hemi)
    n_noisy = grid.num_from_radecz(d_rand['ra'],d_rand['dec'], d_rand['z'])
    # get the distance-from-observer 3d grid
    d_obs = grid.distance_from_observer()
    d_obs_max = d_obs[n_noisy>0].max()
    d_obs_1d = np.linspace(0, d_obs_max+1., 100)
    n_1d = np.zeros_like(d_obs_1d)
    delta_d_obs = d_obs_1d[1]-d_obs_1d[0]
    for i,this_d_obs in enumerate(d_obs_1d):
        wh=np.where((np.abs(d_obs-this_d_obs)<(0.5*delta_d_obs)) & (n_noisy>0))
        if len(wh[0])==0: continue
        n_1d[i] = np.median(n_noisy[wh])
    # now interpolate n_1d onto 3d grid
    from scipy import interpolate
    f = interpolate.interp1d(d_obs_1d, n_1d)
    n_median = np.zeros_like(n_noisy)
    wh_ok_interp = np.where((d_obs>np.min(d_obs_1d))&(d_obs<np.max(d_obs_1d)))    
    n_median[wh_ok_interp] = f(d_obs[wh_ok_interp])

    weight = np.zeros_like(n_median)
    weight[n_noisy>12]=1.
    n_median *= weight
    #pl.figure(1); pl.clf(); pl.imshow(n_noisy[:,:,128], vmin=0,vmax=n_median.max()); pl.colorbar()
    #pl.figure(2); pl.clf(); pl.imshow(n_median[:,:,128], vmin=0,vmax=n_median.max()); pl.colorbar()
    #ipdb.set_trace()
    return n_median, weight
Example No. 13
def dataVtk_3dMatrix(points,bounds,vectors):
    """
    Function that turns VTK-formatted output data into 3D field matrix data,
    from [(x1,y1,z1),...,(xn,yn,zn)]
    to [[[[x1,y1,z1],[...],[x3,y1,z1]],[[x1,y2,z1],[...],[...]],[[x1,y3,z1],[...],[...]]]
    ,[[[x1,y1,z2],[...],[...]],[...],[...]] , [.........]]
    -points => list of the coordinates of the points where the data is located.
    -bounds => bounds of the data (Xmin,Xmax,Ymin,Ymax,Zmin,Zmax)
    -vectors => vector data of the field at the 'points'
    """
    #assign variables
    (xmin,xmax,ymin,ymax,zmin,zmax) = bounds
        
    #generate the output arrays
    grid3d = N.mgrid[zmin:zmax+1, ymin:ymax+1, xmin:xmax+1]
    pnts3d = N.zeros_like(grid3d[0],dtype= N.ndarray)
    vect3d = N.zeros_like(grid3d[0],dtype= N.ndarray)
    
    #loop and rearrange
    for i in range(len(points)):
        x_t = points[i][0]
        y_t = points[i][1]
        z_t = points[i][2]
        pnts3d[z_t+zmax][y_t+ymax][x_t+xmax] = points[i]
        vect3d[z_t+zmax][y_t+ymax][x_t+xmax] = vectors[i]
        
    return {'points':pnts3d,'vectors':vect3d}
Example No. 14
    def _transform_dense(self, X):
        non_zero = (X != 0.0)
        X_nz = X[non_zero]

        X_step = np.zeros_like(X)
        X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)

        X_new = [X_step]

        log_step_nz = self.sample_interval_ * np.log(X_nz)
        step_nz = 2 * X_nz * self.sample_interval_

        for j in range(1, self.sample_steps):
            factor_nz = np.sqrt(step_nz /
                                np.cosh(np.pi * j * self.sample_interval_))

            X_step = np.zeros_like(X)
            X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
            X_new.append(X_step)

            X_step = np.zeros_like(X)
            X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
            X_new.append(X_step)

        return np.hstack(X_new)
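This looks like the dense code path of scikit-learn's AdditiveChi2Sampler; in normal use you would go through the public estimator instead (a usage sketch, assuming the standard sklearn API):

import numpy as np
from sklearn.kernel_approximation import AdditiveChi2Sampler

X = np.abs(np.random.default_rng(0).normal(size=(5, 4)))  # the chi2 map expects non-negative features
sampler = AdditiveChi2Sampler(sample_steps=2)
X_mapped = sampler.fit_transform(X)
print(X_mapped.shape)                                     # (5, 4 * (2 * sample_steps - 1)) = (5, 12)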
Example No. 15
    def fit_deriv(cls, x, amplitude, x_0, width):
        """One dimensional Box model derivative with respect to parameters"""

        d_amplitude = cls.evaluate(x, 1, x_0, width)
        d_x_0 = np.zeros_like(x)
        d_width = np.zeros_like(x)
        return [d_amplitude, d_x_0, d_width]
Example No. 16
def _compute_rotations(positions, pos_seed, rot_seed):

    logger.info("compute_rotations: starting aux calculations")
    # logger.info("positions array {}".format(positions))
    # logger.debug("rot_seed = {}".format(rot_seed))
    # logger.debug("pos_seed = {}".format(pos_seed))

    delta = np.zeros_like(positions)
    delta[1:] = positions[1:] - positions[:-1]
    delta[0] = positions[0] - pos_seed
    # logger.debug("delta array {}".format(delta))

    z = np.zeros_like(delta)  # zero array like delta
    z[delta == -5] = 1  # where delta = -5, set increment to 1
    z[delta == 5] = -1  # where delta is 5, set increment to -1

    logger.info("compute_rotations: done with aux calculations")

    logger.info("compute_rotations: starting primary calculations")
    z[0] += rot_seed
    r = np.cumsum(z)

    logger.info("compute_rotations: done with primary calculations")

    return r
Example No. 17
	def predictionToPosition(self,pi, dim = 64):
		pirescale = np.expand_dims(pi, axis=1)
		pirescale = np.append(pirescale, np.zeros_like(pirescale), axis=1)
		positions = np.zeros_like(pirescale)
		positions[:,0] = pirescale[:,0] // dim
		positions[:,1] = pirescale[:,0] % dim
		return positions
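For reference, the row/column split computed here is the same thing np.unravel_index gives for a flat index into a dim x dim grid (a side sketch, not from the original project):

import numpy as np

pi = np.array([0, 65, 4095])
rows, cols = np.unravel_index(pi, (64, 64))
print(rows, cols)        # rows == pi // 64, cols == pi % 64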
Example No. 18
    def gradientOfNormalizedRespectW(self, sequence):
        seqLength  = sequence.Length
        
        (logPotential0, logPotentials) = self.LogPotentialTable(sequence)
        #print(logPotentials)
        forwardMessages  = self.forward(logPotential0, logPotentials, seqLength)
        backwardMessages = self.backward(logPotentials, seqLength)

        b = forwardMessages[seqLength-1].max()
        logNormalized = np.log(np.exp(forwardMessages[seqLength-1] - b).sum()) + b

        WnodeGradient = np.zeros_like(self.mWnode)
        WedgeGradient = np.zeros_like(self.mWedge)

        for i in range(0, seqLength):
            for j in range(0, self.mLabelStateSize):
                #print(forwardMessages[i][j] + backwardMessages[i][j] - logNormalized)
                WnodeGradient[sequence.GetFeature(i, j)] += np.exp(forwardMessages[i][j] + backwardMessages[i][j] - logNormalized).clip(0.0, 1.0)
        
        for i in range(1, seqLength):
            for j in range(0, self.mLabelStateSize):
                for k in range(0, self.mLabelStateSize):
                    #print(forwardMessages[i-1][j] + logPotentials[i-1][j][k] + backwardMessages[i][k] - logNormalized)
                    WedgeGradient[sequence.GetFeature(i, j, k)] += np.exp(forwardMessages[i-1][j] + logPotentials[i-1][j][k] + backwardMessages[i][k] - logNormalized).clip(0.0, 1.0)

        # print(WnodeGradient, WedgeGradient)
        return (WnodeGradient, WedgeGradient)
Example No. 19
def generate_sky_model_y_hybrid(baselines,del_bl,num_bl,fits_file):
    """
    y is a vector of the visibilities at different baselines
    """
    healmap = a.map.Map(fromfits=fits_file)
    px_array = n.arange(healmap.npix()) # gets an array of healpix pixel indices
    rx,ry,rz = n.array(healmap.px2crd(px_array,ncrd=3)) # finds the topocentric coords for each healpix pixel
    phi,theta = n.array(healmap.px2crd(px_array,ncrd=2)) # phi,theta in math coords
    print px_array.shape
    true_sky = healmap.map.map

    beamsig_largebm = 10/(2*n.pi*del_bl*(num_bl-1))                                                            
    beamsig_smallbm = 10/(2*n.pi*del_bl)
    amp_largebm = uf.gaussian(beamsig_largebm,n.zeros_like(theta),phi)
    amp_smallbm = uf.gaussian(beamsig_smallbm,n.zeros_like(theta),phi)

    #smallbm_inds = (int(n.floor(num_bl/2)),int(n.floor(num_bl/2)))
    smallbm_ind = int(n.floor(num_bl/2))*(num_bl+1)+1

    dOmega = 4*n.pi/px_array.shape[0]

    visibilities = n.zeros(baselines.shape[0],dtype='complex')
    print baselines.shape[0]
    for kk in range(baselines.shape[0]):
        #print kk
        bx,by,bz = baselines[kk]
        if kk==smallbm_ind: amp = amp_smallbm                                                             
        else: amp = amp_largebm
        Vis = amp*true_sky*n.exp(2j*n.pi*(bx*rx+by*ry+bz*rz))*dOmega
        visibilities[kk] = n.sum(Vis)
    return visibilities
Example No. 20
 def pop(self, index=-1):
     if not isinstance(index, int):
         msg = "{0} indices must be integers, not {1}"
         raise TypeError(msg.format(self.__class__.__name__,
                                    index.__class__.__name__))
     if index < 0:
         index += self._len
     if index < 0:
         raise IndexError
     if self.storage == "numpy":
         ret_data = datacopy(self._data[index])
         ret_lengths = None
         if self._arity > 1:
             ret_lengths = _get_lenarray_empty(ret_data.shape)
         ret = self._element(
                 _mode="from_numpy",
                 data_store=ret_data,
                 len_data_store=ret_lengths,
             )
         self._data[index:self._len-1] = self._data[index+1:self._len]
         try:
             self._data[self._len-1] = np.zeros_like(self._data[self._len-1])
         except ValueError: # numpy bug
             for field in self._data.dtype.fields:
                 self._data[self._len-1][field] = np.zeros_like(self._data[self._len-1][field])
         self._del_child_lengths(index)
     elif self._elementary:
         ret = self._data[:self._len][index]
         self._data.__delitem__(index)
     else:
         ret = self._children[:self._len][index].copy()
         self._children.__delitem__(index)
         self._data.__delitem__(index)
     self._len -= 1
     return ret
Example No. 21
def lu_fact(A):
    """
    Function for computing the LU factorization
    Inputs:
    A: Matrix of the system

    Output:
    L: Lower triangular matrix
    U: Upper triangular matrix

    """
    n = np.size(A, 1)
    L = np.zeros_like(A, float)
    U = np.zeros_like(A, float)

    for k in xrange(0, n-1):
        for i in xrange(k+1, n):
            if A[i, k] != 0.0:
                lam = A[i, k] / A[k, k]
                A[i, k+1:n] = A[i, k+1:n] - lam*A[k, k+1:n]
                A[i, k] = lam

    L = np.tril(A, -1) + np.identity(n)
    U = np.triu(A)
    return L, U
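A quick sanity check for the factorization (a sketch assuming Python 3, i.e. xrange replaced by range, a float matrix, and no zero pivots; note that lu_fact overwrites A in place, hence the copy):

import numpy as np

A = np.array([[4., 3.], [6., 3.]])
L, U = lu_fact(A.copy())
print(np.allclose(L.dot(U), [[4., 3.], [6., 3.]]))   # True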
Example No. 22
def fdtd(input_grid, steps):
    grid = input_grid.copy()
    old_grid = np.zeros_like(input_grid)
    previous_grid = np.zeros_like(input_grid)

    l_x = grid.shape[0]
    l_y = grid.shape[1]

    for i in range(steps):
        np.copyto(previous_grid, old_grid)
        np.copyto(old_grid, grid)

        for x in range(l_x):
            for y in range(l_y):
                grid[x,y] = 0.0
                if 0 < x+1 < l_x:
                    grid[x,y] += old_grid[x+1,y]
                if 0 < x-1 < l_x:
                    grid[x,y] += old_grid[x-1,y]
                if 0 < y+1 < l_y:
                    grid[x,y] += old_grid[x,y+1]
                if 0 < y-1 < l_y:
                    grid[x,y] += old_grid[x,y-1]

                grid[x,y] /= 2.0
                grid[x,y] -= previous_grid[x,y]

    return grid
Example No. 23
    def __init__(self, network, **kwargs):
        # due to the way that theano handles updates, we cannot update a
        # parameter twice during the same function call. so, instead of handling
        # everything in the updates for self.f_learn(...), we split the
        # parameter updates into two function calls. the first "prepares" the
        # parameters for the gradient computation by moving the entire model one
        # step according to the current velocity. then the second computes the
        # gradient at that new model position and performs the usual velocity
        # and parameter updates.

        self.params = network.params(**kwargs)
        self.momentum = kwargs.get('momentum', 0.5)

        # set up space for temporary variables used during learning.
        self._steps = []
        self._velocities = []
        for param in self.params:
            v = param.get_value()
            n = param.name
            self._steps.append(theano.shared(np.zeros_like(v), name=n + '_step'))
            self._velocities.append(theano.shared(np.zeros_like(v), name=n + '_vel'))

        # step 1. move to the position in parameter space where we want to
        # compute our gradient.
        prepare = []
        for param, step, velocity in zip(self.params, self._steps, self._velocities):
            prepare.append((step, self.momentum * velocity))
            prepare.append((param, param + step))

        logging.info('compiling NAG adjustment function')
        self.f_prepare = theano.function([], [], updates=prepare)

        super(NAG, self).__init__(network, **kwargs)
Example No. 24
def sor(A, b):
    sol = []
    
    n = len(A)
    D = np.zeros_like(A)
    L = np.zeros_like(A)
    
    for i in range(0,n):
        D[i][i] = A[i][i];
        
    for i in range(0,n):
        for j in range(0,i):
            L[i][j] = -A[i][j];
    
    omega = omegafind(A,D)   
    Q = D/omega -L
    Tj = np.linalg.inv(Q).dot(Q-A)
    c = np.linalg.inv(Q).dot(b)
    x = np.zeros_like(b)
    

    for itr in range(ITERATION_LIMIT):
        x=Tj.dot(x) + c;

    sol = x
    
    return list(sol)
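omegafind and ITERATION_LIMIT are defined elsewhere in the original project. A self-contained sketch of the same fixed-point iteration, with an assumed relaxation factor and iteration count, looks like this:

import numpy as np

A = np.array([[4., 1.], [2., 3.]])
b = np.array([1., 2.])
D = np.diag(np.diag(A))
L_strict = -np.tril(A, -1)            # same sign convention as the loop above
omega = 1.1                           # assumed relaxation factor
Q = D / omega - L_strict
T = np.linalg.inv(Q).dot(Q - A)
c = np.linalg.inv(Q).dot(b)
x = np.zeros_like(b)
for _ in range(50):                   # assumed iteration limit
    x = T.dot(x) + c
print(x, np.linalg.solve(A, b))       # both close to [0.1, 0.6]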
Example No. 25
 def filter_frames(self, data):
     data = data[0]
     lp = gaussian_filter(data, 100)
     hp = data - lp # poor man's background subtraction
     hp -= np.min(hp)
     sh = hp.shape
     print "here"
     hp = hp.astype('uint32')
     hp = flex.int(hp)
     print "here now"
     
     mask = flex.bool(np.ones_like(hp).astype('bool'))
     print "here now"
     result1 = flex.bool(np.zeros_like(hp).astype('bool'))
     spots = np.zeros_like(hp).astype('bool')
     print "here now"
     
     for i in range(3, self.parameters['spotsize'], 5):
         print "here now"
         algorithm = DispersionThreshold(sh, (i, i), 1, 1, 0, -1)
         print "here now"
         print type(hp), type(mask), type(result1)
         thing = algorithm(hp, mask, result1)
         print "here now"
         spots = spots + result1.as_numpy_array()
     return [data, spots*data]
Example No. 26
def compute_normals(im_pos, n_offset=3):
    """
    Converts an XYZ image to a Normal Image
    --Input--
    im_pos : ndarray (NxMx3)
        Image with x/y/z values for each pixel
    n_offset : int
        Smoothness factor for calculating the gradient
    --Output--
    normals : ndarray (NxMx3)
        Image with normal vectors for each pixel
    """
    gradients_x = np.zeros_like(im_pos)
    gradients_y = np.zeros_like(im_pos)
    for i in range(3):
        gradients_x[:, :, i], gradients_y[:, :, i] = np.gradient(im_pos[:, :, i], n_offset)

    gradients_x /= np.sqrt(np.sum(gradients_x**2, -1))[:, :, None]
    gradients_y /= np.sqrt(np.sum(gradients_y**2, -1))[:, :, None]

    normals = np.cross(gradients_x.reshape([-1, 3]),
                       gradients_y.reshape([-1, 3])).reshape(im_pos.shape)
    normals /= np.sqrt(np.sum(normals**2, -1))[:, :, None]
    normals = np.nan_to_num(normals)

    return normals
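A small usage sketch on a synthetic, perfectly flat XYZ image (not from the original project); every normal should come out as a unit vector along the z axis (here [0, 0, -1] with this orientation convention):

import numpy as np

ys, xs = np.mgrid[0:64, 0:64].astype(float)
im_pos = np.dstack([xs, ys, np.zeros_like(xs)])   # flat plane at z=0
normals = compute_normals(im_pos)
print(normals.shape, normals[32, 32])             # (64, 64, 3), approximately [0, 0, -1]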
Exemplo n.º 27
0
def initialize_adam(parameters):
    '''
    Initializes v and s as dictionaries containing the following fields:
    -keys: 'dW1','db1',...,'dWL','dbL'
    -values: numpy arrays of zeros with the same shape as the corresponding gradient/parameter

    :param parameters: dictionary containing the parameters
            parameters['W'+str(l)] = Wl
            parameters['b'+str(l)] = bl
    :return:
    v - exponentially weighted averages of the gradients, with fields:
        v['dW'+str(l)] = ...
        v['db'+str(l)] = ...
    s - exponentially weighted averages of the squared gradients, with fields:
        s['dW'+str(l)] = ...
        s['db'+str(l)] = ...
    '''

    L = len(parameters)//2
    v= {}
    s = {}

    for l in range(L):
        v['dW'+str(l+1)] = np.zeros_like(parameters['W'+str(l+1)])
        v['db'+str(l+1)] = np.zeros_like(parameters['b'+str(l+1)])

        s['dW'+str(l+1)] = np.zeros_like(parameters['W'+str(l+1)])
        s['db'+str(l+1)] = np.zeros_like(parameters['b'+str(l+1)])

    return(v,s)
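A tiny usage sketch with hypothetical parameter shapes (a two-layer network):

import numpy as np

parameters = {
    'W1': np.random.randn(4, 3), 'b1': np.zeros((4, 1)),
    'W2': np.random.randn(1, 4), 'b2': np.zeros((1, 1)),
}
v, s = initialize_adam(parameters)
print(v['dW1'].shape, s['db2'].shape)   # (4, 3) (1, 1)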
Example No. 28
File: dhf.py Project: pengdl/pyscf
def get_jk_coulomb(mol, dm, hermi=1, coulomb_allow='SSSS',
                   opt_llll=None, opt_ssll=None, opt_ssss=None):
    if coulomb_allow.upper() == 'LLLL':
        logger.info(mol, 'Coulomb integral: (LL|LL)')
        j1, k1 = _call_veff_llll(mol, dm, hermi, opt_llll)
        n2c = j1.shape[1]
        vj = numpy.zeros_like(dm)
        vk = numpy.zeros_like(dm)
        vj[...,:n2c,:n2c] = j1
        vk[...,:n2c,:n2c] = k1
    elif coulomb_allow.upper() == 'SSLL' \
      or coulomb_allow.upper() == 'LLSS':
        logger.info(mol, 'Coulomb integral: (LL|LL) + (SS|LL)')
        vj, vk = _call_veff_ssll(mol, dm, hermi, opt_ssll)
        j1, k1 = _call_veff_llll(mol, dm, hermi, opt_llll)
        n2c = j1.shape[1]
        vj[...,:n2c,:n2c] += j1
        vk[...,:n2c,:n2c] += k1
    else: # coulomb_allow == 'SSSS'
        logger.info(mol, 'Coulomb integral: (LL|LL) + (SS|LL) + (SS|SS)')
        vj, vk = _call_veff_ssll(mol, dm, hermi, opt_ssll)
        j1, k1 = _call_veff_llll(mol, dm, hermi, opt_llll)
        n2c = j1.shape[1]
        vj[...,:n2c,:n2c] += j1
        vk[...,:n2c,:n2c] += k1
        j1, k1 = _call_veff_ssss(mol, dm, hermi, opt_ssss)
        vj[...,n2c:,n2c:] += j1
        vk[...,n2c:,n2c:] += k1
    return vj, vk
Example No. 29
    def calculate_feed_tonnage_constrain(self,schedule,opening = None,closing = None):
        if opening is None:
            opening = np.zeros(self.ndp,dtype=np.int)
        else:
            assert(schedule.shape[0] == opening.shape[0])
            
        if closing is None:
            closing = np.zeros(self.ndp,dtype=np.int)
            closing[:] = self.nperiods - 1
        else:
            assert(schedule.shape[0] == closing.shape[0])


        production_period = self.calculate_feed_tonnage(schedule,opening,closing)

        #calculate the deviation from feed targets
        #logger.debug("minimum_feed_production=%f",self.minimum_feed_production)
        #logger.debug("maximum_feed_production=%f",self.maximum_feed_production)

        minp = np.zeros_like(production_period)
        indices = np.where(production_period < self.minimum_feed_production)[0]
        if len(indices) > 0:
            minp[indices] = self.minimum_feed_production - production_period[indices]

        maxp = np.zeros_like(production_period)
        indices = np.where(production_period > self.maximum_feed_production)[0]
        if len(indices) > 0:
            maxp[indices] = production_period[indices] - self.maximum_feed_production

            
        return tuple(maxp) + tuple(minp)
Example No. 30
def curvesSimilar(t1, y1, t2, y2, tol):
    """
    This function returns True if the two given curves are similar enough within tol. Otherwise returns False.

    t1: time/domain of standard curve we assume to be correct
    y1: values of standard curve, usually either temperature in (K) or log of a mol fraction
    t2: time/domain of test curve
    y2: values of test curve, usually either temperature in (K) or log of a mol fraction

    The test curve is first synchronized to the standard curve using the findNearest function. We then calculate the value of
    abs((y1-y2')/y1), giving us a normalized difference for every point. If the average value of these differences is less
    than tol, we say the curves are similar.

    We choose this criterion because it is compatible with the step functions we expect to see in ignition systems.
    """
    # Make synchronized versions of t2, y2 called t2sync, y2sync.
    t2sync=numpy.zeros_like(t1)
    y2sync=numpy.zeros_like(t1)
    for i, timepoint1 in enumerate(t1):
        time_index = findNearest(t2, timepoint1)
        t2sync[i]=t2[time_index]
        y2sync[i]=y2[time_index]

    # Get R^2 value equivalent:
    normalizedError=abs((y1-y2sync)/y1)
    normalizedError=sum(normalizedError)/len(y1)

    if normalizedError > tol:
        return False
    else:
        return True
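findNearest is defined elsewhere in the original project; a common behaviour is simply to return the index of the closest time point. A hedged sketch of that assumed helper plus a call (the curves below are made up for illustration):

import numpy

def findNearest(t, value):
    # assumed behaviour: index of the element of t closest to value
    return int(numpy.argmin(numpy.abs(numpy.asarray(t) - value)))

t1 = numpy.linspace(0., 1., 50)
y1 = numpy.where(t1 < 0.5, 300., 2000.)      # step function, e.g. an ignition temperature trace
t2 = numpy.linspace(0., 1., 200)
y2 = numpy.where(t2 < 0.52, 300., 2000.)     # slightly shifted test curve
print(curvesSimilar(t1, y1, t2, y2, tol=0.05))   # True for this small offset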
Example No. 31
PORTAL_NUM = 11475124

pb = progressbar.ProgressBar()

head = np.load('head.npy')


def find(x):
    if head[head[x]] != head[x]:
        head[x] = find(head[x])
    return head[x]


num = len(head)
count = np.zeros_like(head)
grp = np.zeros_like(head)

total = 0

pb.start(PORTAL_NUM)
for i in range(num):
    count[find(i)] += 1
    pb.update(i + 1)
    if head[i] == i:
        grp[i] = total
        total += 1
pb.finish()

np.savez("result.npz", head, count, grp)
Example No. 32
    def calc_grad(self):
        """Compute parameter gradient."""

        grad = np.zeros_like(self.W)
        W_recs = [self.get_weights(self.W, (l, l))
                  for l in range(self.n_layers)]
        batch_size = self.inputs.shape[0]
        sig_len = self.inputs.shape[1]

        # temporary space to minimize memory allocations
        tmp_act = [np.zeros((batch_size, l), dtype=self.dtype)
                   for l in self.shape]
        tmp_grad = np.zeros_like(grad)

        if self.truncation is None:
            trunc_per = trunc_len = sig_len
        else:
            trunc_per, trunc_len = self.truncation

        for n in range(trunc_per - 1, sig_len, trunc_per):
            # every trunc_per timesteps we want to run backprop

            deltas = [np.zeros((batch_size, l), dtype=self.dtype)
                      for l in self.shape]
            state_deltas = [None if not l.stateful else
                            np.zeros((batch_size, self.shape[i]),
                                     dtype=self.dtype)
                            for i, l in enumerate(self.layers)]

            # backpropagate error
            for s in range(n, np.maximum(n - trunc_len, -1), -1):
                # execute trunc_len steps of backprop through time

                error = self.loss.d_loss([a[:, s] for a in self.activations],
                                         self.targets[:, s])
                error = [np.zeros_like(self.activations[i][:, s]) if e is None
                         else e for i, e in enumerate(error)]

                for l in range(self.n_layers - 1, -1, -1):
                    for post in self.conns[l]:
                        error[l] += np.dot(deltas[post],
                                           self.get_weights(self.W,
                                                            (l, post))[0].T,
                                           out=tmp_act[l])

                        # feedforward gradient
                        W_grad, b_grad = self.get_weights(grad, (l, post))
                        W_tmp_grad, b_tmp_grad = self.get_weights(tmp_grad,
                                                                  (l, post))
                        W_grad += np.dot(self.activations[l][:, s].T,
                                         deltas[post], out=W_tmp_grad)
                        b_grad += np.sum(deltas[post], axis=0, out=b_tmp_grad)

                    # add recurrent error
                    if l in self.rec_layers:
                        error[l] += np.dot(deltas[l], W_recs[l][0].T,
                                           out=tmp_act[l])

                    # compute deltas
                    if not self.layers[l].stateful:
                        self.J_dot(self.d_activations[l][:, s], error[l],
                                   transpose_J=True, out=deltas[l])
                    else:
                        d_input = self.d_activations[l][:, s, ..., 0]
                        d_state = self.d_activations[l][:, s, ..., 1]
                        d_output = self.d_activations[l][:, s, ..., 2]

                        state_deltas[l] += self.J_dot(d_output, error[l],
                                                      transpose_J=True,
                                                      out=tmp_act[l])
                        self.J_dot(d_input, state_deltas[l], transpose_J=True,
                                   out=deltas[l])
                        self.J_dot(d_state, state_deltas[l], transpose_J=True,
                                   out=state_deltas[l])

                    # gradient for recurrent weights
                    if l in self.rec_layers:
                        W_grad, b_grad = self.get_weights(grad, (l, l))
                        W_tmp_grad, b_tmp_grad = self.get_weights(tmp_grad,
                                                                  (l, l))
                        if s > 0:
                            W_grad += np.dot(self.activations[l][:, s - 1].T,
                                             deltas[l], out=W_tmp_grad)
                        else:
                            # put remaining gradient into initial bias
                            b_grad += np.sum(deltas[l], axis=0,
                                             out=b_tmp_grad)

        grad /= batch_size

        return grad
Example No. 33
def main(args):
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    # ========= Define VIBE model ========= #
    model = VIBE_Demo(
        seqlen=16,
        device=device,
        n_layers=2,
        hidden_size=1024,
        add_linear=True,
        use_residual=True,
    ).to(device)

    # ========= Load pretrained weights ========= #
    pretrained_file = download_ckpt(use_3dpw=False)
    ckpt = torch.load(pretrained_file, map_location=device)
    print(f'Performance of pretrained model on 3DPW: {ckpt["performance"]}')
    ckpt = ckpt['gen_state_dict']
    model.load_state_dict(ckpt, strict=False)
    model.eval()
    print(f'Loaded pretrained weights from \"{pretrained_file}\"')

    total_time = time.time()
    # ========= Run VIBE on crops ========= #
    print(f'Running VIBE on crops...')
    vibe_time = time.time()
    image_folder = args.input_folder

    dataset = InferenceFromCrops(image_folder=image_folder)
    orig_height = orig_width = 512

    dataloader = DataLoader(dataset,
                            batch_size=args.vibe_batch_size,
                            num_workers=0)

    with torch.no_grad():

        pred_cam, pred_verts, pred_pose, pred_betas, pred_joints3d, norm_joints2d = [], [], [], [], [], []

        for batch_num, batch in enumerate(dataloader):
            print("BATCH:", batch_num)
            batch = batch.unsqueeze(0)
            batch = batch.to(device)

            batch_size, seqlen = batch.shape[:2]
            output = model(batch)[-1]

            pred_cam.append(output['theta'][:, :, :3].reshape(
                batch_size * seqlen, -1))
            pred_verts.append(output['verts'].reshape(batch_size * seqlen, -1,
                                                      3))
            pred_pose.append(output['theta'][:, :, 3:75].reshape(
                batch_size * seqlen, -1))
            pred_betas.append(output['theta'][:, :, 75:].reshape(
                batch_size * seqlen, -1))
            pred_joints3d.append(output['kp_3d'].reshape(
                batch_size * seqlen, -1, 3))

        pred_cam = torch.cat(pred_cam, dim=0)
        pred_verts = torch.cat(pred_verts, dim=0)
        pred_pose = torch.cat(pred_pose, dim=0)
        pred_betas = torch.cat(pred_betas, dim=0)
        pred_joints3d = torch.cat(pred_joints3d, dim=0)

        del batch

    # ========= [Optional] run Temporal SMPLify to refine the results ========= #
    if args.run_smplify and args.tracking_method == 'pose':
        norm_joints2d = np.concatenate(norm_joints2d, axis=0)
        norm_joints2d = convert_kps(norm_joints2d, src='staf', dst='spin')
        norm_joints2d = torch.from_numpy(norm_joints2d).float().to(device)

        # Run Temporal SMPLify
        update, new_opt_vertices, new_opt_cam, new_opt_pose, new_opt_betas, \
        new_opt_joints3d, new_opt_joint_loss, opt_joint_loss = smplify_runner(
            pred_rotmat=pred_pose,
            pred_betas=pred_betas,
            pred_cam=pred_cam,
            j2d=norm_joints2d,
            device=device,
            batch_size=norm_joints2d.shape[0],
            pose2aa=False,
        )

        # update the parameters after refinement
        print(
            f'Update ratio after Temporal SMPLify: {update.sum()} / {norm_joints2d.shape[0]}'
        )
        pred_verts = pred_verts.cpu()
        pred_cam = pred_cam.cpu()
        pred_pose = pred_pose.cpu()
        pred_betas = pred_betas.cpu()
        pred_joints3d = pred_joints3d.cpu()
        pred_verts[update] = new_opt_vertices[update]
        pred_cam[update] = new_opt_cam[update]
        pred_pose[update] = new_opt_pose[update]
        pred_betas[update] = new_opt_betas[update]
        pred_joints3d[update] = new_opt_joints3d[update]

    elif args.run_smplify and args.tracking_method == 'bbox':
        print(
            '[WARNING] You need to enable pose tracking to run Temporal SMPLify algorithm!'
        )
        print('[WARNING] Continuing without running Temporal SMPLify!..')

    # ========= Save results to a pickle file ========= #
    output_path = image_folder.replace('cropped_frames', 'vibe_results')
    os.makedirs(output_path, exist_ok=True)

    pred_cam = pred_cam.cpu().numpy()
    pred_verts = pred_verts.cpu().numpy()
    pred_pose = pred_pose.cpu().numpy()
    pred_betas = pred_betas.cpu().numpy()
    pred_joints3d = pred_joints3d.cpu().numpy()

    vibe_results = {
        'pred_cam': pred_cam,
        'verts': pred_verts,
        'pose': pred_pose,
        'betas': pred_betas,
        'joints3d': pred_joints3d,
    }

    del model
    end = time.time()
    fps = len(dataset) / (end - vibe_time)

    print(f'VIBE FPS: {fps:.2f}')
    total_time = time.time() - total_time
    print(
        f'Total time spent: {total_time:.2f} seconds (including model loading time).'
    )
    print(
        f'Total FPS (including model loading time): {len(dataset) / total_time:.2f}.'
    )

    print(
        f'Saving vibe results to \"{os.path.join(output_path, "vibe_results.pkl")}\".'
    )

    with open(os.path.join(output_path, "vibe_results.pkl"), 'wb') as f_save:
        pickle.dump(vibe_results, f_save)

    if not args.no_render:
        # ========= Render results as a single video ========= #
        renderer = Renderer(resolution=(orig_width, orig_height),
                            orig_img=True,
                            wireframe=args.wireframe)

        output_img_folder = os.path.join(output_path, 'vibe_images')
        os.makedirs(output_img_folder, exist_ok=True)

        print(f'Rendering output video, writing frames to {output_img_folder}')

        image_file_names = sorted([
            os.path.join(image_folder, x) for x in os.listdir(image_folder)
            if x.endswith('.png') or x.endswith('.jpg')
        ])

        for frame_idx in tqdm(range(len(image_file_names))):
            img_fname = image_file_names[frame_idx]
            img = cv2.imread(img_fname)

            frame_verts = vibe_results['verts'][frame_idx]
            frame_cam = vibe_results['pred_cam'][frame_idx]

            mesh_filename = None

            if args.save_obj:
                mesh_folder = os.path.join(output_path, 'vibe_meshes')
                os.makedirs(mesh_folder, exist_ok=True)
                mesh_filename = os.path.join(mesh_folder,
                                             f'{frame_idx:06d}.obj')

            rend_img = renderer.render(
                img,
                frame_verts,
                cam=frame_cam,
                mesh_filename=mesh_filename,
            )

            whole_img = rend_img

            if args.sideview:
                side_img_bg = np.zeros_like(img)
                side_rend_img90 = renderer.render(
                    side_img_bg,
                    frame_verts,
                    cam=frame_cam,
                    angle=90,
                    axis=[0, 1, 0],
                )
                side_rend_img270 = renderer.render(
                    side_img_bg,
                    frame_verts,
                    cam=frame_cam,
                    angle=270,
                    axis=[0, 1, 0],
                )
                if args.reposed_render:
                    smpl = SMPL('data/vibe_data', batch_size=1)
                    zero_pose = torch.from_numpy(
                        np.zeros((1, pred_pose.shape[-1]))).float()
                    zero_pose[:, 0] = np.pi
                    pred_frame_betas = torch.from_numpy(
                        pred_betas[frame_idx][None, :]).float()
                    with torch.no_grad():
                        reposed_smpl_output = smpl(
                            betas=pred_frame_betas,
                            body_pose=zero_pose[:, 3:],
                            global_orient=zero_pose[:, :3])
                        reposed_verts = reposed_smpl_output.vertices
                        reposed_verts = reposed_verts.cpu().detach().numpy()

                    reposed_cam = np.array([0.9, 0, 0])
                    reposed_rend_img = renderer.render(side_img_bg,
                                                       reposed_verts[0],
                                                       cam=reposed_cam)
                    reposed_rend_img90 = renderer.render(side_img_bg,
                                                         reposed_verts[0],
                                                         cam=reposed_cam,
                                                         angle=90,
                                                         axis=[0, 1, 0])

                    top_row = np.concatenate(
                        [img, reposed_rend_img, reposed_rend_img90], axis=1)
                    bot_row = np.concatenate(
                        [rend_img, side_rend_img90, side_rend_img270], axis=1)
                    whole_img = np.concatenate([top_row, bot_row], axis=0)

                else:
                    top_row = np.concatenate([img, side_img_bg, side_img_bg],
                                             axis=1)
                    bot_row = np.concatenate(
                        [rend_img, side_rend_img90, side_rend_img270], axis=1)
                    whole_img = np.concatenate([top_row, bot_row], axis=0)

            # cv2.imwrite(os.path.join(output_img_folder, f'{frame_idx:06d}.png'), whole_img)
            cv2.imwrite(
                os.path.join(output_img_folder, os.path.basename(img_fname)),
                whole_img)

        # ========= Save rendered video ========= #
        save_vid_path = os.path.join(output_path, 'vibe_video.mp4')
        print(f'Saving result video to {save_vid_path}')
        images_to_video(img_folder=output_img_folder,
                        output_vid_file=save_vid_path)

    print('================= END =================')
Example No. 34
def BostonData_view():
    # Load the dataset
    db = datasets.load_boston()
    print("数据特征项:", db.feature_names)
    db_X = db.data
    print("X行列数:", db_X.shape)
    db_y = db.target
    print("y行列数:", db_y.shape)

    # 构造pandas的Frame格式,X特征值
    df = pd.DataFrame(db_X)
    # 特征值名称【列名】
    df.columns = db.feature_names
    # 定义结果名称【列名】为Price,并且将target的值赋值给此列
    df['Price'] = db_y
    # 显示前5前数据【包括表头】
    print("boston数据预览:\n", df.head())

    # 制作组图,不同特征属性的散点图
    sns.set(style='whitegrid', context='notebook')
    cols = ['LSTAT', 'INDUS', 'NOX', 'RM', 'Price']
    sns.pairplot(df[cols], height=2)
    plt.show()

    # Correlation matrix between the selected features and the target
    # Visualize it as a heatmap (Pearson correlation coefficients)
    cm = np.corrcoef(df[cols].values.T)
    sns.set(font_scale=1.3)
    hm = sns.heatmap(cm,
                     cbar=True,
                     annot=True,
                     square=True,
                     fmt='.2f',
                     annot_kws={'size': 13},
                     yticklabels=cols,
                     xticklabels=cols)
    plt.show()

    # Show only one half of the correlation heatmap
    mask = np.zeros_like(cm)
    print(mask)
    mask[np.triu_indices_from(mask)] = True
    with sns.axes_style("white"):
        ax = sns.heatmap(cm,
                         mask=mask,
                         vmax=1,
                         annot=True,
                         square=True,
                         fmt='.2f',
                         annot_kws={'size': 13},
                         yticklabels=cols,
                         xticklabels=cols)
    plt.show()

    # From all feature correlations, keep those whose correlation with the target exceeds 0.5
    # and build a correlation matrix from them; runs as-is, no modification needed
    corrmat = df.corr().abs()  # correlation coefficients between the continuous features
    # Take the features whose correlation with Price is greater than 0.5, sort them by Price in
    # descending order, and keep the corresponding feature names in a list
    top_corr = corrmat[corrmat["Price"] > 0.5].sort_values(
        by=["Price"], ascending=False).index
    cm = abs(np.corrcoef(
        df[top_corr].values.T))  # note the transpose: otherwise we would get correlations between samples rather than between features
    # f, ax = plt.subplots(figsize=(20, 9))
    sns.set(font_scale=1.3)
    hm = sns.heatmap(cm,
                     cbar=True,
                     annot=True,
                     cmap='YlGnBu',
                     square=True,
                     fmt='.2f',
                     annot_kws={'size': 13},
                     yticklabels=top_corr.values,
                     xticklabels=top_corr.values)
    plt.show()
Example No. 35
        # full_cost = np.sum(2*(np.sqrt(1 + np.array(along_track_diffs)**2) - 1))
        full_cost = np.sum(np.abs(np.array(full_residuals)))/len(full_residuals)

        full_cost_list.append(full_cost)

        print('3D cost:', full_cost)
        #print('RMS cost:', np.sum(rms_cost))



    ### PLOT VELOCITIES
    plt.scatter(velocities[1:], obs_time[1:])
    
    x_times = np.linspace(np.min(obs_time), np.max(obs_time), 100)
    #plt.plot(line(x_times, *popt), x_times)
    plt.plot(np.zeros_like(x_times) + v_init, x_times)

    plt.gca().invert_yaxis()

    plt.show()


    
    # # Load the numpy array with the results from a file
    # solutions = np.load(results_file)

    # # Find the best solution among the top N
    # best_solution = findBestSolution(solutions)

    # print('BEST SOLUTION:', best_solution)
Example No. 36
frameClone = old_frame.copy()
for o in range(op.nPoints):
    for m in range(len(detected_key_points[o])):
        cv.circle(frameClone, detected_key_points[o][m][0:2], 5, op.colors[o],
                  -1, cv.LINE_AA)

p0 = []
for key_point in detected_key_points:
    for person_kp in key_point:
        p0.append(np.matrix(person_kp[0:2]).astype(np.float32))
p0 = np.array(p0)
print(type(cv.goodFeaturesToTrack(old_gray, mask=None, **feature_params)[0]))
print(type(p0[0]))

# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)

while 1:
    ret, frame = cap.read()
    frame = frame[9:710, 443:836]
    frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

    # calculate optical flow
    p1, st, err = cv.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None,
                                          **lk_params)

    # Select good points
    good_new = p1[st == 1]
    good_old = p0[st == 1]

    # draw the tracks
Example No. 37
vertical_spacing = 0.05  # in meters
max_depth_of_section = 5  # meters

fp = 'deltaRCM_Output/pyDeltaRCM_output.nc'
nc = netCDF4.Dataset(fp)

strata_sf = nc.variables['strata_sand_frac'][:]
strata_depth = nc.variables['strata_depth'][:]

# shortcuts for array sizes
dz, dx, dy = strata_depth.shape
nx, ny, nz = dx, dy, int(5 / vertical_spacing)

# preserves only the oldest surface when cross-cutting
strata = np.zeros_like(strata_depth)
strata[-1, :, :] = strata_depth[-1, :, :]

for i in range(1, dz):
    strata[-i - 1, :, :] = np.minimum(strata_depth[-i - 1, :, :],
                                      strata[-i, :, :])

# combines depths and sand fractions into stratigraphy
stratigraphy = np.zeros((nz, nx, ny))

for j in range(dx):

    mask = np.ones((nz, ny)) * -1

    for i in np.arange(dz - 1, -1, -1):
Example No. 38
 def reset_test_buffer(self):
     self.test_buffer = np.zeros_like(self.test_buffer, dtype=np.float32)
     self.test_buff_pos = 0
     self.test_size = 0
Example No. 39
def make_covmap(goodval, wave_phase, pixel_phase):

    covmap = np.zeros_like(wave_phase)

    for i, j in zip(goodval[0], goodval[1]):

        wavevals = wave_phase[:, i, j]
        pixvals = pixel_phase[:, i, j]

        hit_w = np.zeros(100)

        for m in range(0, wavevals.size):
            wval = int(round(wavevals[m] * 100))

            if (wval == 100):
                wval = 0  # Wrap around

            if (wval < 25):
                hit_w[wval:wval + 25] = 1
                hit_w[0:wval] = 1
                hit_w[100 - (25 - wval):] = 1

            if ((wval >= 25) and (wval < 75)):
                hit_w[wval - 25:wval + 25] = 1

            if (wval >= 75):
                hit_w[wval - 25:wval] = 1
                hit_w[wval:] = 1
                hit_w[0:wval - 75] = 1

        hit_p = np.zeros(100)

        for m in range(0, pixvals.size):
            pval = int(round(pixvals[m] * 100))

            if (pval == 100):
                pval = 0  # Wrap around

            if (pval < 25):
                hit_p[pval:pval + 25] = 1
                hit_p[0:pval] = 1
                hit_p[100 - (25 - pval):] = 1

            if ((pval >= 25) and (pval < 75)):
                hit_p[pval - 25:pval + 25] = 1

            if (pval >= 75):
                hit_p[pval - 25:pval] = 1
                hit_p[pval:] = 1
                hit_p[0:pval - 75] = 1

        temp_w = idlwrap.where(hit_w == 1)
        nhit_w = len(temp_w)

        temp_p = idlwrap.where(hit_p == 1)
        nhit_p = len(temp_p)

        covmap[0, i, j] = nhit_w / 100.
        covmap[1, i, j] = nhit_p / 100.

    return covmap
Example No. 40
    def _serialize_event_exp(self, config):
        """serialize event expression and sent them to game engine"""
        game = self.game

        # collect agent symbol
        symbol2int = {}
        config.symbol_ct = 0

        def collect_agent_symbol(node, config):
            for item in node.inputs:
                if isinstance(item, EventNode):
                    collect_agent_symbol(item, config)
                elif isinstance(item, AgentSymbol):
                    if item not in symbol2int:
                        symbol2int[item] = config.symbol_ct
                        config.symbol_ct += 1

        for rule in config.reward_rules:
            on = rule[0]
            receiver = rule[1]
            for symbol in receiver:
                if symbol not in symbol2int:
                    symbol2int[symbol] = config.symbol_ct
                    config.symbol_ct += 1
            collect_agent_symbol(on, config)

        # collect event node
        event2int = {}
        config.node_ct = 0

        def collect_event_node(node, config):
            if node not in event2int:
                event2int[node] = config.node_ct
                config.node_ct += 1
            for item in node.inputs:
                if isinstance(item, EventNode):
                    collect_event_node(item, config)

        for rule in config.reward_rules:
            collect_event_node(rule[0], config)

        # send to C++ engine
        for sym in symbol2int:
            no = symbol2int[sym]
            _LIB.gridworld_define_agent_symbol(game, no, sym.group, sym.index)

        for event in event2int:
            no = event2int[event]
            inputs = np.zeros_like(event.inputs, dtype=np.int32)
            for i, item in enumerate(event.inputs):
                if isinstance(item, EventNode):
                    inputs[i] = event2int[item]
                elif isinstance(item, AgentSymbol):
                    inputs[i] = symbol2int[item]
                else:
                    inputs[i] = item
            n_inputs = len(inputs)
            _LIB.gridworld_define_event_node(game, no, event.op, as_int32_c_array(inputs), n_inputs)

        for rule in config.reward_rules:
            # rule = [on, receiver, value, terminal]
            on = event2int[rule[0]]

            receiver = np.zeros_like(rule[1], dtype=np.int32)
            for i, item in enumerate(rule[1]):
                receiver[i] = symbol2int[item]
            if len(rule[2]) == 1 and rule[2][0] == 'auto':
                value = np.zeros(len(receiver), dtype=np.float32)
            else:
                value = np.array(rule[2], dtype=np.float32)
            n_receiver = len(receiver)
            _LIB.gridworld_add_reward_rule(game, on, as_int32_c_array(receiver),
                                           as_float_c_array(value), n_receiver, rule[3])
Exemplo n.º 41
0
def setup_atm(filename_free,filename_close,filename_far,TI=0.11,N=24001):
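        """Build interpolants of the atmospheric (turbulence-driven) load residual.

        Summary inferred from the code below: three FAST output files are read
        (freestream, closely waked, far waked); for each case the CCBlade
        moment predicted as a function of azimuth is subtracted from the FAST
        blade-moment history (channel ``my``), and linear interpolants of the
        residuals are returned together with the free/waked rotor speeds and
        effective turbine speeds.
        """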
        print('read data')
        s = Time.time()

        """free FAST"""
        # filename = '/Users/ningrsrch/Dropbox/Projects/waked-loads/BYU/BYU/C676_W8_T11.0_P0.0_m2D_L-1.0/Model.out'
        lines = np.loadtxt(filename_free,skiprows=8)
        time = lines[:,0]
        # mx = lines[:,11]
        my = lines[:,12]
        a = lines[:,5]
        # o_free = lines[:,6]
        Omega_free = np.mean(lines[:,6])
        time = time-time[0]

        m_free = interp1d(time, my, kind='linear')
        a_free = interp1d(time, a, kind='linear')
        # o_free = interp1d(time, o_free, kind='cubic')
        # m_free = my


        """waked FAST CLOSE"""
        # filename = '/Users/ningrsrch/Dropbox/Projects/waked-loads/BYU/BYU/C653_W8_T11.0_P0.0_4D_L0/Model.out'
        lines = np.loadtxt(filename_close,skiprows=8)
        time = lines[:,0]
        # mx = lines[:,11]
        my = lines[:,12]
        a = lines[:,5]
        # o_waked = lines[:,6]
        Omega_waked = np.mean(lines[:,6])
        time = time-time[0]
        m_close = interp1d(time, my, kind='linear')
        a_close = interp1d(time, a, kind='linear')
        # o_close = interp1d(time, o_waked, kind='cubic')
        # m_close = my

        """waked FAST FAR"""
        # filename = '/Users/ningrsrch/Dropbox/Projects/waked-loads/BYU/BYU/C671_W8_T11.0_P0.0_10D_L0/Model.out'
        lines = np.loadtxt(filename_far,skiprows=8)
        time = lines[:,0]
        # mx = lines[:,11]
        my = lines[:,12]
        a = lines[:,5]
        # o_waked = lines[:,6]
        time = time-time[0]
        m_far = interp1d(time, my, kind='linear')
        a_far = interp1d(time, a, kind='linear')
        # o_far = interp1d(time, o_waked, kind='cubic')
        # m_far = my

        print(Time.time()-s)

        """setup the CCBlade loads"""
        angles = np.linspace(0.,360.,50)

        ccblade_free = np.zeros_like(angles)
        ccblade_close = np.zeros_like(angles)
        ccblade_far = np.zeros_like(angles)

        turbineX_close = np.array([0.,126.4])*4.
        turbineX_far = np.array([0.,126.4])*10.

        turbineY_free = np.array([0.,1264000.])
        turbineY_waked = np.array([0.,0.])

        wind_speed = 8.

        Rhub,r,chord,theta,af,Rhub,Rtip,B,rho,mu,precone,hubHt,nSector,pitch,yaw_deg = setup_airfoil()

        print('make CCBlade functions')
        s = Time.time()
        for i in range(len(angles)):
                az = angles[i]

                #freestream
                x_locs,y_locs,z_locs = findXYZ(turbineX_close[1],turbineY_free[1],90.,r,yaw_deg,az)
                speeds, _ = get_speeds(turbineX_close, turbineY_free, x_locs, y_locs, z_locs, wind_speed,TI=TI)
                ccblade_free[i], _ = calc_moment(speeds,Rhub,r,chord,theta,af,Rhub,Rtip,B,rho,mu,precone,hubHt,nSector,Omega_free,pitch,azimuth=az)

                #waked close
                x_locs,y_locs,z_locs = findXYZ(turbineX_close[1],turbineY_waked[1],90.,r,yaw_deg,az)
                speeds, _ = get_speeds(turbineX_close, turbineY_waked, x_locs, y_locs, z_locs, wind_speed,TI=TI)
                ccblade_close[i], _ = calc_moment(speeds,Rhub,r,chord,theta,af,Rhub,Rtip,B,rho,mu,precone,hubHt,nSector,Omega_free,pitch,azimuth=az)

                #waked far
                x_locs,y_locs,z_locs = findXYZ(turbineX_far[1],turbineY_waked[1],90.,r,yaw_deg,az)
                speeds, _ = get_speeds(turbineX_far, turbineY_waked, x_locs, y_locs, z_locs, wind_speed,TI=TI)
                ccblade_far[i], _ = calc_moment(speeds,Rhub,r,chord,theta,af,Rhub,Rtip,B,rho,mu,precone,hubHt,nSector,Omega_waked,pitch,azimuth=az)

                if i == 0:
                        free_speed = get_eff_turbine_speeds(turbineX_close, turbineY_free, wind_speed,TI=TI)[1]
                        waked_speed = get_eff_turbine_speeds(turbineX_close, turbineY_waked, wind_speed,TI=TI)[1]

        f_free = interp1d(angles, ccblade_free/1000., kind='linear')
        f_close = interp1d(angles, ccblade_close/1000., kind='linear')
        f_far = interp1d(angles, ccblade_far/1000., kind='linear')
        print(Time.time()-s)

        t = np.linspace(0.,600.,N)

        # t = time
        dt = t[1]-t[0]

        CC_free = np.zeros_like(t)
        CC_close = np.zeros_like(t)
        CC_far = np.zeros_like(t)

        FAST_free = np.zeros_like(t)
        FAST_close = np.zeros_like(t)
        FAST_far = np.zeros_like(t)

        """get atm loads"""

        # pos_free = 0.
        # pos_close = 0.
        # pos_far = 0.

        print('call CCBlade functions')
        s = Time.time()
        for i in range(len(t)):

                # M_free[i] = f_free(pos_free)
                # pos_free = (pos_free+(o_free((t[i+1]+t[i])/2.)*dt/60.)*360.)%360.
                CC_free[i] = f_free(a_free(t[i]))

                # M_close[i] = f_close(pos_close)
                # pos_close = (pos_close+(o_close((t[i+1]+t[i])/2.)*dt/60.)*360.)%360.
                CC_close[i] = f_close(a_close(t[i]))

                # M_far[i] = f_far(pos_far)
                # pos_far = (pos_far+(o_far((t[i+1]+t[i])/2.)*dt/60.)*360.)%360.
                CC_far[i] = f_far(a_far(t[i]))

                FAST_free[i] = m_free(t[i])
                FAST_close[i] = m_close(t[i])
                FAST_far[i] = m_far(t[i])

        print(Time.time()-s)

        atm_free = FAST_free-CC_free
        atm_close = FAST_close-CC_close
        atm_far = FAST_far-CC_far
        # atm_free = m_free-CC_free
        # atm_close = m_close-CC_close
        # atm_far = m_far-CC_far

        f_atm_free = interp1d(t, atm_free, kind='linear')
        f_atm_close = interp1d(t, atm_close, kind='linear')
        f_atm_far = interp1d(t, atm_far, kind='linear')

        return f_atm_free,f_atm_close,f_atm_far,Omega_free,Omega_waked,free_speed,waked_speed
Exemplo n.º 42
0
    def __init__(self, model, upsample_size=UPSAMPLE_SIZE):

        mask_size = np.ceil(np.array((32, 32), dtype=float) /
                            upsample_size)
        mask_size = mask_size.astype(int)
        self.mask_size = mask_size
        mask = np.zeros(self.mask_size)
        pattern = np.zeros((32, 32, 3))
        mask = np.expand_dims(mask, axis=2)

        mask_tanh = np.zeros_like(mask)
        pattern_tanh = np.zeros_like(pattern)

        # prepare mask related tensors
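        # Both mask and pattern are optimised through unconstrained tanh
        # variables: tanh(x) / (2 - epsilon) + 0.5 squashes values into (0, 1)
        # (the pattern is additionally scaled to [0, 255]), so no explicit
        # clipping is needed during the updates below.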
        self.mask_tanh_tensor = K.variable(mask_tanh)
        mask_tensor_unrepeat = (K.tanh(self.mask_tanh_tensor)
                                / (2 - K.epsilon()) + 0.5)
        mask_tensor_unexpand = K.repeat_elements(
            mask_tensor_unrepeat,
            rep=3,
            axis=2)
        self.mask_tensor = K.expand_dims(mask_tensor_unexpand, axis=0)
        upsample_layer = UpSampling2D(
            size=(upsample_size, upsample_size))
        mask_upsample_tensor_uncrop = upsample_layer(self.mask_tensor)
        uncrop_shape = K.int_shape(mask_upsample_tensor_uncrop)[1:]
        cropping_layer = Cropping2D(
            cropping=((0, uncrop_shape[0] - 32),
                      (0, uncrop_shape[1] - 32)))
        self.mask_upsample_tensor = cropping_layer(
            mask_upsample_tensor_uncrop)
        # self.mask_upsample_tensor = K.round(self.mask_upsample_tensor)
        reverse_mask_tensor = (K.ones_like(self.mask_upsample_tensor) -
                               self.mask_upsample_tensor)

        # prepare pattern related tensors
        self.pattern_tanh_tensor = K.variable(pattern_tanh)
        self.pattern_raw_tensor = (
            (K.tanh(self.pattern_tanh_tensor) / (2 - K.epsilon()) + 0.5) *
            255.0)

        # prepare input image related tensors
        # ignore clip operation here
        # assume input image is already clipped into valid color range
        input_tensor = K.placeholder((None,32,32,3))
        input_raw_tensor = input_tensor

        # IMPORTANT: MASK OPERATION IN RAW DOMAIN
        X_adv_raw_tensor = (
            reverse_mask_tensor * input_raw_tensor +
            self.mask_upsample_tensor * self.pattern_raw_tensor)

        X_adv_tensor = X_adv_raw_tensor

        output_tensor = model(X_adv_tensor)
        y_target_tensor = K.placeholder((None,43))
        y_true_tensor = K.placeholder((None,43))

        self.loss_ce = categorical_crossentropy(output_tensor, y_target_tensor)

        self.hyperparameters = K.reshape(K.constant(np.array([1e-2, 1e-5, 1e-7, 1e-8, 0, 1e-2])), shape=(6, 1))
        self.loss_reg = self.build_tabor_regularization(input_raw_tensor,
                                                        model, y_target_tensor,
                                                        y_true_tensor)
        self.loss_reg = K.dot(K.reshape(self.loss_reg, shape=(1, 6)), self.hyperparameters)
        self.loss = K.mean(self.loss_ce) + self.loss_reg
        self.opt = Adam(lr=1e-3, beta_1=0.5, beta_2=0.9)
        self.updates = self.opt.get_updates(
            params=[self.pattern_tanh_tensor, self.mask_tanh_tensor],
            loss=self.loss)
        self.train = K.function(
            [input_tensor, y_true_tensor, y_target_tensor],
            [self.loss_ce, self.loss_reg, self.loss],
            updates=self.updates)
Exemplo n.º 43
0
def corner_detectTrack(camA, camB):
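    """Track corner features through two image sequences and return 3-D displacements.

    Summary inferred from the code below: for each region of interest,
    Shi-Tomasi corners are detected in the first frame and followed through
    both camera sequences with pyramidal Lucas-Kanade optical flow; the matched
    point sets are handed to ``get_3Ddisps`` to compute displacements.  The
    globals ``num_roi``, ``first_cam`` and ``second_cam`` are assumed to be
    defined elsewhere in the original module.
    """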
    
    # Generate some random colors later used to display movement tracks
    color3 = np.ones((100,1))*255; color2 = np.zeros((100,1)); color1 = np.zeros((100,1))
    color = np.hstack((color1,color2,color3))
    
    index = 0
    
    #get ROIs from sv_getROIs module
    [masked_grays1, boundboxes1] = get_ROIs(camA[0])
    [masked_grays2, boundboxes2] = get_ROIs(camB[0])
    
     # Take first frame
    old_frame = cv2.imread(camA[0])
    old_gray = cv2.imread(camA[0],0)
     
    
    slidesA = [[None] for x in range(len(first_cam)-1)] #images from camA used for plotting
    slidesB = [[None] for x in range(len(second_cam)-1)] #images from camB used for plotting
    
    disps_roi = {'ROI # ' + str(u): 0 for u in range(num_roi)}
    
    for q in range(0,num_roi):
         
        imgA_before = []; imgA_after = []; H_matA = []
        imgB_before = []; imgB_after = []; H_matB = []
        
        #params for ShiTomasi corner detection
        feature_params = dict(maxCorners = 100,
                               qualityLevel = 0.3,
                               minDistance = 7,
                               blockSize = 7,
                               gradientSize = 7,
                               mask = masked_grays2[q]) #ROI from trials
        
        
        # goodFeaturesToTrack determines strong corners on an image
        # can be used to initialize any point-based tracker such as the calcOpticalFlowPyrLK
        # first search for the features in the rois from the first camera; 
        # search for those same initialized features in all the images in the second camera
        
        p0 = cv2.goodFeaturesToTrack(old_gray, **feature_params)  #search for features from first camera pic
        prev_coord = p0
        
        print('number of corners found: '+ str(len(p0)))
        
        # Create a mask image for drawing purposes
        mask = np.zeros_like(old_frame)
        
        ##################################################################  for cam A  ###########################################################
        
        while index < (len(camA)-1):
            print('image' + str(index))
            frame = cv2.imread(camA[index+1])
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            
            # Parameters for lucas kanade optical flow
            lk_params = dict( winSize  = (15,15),
                          maxLevel = 10,
                          criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10,0.03))
            
            # calculate optical flow
            # for a sparse feature set using the iterative LK method with pyramids       
            p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
            
            #p1 = p1[st==1]
            #p1 = p1[st==1] #shouldn't p1 = p1[st==1]??; i.e. only found points?
            #print('p0 = ' + str(len(p0))); print('p1 = ' + str(len(p1))); print('prev_coord = ' + str(len(prev_coord)))
            
            if (len(p1) != len(prev_coord)):
                print("didn't find the same corners")
                #can't find corners --> iterate over new lk parameters
                for i in range(20,100,2): #increase maxLevel in the LK parameters until the same number of corners is tracked
                    lk_params['maxLevel'] = i
                    print('# Levels = ' + str(i))
                    
                    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params) #recalculate p1
                    #p1 = p1[st==1] #shouldn't p1 = p1[st==1]??
                    #print ('new p1 length = ' + str(len(p1)))
                    
                    if (len(p1) != len(prev_coord)):
                        print('still bad. continue changing parameters')
                        continue
                    else:
                        print('found good parameters')
                        break
            else:
                print('found same corners')
            
            #print('p0 = ' + str(len(p0))); print('p1 = ' + str(len(p1))); print('prev_coord = ' + str(len(prev_coord)))
            #previous block of code should ensure that len(p1) == len(prev_coord)
            
            H, Hmask = cv2.findHomography(p0, p1, cv2.RANSAC,5.0) #H is 3x3 homography matrix
            #not really needed
            
            #print(prev_coord[0][0])
            imgA_before.append(prev_coord)
            imgA_after.append(p1)
            H_matA.append(H)
            
            prev_coord = p1
            #print(prev_coord[0][0])
            
            # Select good points #does p0 need to be reshaped to this good_new at the end? shouldn't p1 = p1[st==1]??
            good_new = p1
            good_old = p0
            
#           for i in range(len(p1)):
#                frame = cv2.putText(frame, str(i), (int(p1[i][0]),int(p1[i][1]+100)), cv2.FONT_HERSHEY_TRIPLEX, 3, (0, 0, 255), 5) 
                
#            # draw the tracks
#            for i,(new,old) in enumerate(zip(good_new,good_old)):
#                a,b = new.ravel()
#                c,d = old.ravel()
#                mask = cv2.line(mask, (a,b),(c,d), color[i].tolist(), 10)
#                frame = cv2.circle(frame,(a,b),7,color[i].tolist(),-1, lineType = 8) 
#            
#            
#            img = cv2.add(frame,mask)
#            img = cv2.add(img,boundboxes1[q]) #plot ROIs and OG image; boundbox from trials
#            
#            #slides[index][q] = img #problems updating (only first column is updating)
#            #slides = np.vstack((slides,img))
#            #add images from consecutive images to slides_frames
#            slidesA[index] = img
        
            # Now update the previous frame and previous points
            old_frame = frame.copy()
            index = index + 1
            #p0 = good_new.reshape(-1,1,2)
            
        index = 0;  #reset index for camB
        
        print('finished tracking images from camA')
        
        
        ##################################################################  for camB  ###########################################################
        old_frame = cv2.imread(camB[0])
        old_gray = cv2.imread(camB[0],0)
        
        while index < (len(camB)-1):
            print('image' + str(index))
            frame = cv2.imread(camB[index+1])
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            
            # Parameters for lucas kanade optical flow
            lk_params = dict( winSize  = (15,15),
                          maxLevel = 10,
                          criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10,0.03))
            
            # calculate optical flow
            # for a sparse feature set using the iterative LK method with pyramids       
            p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
            
            #p1 = p1[st==1]
            #p1 = p1[st==1] #shouldn't p1 = p1[st==1]??; i.e. only found points?
            #print('p0 = ' + str(len(p0))); print('p1 = ' + str(len(p1))); print('prev_coord = ' + str(len(prev_coord)))
            
            if (len(p1) != len(prev_coord)):
                print("didn't find the same corners")
                #can't find corners --> iterate over new lk parameters
                for i in range(20,100,2): #increase maxLevel in the LK parameters until the same number of corners is tracked
                    lk_params['maxLevel'] = i
                    print('# Levels = ' + str(i))
                    
                    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params) #recalculate p1
                    #p1 = p1[st==1] #shouldn't p1 = p1[st==1]??
                    #print ('new p1 length = ' + str(len(p1)))
                    
                    if (len(p1) != len(prev_coord)):
                        print('still bad. continue changing parameters')
                        continue
                    else:
                        print('found good criteria')
                        break
            else:
                print('found same corners')
            
            #print('p0 = ' + str(len(p0))); print('p1 = ' + str(len(p1))); print('prev_coord = ' + str(len(prev_coord)))
            #previous block of code should ensure that len(p1) == len(prev_coord)
            
            H, Hmask = cv2.findHomography(p0, p1, cv2.RANSAC,5.0) #H is 3x3 homography matrix
            #not really needed
            
            #print(prev_coord[0][0])
            imgB_before.append(prev_coord)
            imgB_after.append(p1)
            H_matB.append(H)
            
            prev_coord = p1
            #print(prev_coord[0][0])
            
            # Select good points #does p0 need to be reshaped to this good_new at the end? shouldn't p1 = p1[st==1]??
            good_new = p1
            good_old = p0
            
#            for i in range(len(p1)):
#                frame = cv2.putText(frame, str(i), (int(p1[i][0]),int(p1[i][1]+100)), cv2.FONT_HERSHEY_TRIPLEX, 3, (0, 0, 255), 5) 
                
            # draw the tracks
            for i,(new,old) in enumerate(zip(good_new,good_old)):
                a,b = new.ravel()
                c,d = old.ravel()
                mask = cv2.line(mask, (a,b),(c,d), color[i].tolist(), 12)
                frame = cv2.circle(frame,(a,b),7,color[i].tolist(),-1, lineType = 10) 
            
            
            img = cv2.add(frame,mask)
            img = cv2.add(img,boundboxes2[q]) #plot ROIs and OG image; boundbox from trials
            
            #slides[index][q] = img #problems updating (only first column is updating)
            #slides = np.vstack((slides,img))
            #add images from consecutive images to slides_frames
            slidesB[index] = img
        
            # Now update the previous frame and previous points
            old_frame = frame.copy()
            index = index + 1
            #p0 = good_new.reshape(-1,1,2)
            
        index = 0 #reset index for new ROI
        print('finished tracking images from camB')
        tot_disps = get_3Ddisps(imgA_before, imgA_after, imgB_before, imgB_after)
        disps_roi['ROI # ' + str(q)] = tot_disps  
        
    #iterate over each consecutive image set, stack ROIs for a respective set together, display, move to next set
    #f, axarr = plt.subplots(2,2)
    for s in range(0,len(slidesB)): #for now, just displays on camB images, and two images for the two ROIs
        #plot_image = np.concatenate((slidesA[s], slidesB[s]), axis=1) #slide[image set#][roi #]
        plot_image = slidesB[s]
        plot_image = cv2.resize(plot_image, (1440, 810 ))  #do we need to resize
        cv2.imshow('all ROIs', plot_image)
#        #axarr[1,0].imshow(slides[2][s])
#        #axarr[1,1].imshow(slides[3][s])
#        
        k = cv2.waitKey(0) & 0xff
        if k == 27:
            break    
    cv2.destroyAllWindows()
    
    
    return [tot_disps, disps_roi]
Exemplo n.º 44
0
def get_loads_history(turbineX,turbineY,turb_index,Omega_free,Omega_waked,free_speed,waked_speed,f_atm_free,f_atm_close,f_atm_far,N=24001,TI=0.11,wind_speed=8.,rotor_diameter=126.4):
# def get_loads_history(turbineX,turbineY,turb_index,Omega_free,Omega_waked,free_speed,waked_speed,atm_free,atm_close,atm_far,time,wind_speed=8.,rotor_diameter=126.4,TI=0.11):
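    """Assemble a blade-moment time history for one turbine.

    Summary inferred from the code below: a CCBlade moment-vs-azimuth
    interpolant is built for the turbine at ``turb_index``; at every time step
    the azimuth is advanced with the interpolated rotor speed and the
    atmospheric residuals ``f_atm_free`` / ``f_atm_close`` / ``f_atm_far`` are
    added, weighted by how much of the rotor is waked and blended linearly
    between 4 and 10 rotor diameters of downstream spacing.
    """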

    # print 'get loads history'

    # npts = len(time)
    nTurbines = len(turbineX)

    Rhub,r,chord,theta,af,Rhub,Rtip,B,rho,mu,precone,hubHt,nSector,pitch,yaw_deg = setup_airfoil()
    angles = np.linspace(0.,360.,100)
    ccblade_moments = np.zeros_like(angles)

    _, wake_radius = get_speeds(turbineX, turbineY, np.array([0.]), np.array([0.]), np.array([0.]), wind_speed, TI=TI)

    # print 'getting CCBlade moments'
    s = Time.time()
    for i in range(len(ccblade_moments)):
        az = angles[i]
        x_locs,y_locs,z_locs = findXYZ(turbineX[turb_index],turbineY[turb_index],90.,r,yaw_deg,az)
        speeds, _ = get_speeds(turbineX, turbineY, x_locs, y_locs, z_locs, wind_speed,TI=TI)

        if i == 0:
            actual_speed = get_eff_turbine_speeds(turbineX, turbineY, wind_speed,TI=TI)[1]
            Omega = (Omega_waked + (Omega_free-Omega_waked)/(free_speed-waked_speed) * (actual_speed-waked_speed))

        ccblade_moments[i], _ = calc_moment(speeds,Rhub,r,chord,theta,af,Rhub,Rtip,B,rho,mu,precone,hubHt,nSector,Omega,pitch,azimuth=az)

    f_ccblade = interp1d(angles, ccblade_moments/1000., kind='linear')
    # print Time.time()-s

    t = np.linspace(0.,600.,N)
    # t = time
    dt = t[1]-t[0]
    # moments = np.zeros(N)
    moments = np.zeros(N)
    m = np.zeros(N)
    position = 0.

    # s = Time.time()
    # for i in range(N):

    for i in range(N):
        # print '1'
        # s = Time.time()

        amnt_waked = np.zeros(nTurbines)
        dx_dist = np.zeros(nTurbines)
        for waking in range(nTurbines):
            dx = turbineX[turb_index]-turbineX[waking]
            dy = turbineY[turb_index]-turbineY[waking]
            dx_dist[waking] = dx
            if dx < 0.:
                amnt_waked[waking] = 0.
            else:
                amnt_waked[waking] = amount_waked(dy,wake_radius[turb_index][waking]*1.75,rotor_diameter,position)

        waked_array = np.zeros(nTurbines)
        dx_array = np.zeros(nTurbines)

        # print Time.time()-s
        # print '2'
        # s = Time.time()

        num = 0
        indices = np.argsort(dx_dist)
        for waking in range(nTurbines):
            if dx_dist[indices[waking]] > 0.:
                # if num == 0:
                #     if amnt_waked[indices[waking]] > 0.:
                #         waked_array[num] = amnt_waked[indices[waking]]
                #         dx_array[num] = dx_dist[indices[waking]]
                #         num += 1
                # else:
                    if amnt_waked[indices[waking]] > np.sum(waked_array[0:num]):
                        waked_array[num] = amnt_waked[indices[waking]]-np.sum(waked_array[0:num])
                        dx_array[num] = dx_dist[indices[waking]]
                        num += 1

        # print Time.time()-s
        # print '3'
        # s = Time.time()

        down = dx_array/rotor_diameter

        moments[i] = f_ccblade(position)
        m[i] = moments[i]

        unwaked = 1.-np.sum(waked_array)
        # print 'unwaked', unwaked
        for k in range(np.count_nonzero(waked_array)):
            if down[k] < 4.:
                  moments[i] += f_atm_close(t[i])*waked_array[k]
                  # moments[i] += atm_close[i]*waked_array[k]
            elif down[k] > 10.:
                  moments[i] += f_atm_far(t[i])*waked_array[k]
            else:
                  moments[i] += (f_atm_close(t[i])*(10.-down[k])/6.+f_atm_far(t[i])*(down[k]-4.)/6.)*waked_array[k]

        moments[i] += f_atm_free(t[i])*unwaked

        position = (position+(Omega*(dt)/60.)*360.)%360.

        # print Time.time()-s

    # plt.plot(t,f_atm_close(t))
    # plt.plot(t,m)
    if turb_index == 1.:
        plt.plot(t,moments)
    # plt.show()

    return moments
Exemplo n.º 45
0
    def bright_wise_histogram_equalization(self, img_arr, level=256, **args):
        ### split the image into three levels according to brightness, equalize their histograms separately
        ### @params img_arr : numpy.array uint8 type, 2-dim
        ### @params level : gray scale
        ### @return arr : the equalized image array
        def special_histogram(img_arr, min_v, max_v):
            ### calculate a special histogram with max, min value
            ### @params img_arr : 1-dim numpy.array
            ### @params min_v : min gray scale
            ### @params max_v : max gray scale
            ### @return hists : list type, length = max_v - min_v + 1
            hists = [0 for _ in range(max_v - min_v + 1)]
            for v in img_arr:
                hists[v - min_v] += 1
            return hists

        def special_histogram_cdf(hists, min_v, max_v):
            ### calculate a special histogram cdf with max, min value
            ### @params hists : list type
            ### @params min_v : min gray scale
            ### @params max_v : max gray scale
            ### @return hists_cdf : numpy.array
            hists_cumsum = np.cumsum(np.array(hists))
            hists_cdf = (max_v - min_v) / hists_cumsum[-1] * hists_cumsum + min_v
            hists_cdf = hists_cdf.astype("uint8")
            return hists_cdf

        def pseudo_variance(arr):
            ### calculate a type of variance
            ### @params arr : 1-dim numpy.array
            arr_abs = np.abs(arr - np.mean(arr))
            return np.mean(arr_abs)

        # search two grayscale level, which can split the image into
        # three parts having approximately same number of pixels
        (m, n) = img_arr.shape
        hists = self.calc_histogram_(img_arr)
        hists_arr = np.cumsum(np.array(hists))
        hists_ratio = hists_arr / hists_arr[-1]

        scale1 = None
        scale2 = None
        for i in range(len(hists_ratio)):
            if hists_ratio[i] >= 0.333 and scale1 is None:
                scale1 = i
            if hists_ratio[i] >= 0.667 and scale2 is None:
                scale2 = i
                break

        # split images
        dark_index = (img_arr <= scale1)
        mid_index = (img_arr > scale1) & (img_arr <= scale2)
        bright_index = (img_arr > scale2)

        # variance
        dark_variance = pseudo_variance(img_arr[dark_index])
        mid_variance = pseudo_variance(img_arr[mid_index])
        bright_variance = pseudo_variance(img_arr[bright_index])

        # build three level images
        dark_img_arr = np.zeros_like(img_arr)
        mid_img_arr = np.zeros_like(img_arr)
        bright_img_arr = np.zeros_like(img_arr)

        # histogram equalization individually
        dark_hists = special_histogram(img_arr[dark_index], 0, scale1)
        dark_cdf = special_histogram_cdf(dark_hists, 0, scale1)

        mid_hists = special_histogram(img_arr[mid_index], scale1, scale2)
        mid_cdf = special_histogram_cdf(mid_hists, scale1, scale2)

        bright_hists = special_histogram(img_arr[bright_index], scale2, level - 1)
        bright_cdf = special_histogram_cdf(bright_hists, scale2, level - 1)

        def plot_hists(arr):
            hists = [0 for i in range(256)]
            for a in arr:
                hists[a] += 1
            self.draw_histogram_(hists)

        # mapping
        dark_img_arr[dark_index] = dark_cdf[img_arr[dark_index]]
        mid_img_arr[mid_index] = mid_cdf[img_arr[mid_index] - scale1]
        bright_img_arr[bright_index] = bright_cdf[img_arr[bright_index] - scale2]

        # weighted sum
        # fractor = dark_variance + mid_variance + bright_variance
        # arr = (dark_variance * dark_img_arr + mid_variance * mid_img_arr + bright_variance * bright_img_arr)/fractor
        arr = dark_img_arr + mid_img_arr + bright_img_arr
        arr = arr.astype("uint8")
        return arr
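
    # Minimal usage sketch (hypothetical: assumes this method belongs to an
    # image-enhancement class, here called "Enhancer", that also provides the
    # calc_histogram_ / draw_histogram_ helpers used above):
    #
    #   import numpy as np
    #   from PIL import Image
    #
    #   img = np.asarray(Image.open("photo.jpg").convert("L"))  # 2-D uint8 array
    #   out = Enhancer().bright_wise_histogram_equalization(img, level=256)
    #   Image.fromarray(out).save("photo_equalized.jpg")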
Exemplo n.º 46
0
        inh5file = h5py.File(fullfile, 'r')
        x = inh5file['default']['turns'].value
        # intv = int(np.ceil(len(x)/points))
        for key in inh5file['default'].keys():
            if (key in not_plot):
                continue
            val = inh5file['default'][key].value
            if (key == 'profile'):
                val = val[-1]
            val = val.reshape(len(val))
            if (key not in plot_dir):
                plot_dir[key] = {}
            if (workers not in plot_dir[key]):
                plot_dir[key][workers] = {'num': 0}
            if ('sum' not in plot_dir[key][workers]):
                plot_dir[key][workers]['sum'] = np.zeros_like(val)
                plot_dir[key][workers]['min'] = val
                plot_dir[key][workers]['max'] = val
            plot_dir[key][workers]['num'] += 1
            plot_dir[key][workers]['sum'] += val
            plot_dir[key][workers]['min'] = np.minimum(
                plot_dir[key][workers]['min'], val)
            plot_dir[key][workers]['max'] = np.maximum(
                plot_dir[key][workers]['max'], val)
            plot_dir[key][workers]['turns'] = x
        inh5file.close()

    # continue here, I need to iterate over the errors, create a figure for each
    # iterate over the reduce values, add an error plot line for each according to the intv etc

    for ts in tss:
Exemplo n.º 47
0
    def get_item_based_topk(self, items, top_k=10, sort_top_k=True):
        """Get top K similar items to provided seed items based on similarity metric defined.
        This method will take a set of items and use them to recommend the most similar items to that set
        based on the similarity matrix fit during training.
        This allows recommendations for cold-users (unseen during training), note - the model is not updated.

        The following options are possible based on information provided in the items input:
        1. Single user or seed of items: only item column (ratings are assumed to be 1)
        2. Single user or seed of items w/ ratings: item column and rating column
        3. Separate users or seeds of items: item and user column (user ids are only used to separate item sets)
        4. Separate users or seeds of items with ratings: item, user and rating columns provided

        Args:
            items (pd.DataFrame): DataFrame with item, user (optional), and rating (optional) columns
            top_k (int): number of top items to recommend
            sort_top_k (bool): flag to sort top k results

        Returns:
            pd.DataFrame: sorted top k recommendation items
        """

        # convert item ids to indices
        item_ids = np.asarray(
            list(
                map(
                    lambda item: self.item2index.get(item, np.NaN),
                    items[self.col_item].values,
                )))

        # if no ratings were provided assume they are all 1
        if self.col_rating in items.columns:
            ratings = items[self.col_rating]
        else:
            ratings = pd.Series(np.ones_like(item_ids))

        # create local map of user ids
        if self.col_user in items.columns:
            test_users = items[self.col_user]
            user2index = {
                x[1]: x[0]
                for x in enumerate(items[self.col_user].unique())
            }
            user_ids = test_users.map(user2index)
        else:
            # if no user column exists assume all entries are for a single user
            test_users = pd.Series(np.zeros_like(item_ids))
            user_ids = test_users
        n_users = user_ids.drop_duplicates().shape[0]

        # generate pseudo user affinity using seed items
        pseudo_affinity = sparse.coo_matrix(
            (ratings, (user_ids, item_ids)),
            shape=(n_users, self.n_items)).tocsr()

        # calculate raw scores with a matrix multiplication
        test_scores = pseudo_affinity.dot(self.item_similarity)

        # remove items in the seed set so recommended items are novel
        test_scores[user_ids, item_ids] = -np.inf

        top_items, top_scores = get_top_k_scored_items(scores=test_scores,
                                                       top_k=top_k,
                                                       sort_top_k=sort_top_k)

        df = pd.DataFrame({
            self.col_user:
            np.repeat(test_users.drop_duplicates().values, top_items.shape[1]),
            self.col_item:
            [self.index2item[item] for item in top_items.flatten()],
            self.col_prediction:
            top_scores.flatten(),
        })

        # drop invalid items
        return df.replace(-np.inf, np.nan).dropna()
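
    # Minimal usage sketch (hypothetical seed data; the column names must match
    # the col_user / col_item / col_rating the model was constructed with):
    #
    #   import pandas as pd
    #   seed = pd.DataFrame({"userID": [1, 1], "itemID": [10, 42], "rating": [5.0, 4.0]})
    #   recs = model.get_item_based_topk(seed, top_k=5, sort_top_k=True)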
Exemplo n.º 48
0
    def sample_labelswitch(self):
        """
            Tries to switch two random labels
        """

        if npr.rand() < self.p_switch:
            if self.K < 2:
                return np.array([-1, -1])
            labels = npr.choice(self.K, 2, replace=False)
            if np.sum(self.active_komp[labels]) == 0:
                return np.array([-1, -1])

            lik_old, R_S_mu0, log_det_Q0, R_S0 = self.likelihood_prior(
                self.mu[labels[0]],
                self.sigma[labels[0]],
                labels[0],
                switchprior=True)
            lik_oldt, R_S_mu1, log_det_Q1, R_S1 = self.likelihood_prior(
                self.mu[labels[1]],
                self.sigma[labels[1]],
                labels[1],
                switchprior=True)

            # update: added alpha contribution
            alpha_ = np.zeros_like(self.logisticNormal.alpha)
            alpha_[:] = self.logisticNormal.alpha[:]
            llik_alpha, _, __ = self.logisticNormal.get_lprior_grad_hess(
                alpha_)

            self.p[labels[0]], self.p[labels[1]] = self.p[labels[1]], self.p[
                labels[0]]
            self.logisticNormal.set_alpha_p(self.p)
            lliks_alpha, _, __ = self.logisticNormal.get_lprior_grad_hess()

            lik_old += lik_oldt + llik_alpha
            lik_star = self.likelihood_prior(self.mu[labels[1]],
                                             self.sigma[labels[1]],
                                             labels[0],
                                             R_S_mu0,
                                             log_det_Q0,
                                             R_S1,
                                             switchprior=True)[0]
            lik_star += self.likelihood_prior(self.mu[labels[0]],
                                              self.sigma[labels[0]],
                                              labels[1],
                                              R_S_mu1,
                                              log_det_Q1,
                                              R_S0,
                                              switchprior=True)[0]
            lik_star += lliks_alpha
            if np.log(npr.rand()) < lik_star - lik_old:
                self.active_komp[labels[0]], self.active_komp[
                    labels[1]] = self.active_komp[labels[1]], self.active_komp[
                        labels[0]]
                self.mu[labels[0]], self.mu[labels[1]] = self.mu[
                    labels[1]], self.mu[labels[0]]
                self.sigma[labels[0]], self.sigma[labels[1]] = self.sigma[
                    labels[1]], self.sigma[labels[0]]
                self.p[labels[0]], self.p[labels[1]] = self.p[
                    labels[1]], self.p[labels[0]]
                self.updata_mudata()
                return labels
            self.logisticNormal.set_alpha(alpha_)
            self.p = self.logisticNormal.get_p()

        return np.array([-1, -1])
Exemplo n.º 49
0
def window_mask(width, height, img_ref, center, level):
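    """Return a rectangular window mask (summary inferred from the code).

    The mask has the same shape as ``img_ref`` and is 1 inside a
    ``width`` x ``height`` box centred horizontally at ``center`` for the given
    vertical ``level`` (levels count upward from the bottom of the image),
    and 0 elsewhere.
    """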
    output = np.zeros_like(img_ref)
    output[int(img_ref.shape[0] - (level + 1) * height):int(img_ref.shape[0] - level * height),
           max(0, int(center - width / 2)):min(int(center + width / 2), img_ref.shape[1])] = 1
    return output
Exemplo n.º 50
0
w_static = w_static.assign_coords({'times': list(mask.keys())})

##############################################################################
# Compute peaks for each roi
##############################################################################
unique_rois = np.unique(w_static.roi.values)
freqs = w_static.freqs.values

peaks = np.zeros((len(unique_rois), w_static.sizes['freqs']))
peaks = xr.DataArray(peaks, dims=('roi', 'freqs'),
                     coords={'roi': unique_rois,
                             'freqs': freqs})


for i, roi in enumerate(unique_rois):
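    # For each ROI: detect spectral peaks in every (trial, roi) slice of the
    # baseline period and count, per frequency bin, how many slices peak there.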
    counts = np.zeros_like(freqs)
    idx = w_static.roi.values == roi
    w_sel = w_static.isel(roi=idx).stack(T=("trials", "roi")).sel(times="baseline")
    p_idx = []
    for t in range(w_sel.sizes["T"]):
        temp, _ = find_peaks(w_sel[:, t], threshold=1e-8)
        p_idx += [temp]
    p_idx = list(itertools.chain(*p_idx))
    p_idx, c = np.unique(p_idx, return_counts=True)
    counts[p_idx.astype(int)] = c
    peaks[i, :] = counts

# Path to results folder
_RESULTS = os.path.join(_ROOT,
                        "Results/lucy/peaks",
                        f"{sidx}.nc")
Exemplo n.º 51
0
def cross_entropy_neighbors_in_rep(adata, use_rep, n_points=3):
    """Compare neighborhood graph of representation based on cross entropy.

    `n_points` denotes the number of points to add as highlight annotation.

    Returns
    -------
    The cross entropy and the geodesic-distance-weighted cross entropy as
    ``entropy, geo_entropy_d, geo_entropy_o``.

    Adds the most overlapping or disconnected points as annotation to `adata`.
    """
    # see below why we need this
    if 'X_diffmap' not in adata.obsm.keys():
        raise ValueError('Run `tl.diffmap` on `adata`, first.')

    adata_ref = adata  # simple renaming, don't need copy here
    adata_cmp = adata.copy()
    n_neighbors = adata_ref.uns['neighbors']['params']['n_neighbors']
    from .neighbors import neighbors
    neighbors(adata_cmp, n_neighbors=n_neighbors, use_rep=use_rep)
    from .tools.diffmap import diffmap
    diffmap(adata_cmp)

    graph_ref = adata_ref.uns['neighbors']['connectivities']
    graph_cmp = adata_cmp.uns['neighbors']['connectivities']

    graph_ref = graph_ref.tocoo()  # makes a copy
    graph_cmp = graph_cmp.tocoo()

    edgeset_ref = {e for e in zip(graph_ref.row, graph_ref.col)}
    edgeset_cmp = {e for e in zip(graph_cmp.row, graph_cmp.col)}
    edgeset_union = list(edgeset_ref.union(edgeset_cmp))

    edgeset_union_indices = tuple(zip(*edgeset_union))
    edgeset_union_indices = (np.array(edgeset_union_indices[0]), np.array(edgeset_union_indices[1]))

    n_edges_ref = len(graph_ref.nonzero()[0])
    n_edges_cmp = len(graph_cmp.nonzero()[0])
    n_edges_union = len(edgeset_union)
    logg.msg(
        '... n_edges_ref', n_edges_ref,
        'n_edges_cmp', n_edges_cmp,
        'n_edges_union', n_edges_union)

    graph_ref = graph_ref.tocsr()  # need a copy of the csr graph anyways
    graph_cmp = graph_cmp.tocsr()

    p_ref = graph_ref[edgeset_union_indices].A1
    p_cmp = graph_cmp[edgeset_union_indices].A1

    # the following is how one compares it to log_loss from sklearn
    # p_ref[p_ref.nonzero()] = 1
    # from sklearn.metrics import log_loss
    # print(log_loss(p_ref, p_cmp))
    p_cmp = np.clip(p_cmp, EPS, 1-EPS)
    ratio = np.clip(p_ref / p_cmp, EPS, None)
    ratio_1m = np.clip((1 - p_ref) / (1 - p_cmp), EPS, None)
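
    # Each edge in the union contributes
    #   p_ref*log(p_ref/p_cmp) + (1 - p_ref)*log((1 - p_ref)/(1 - p_cmp)),
    # and the sum is normalised below by the number of edges of the fully
    # connected graph.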

    entropy = np.sum(p_ref * np.log(ratio) + (1-p_ref) * np.log(ratio_1m))

    n_edges_fully_connected = (graph_ref.shape[0]**2 - graph_ref.shape[0])
    entropy /= n_edges_fully_connected

    fraction_edges = n_edges_ref / n_edges_fully_connected
    naive_entropy = (fraction_edges * np.log(1./fraction_edges)
                     + (1-fraction_edges) * np.log(1./(1-fraction_edges)))
    logg.msg('cross entropy of naive sparse prediction {:.3e}'.format(naive_entropy))
    logg.msg('cross entropy of random prediction {:.3e}'.format(-np.log(0.5)))
    logg.info('cross entropy {:.3e}'.format(entropy))

    # for manifold analysis, restrict to largest connected component in
    # reference
    # now that we clip at a quite high value below, this might not even be
    # necessary
    n_components, labels = scipy.sparse.csgraph.connected_components(graph_ref)
    largest_component = np.arange(graph_ref.shape[0], dtype=int)
    if n_components > 1:
        component_sizes = np.bincount(labels)
        logg.msg('largest component has size', component_sizes.max())
        largest_component = np.where(
            component_sizes == component_sizes.max())[0][0]
        graph_ref_red = graph_ref.tocsr()[labels == largest_component, :]
        graph_ref_red = graph_ref_red.tocsc()[:, labels == largest_component]
        graph_ref_red = graph_ref_red.tocoo()
        graph_cmp_red = graph_cmp.tocsr()[labels == largest_component, :]
        graph_cmp_red = graph_cmp_red.tocsc()[:, labels == largest_component]
        graph_cmp_red = graph_cmp_red.tocoo()
        edgeset_ref_red = {e for e in zip(graph_ref_red.row, graph_ref_red.col)}
        edgeset_cmp_red = {e for e in zip(graph_cmp_red.row, graph_cmp_red.col)}
        edgeset_union_red = edgeset_ref_red.union(edgeset_cmp_red)
        map_indices = np.where(labels == largest_component)[0]
        edgeset_union_red = {
            (map_indices[i], map_indices[j]) for (i, j) in edgeset_union_red}

    from .neighbors import Neighbors
    neigh_ref = Neighbors(adata_ref)
    dist_ref = neigh_ref.distances_dpt  # we expect 'X_diffmap' to be already present

    neigh_cmp = Neighbors(adata_cmp)
    dist_cmp = neigh_cmp.distances_dpt

    d_cmp = np.zeros_like(p_ref)
    d_ref = np.zeros_like(p_ref)
    for i, e in enumerate(edgeset_union):
        # skip contributions that are not in the largest component
        if n_components > 1 and e not in edgeset_union_red:
            continue
        d_cmp[i] = dist_cmp[e]
        d_ref[i] = dist_ref[e]

    MAX_DIST = 1000
    d_cmp = np.clip(d_cmp, 0.1, MAX_DIST)  # we don't want to measure collapsing clusters
    d_ref = np.clip(d_ref, 0.1, MAX_DIST)

    weights = np.array(d_cmp / d_ref)            # disconnected regions
    weights_overlap = np.array(d_ref / d_cmp)    # overlapping regions

    # the following is just for annotation of figures
    if 'highlights' not in adata_ref.uns:
        adata_ref.uns['highlights'] = {}
    else:
        # remove old disconnected and overlapping points
        new_highlights = {}
        for k, v in adata_ref.uns['highlights'].items():
            if v != 'O' and v not in {'D0', 'D1', 'D2', 'D3', 'D4'}:
                new_highlights[k] = v
        adata_ref.uns['highlights'] = new_highlights

    # points that are maximally disconnected
    max_weights = np.argpartition(weights, kth=-n_points)[-n_points:]
    points = list(edgeset_union_indices[0][max_weights])
    points2 = list(edgeset_union_indices[1][max_weights])
    found_disconnected_points = False
    for ip, p in enumerate(points):
        if d_cmp[max_weights][ip] == MAX_DIST:
            adata_ref.uns['highlights'][p] = 'D' + str(ip)
            adata_ref.uns['highlights'][points2[ip]] = 'D' + str(ip)
            found_disconnected_points = True
    if found_disconnected_points:
        logg.msg('most disconnected points', points)
        logg.msg('    with weights', weights[max_weights].round(1))

    max_weights = np.argpartition(
        weights_overlap, kth=-n_points)[-n_points:]
    points = list(edgeset_union_indices[0][max_weights])
    for p in points:
        adata_ref.uns['highlights'][p] = 'O'
    logg.msg('most overlapping points', points)
    logg.msg('    with weights', weights_overlap[max_weights].round(1))
    logg.msg('    with d_rep', d_cmp[max_weights].round(1))
    logg.msg('    with d_ref', d_ref[max_weights].round(1))

    geo_entropy_d = np.sum(weights * p_ref * np.log(ratio))
    geo_entropy_o = np.sum(weights_overlap * (1-p_ref) * np.log(ratio_1m))

    geo_entropy_d /= n_edges_fully_connected
    geo_entropy_o /= n_edges_fully_connected

    logg.info('geodesic cross entropy {:.3e}'.format(geo_entropy_d + geo_entropy_o))
    return entropy, geo_entropy_d, geo_entropy_o
Exemplo n.º 52
0
plt.figure(figsize=(12, 288), facecolor='white')
df_ints = t_data.select_dtypes([float])
for c in df_ints.columns:
    ax = plt.subplot(200, 4, plot_number)
    sns.distplot(train[c])
    plt.xticks(rotation=45)
    plot_number = plot_number + 1
plt.tight_layout()
plt.show()

## looking at a correlation matrix
t_corr = t_data.corr()

# Set up the matplotlib figure
fig, ax = plt.subplots(figsize=(11, 9))
mask = np.zeros_like(t_corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
with sns.axes_style("white"):
    ax = sns.heatmap(t_corr,
                     mask=mask,
                     vmax=.3,
                     square=True,
                     center=0,
                     cmap=sns.color_palette("BrBG", 7))
plt.show()

print(t_corr)

from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools.tools import add_constant
Exemplo n.º 53
0
n_input = (r * 2 + 1)**2
n_hidden = 15
n_output = 5

w1 = np.random.randn(n_hidden, n_input) / np.sqrt(np.prod(n_input))
w2 = np.random.randn(5, n_hidden) / np.sqrt(n_hidden)

#ws = np.zeros((1, 5, 5))
#ws[0, 2, 2] = 0.1
#w1 = np.zeros((1, 5, 5))
#w1[0, 3, 2] = 0.1
#w2 = [1.0]
#agent.set_weights(ws, w1, w2)

rmsprop_dw1 = np.zeros_like(w1)
rmsprop_dw2 = np.zeros_like(w2)

mean_reward = []
all_w1 = []
all_w2 = []

upda = 0
noup = 0
nextupdate = datetime.datetime.now() + datetime.timedelta(seconds=10)
while True:
    aifile = "net.txt"
    with open(aifile, 'w') as f:
        print('n_input:', w1.shape[1], file=f)
        print('n_hidden:', w1.shape[0], file=f)
        print('n_output:', w2.shape[0], file=f)
Exemplo n.º 54
0
# result in an error if LaTeX is not installed on your system.  In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)

#------------------------------------------------------------
# Generate random x, y with a given covariance length
np.random.seed(1)
x = np.linspace(0, 1, 500)
h = 0.01
C = np.exp(-0.5 * (x - x[:, None]) ** 2 / h ** 2)
y = 0.8 + 0.3 * np.random.multivariate_normal(np.zeros(len(x)), C)

#------------------------------------------------------------
# Define a normalized top-hat window function
w = np.zeros_like(x)
w[(x > 0.12) & (x < 0.28)] = 1

#------------------------------------------------------------
# Perform the convolution
y_norm = np.convolve(np.ones_like(y), w, mode='full')
valid_indices = (y_norm != 0)
y_norm = y_norm[valid_indices]

y_w = np.convolve(y, w, mode='full')[valid_indices] / y_norm

# trick: convolve with x-coordinate to find the center of the window at
#        each point.
x_w = np.convolve(x, w, mode='full')[valid_indices] / y_norm

#------------------------------------------------------------
Exemplo n.º 55
0
if __name__ == '__main__':

    import pycuda.driver as drv
    import numpy
    import pycuda.autoinit
    from pycuda.compiler import SourceModule

    mod = SourceModule("""
    __global__ void multiply_them(float *dest, float *a, float *b)
    {
      const int i = threadIdx.x;
      dest[i] = a[i] * b[i];
    }
    """)

    multiply_them = mod.get_function("multiply_them")

    a = numpy.random.randn(400).astype(numpy.float32)
    b = numpy.random.randn(400).astype(numpy.float32)

    dest = numpy.zeros_like(a)
    multiply_them(drv.Out(dest), drv.In(a), drv.In(b), block=(400, 1, 1))

    print(dest - a * b)
Exemplo n.º 56
0
qf = {}

fibre_marc = {}
fibre_marc_e = {}

for yi,by in enumerate(xyz_pf): 
    
    # brass_y2[:,:,yi] = by
    HxY[yi] = np.cross(symHKL[0],by)
    HxY[yi] = HxY[yi] / np.linalg.norm(HxY[yi],axis=1)[:,None]
    ome[yi] = np.arccos(np.dot(symHKL[0],by))
    
    q0[yi] = {}
    q[yi] = {}
    qf[yi] = {}
    fibre_marc[yi] = np.zeros_like(brassFibre[yi])
    
    for hi,h in enumerate(HxY[yi]):
        
        q0[yi][hi] = quat.normalize(np.hstack( [ np.cos(ome[yi][hi]/2), np.sin(ome[yi][hi]/2) * h ] ))
        q[yi][hi]  = quat.normalize(np.hstack( [ cphi[:, np.newaxis], np.tile( by, (len(cphi),1) ) * sphi[:, np.newaxis] ] ))
        
        qf[yi][hi] = quat.multiply(q[yi][hi], q0[yi][hi])
    
        for qi in range(qf[yi][hi].shape[0]):
            
            fibre_marc[yi][qi,hi,:] = qf[yi][hi][qi,:]
            
    phi1, Phi, phi2 = quat2eu(fibre_marc[yi])
    
    phi1 = np.where(phi1 < 0, phi1 + 2*np.pi, phi1) #bring back to 0 - 2pi
Exemplo n.º 57
0
def wind_plot():
    """Create southerly wind test data."""
    v = np.full((5, 5), 10, dtype=np.float64)
    u = np.zeros_like(v)
    x, y = np.meshgrid(np.linspace(-120, -60, 5), np.linspace(25, 50, 5))
    return u, v, x, y
Exemplo n.º 58
0
    def getPreprocessedImage(self, evtNumber, image_property):
        disp_medianCorrection = 19
        disp_radialCorrection = 18
        disp_gainMask = 17
        disp_coordy = 16
        disp_coordx = 15
        disp_col = 14
        disp_row = 13
        disp_seg = 12
        disp_quad = 11
        disp_gain = 10
        disp_commonMode = 9
        disp_rms = 8
        disp_status = 7
        disp_pedestal = 6
        disp_photons = 5
        disp_raw = 4
        disp_pedestalCorrected = 3
        disp_commonModeCorrected = 2
        disp_adu = 1

        if image_property == disp_medianCorrection:  # median subtraction
            print "Sorry, this feature isn't available yet"
        elif image_property == disp_radialCorrection:  # radial subtraction + polarization corrected
            self.getEvent(evtNumber)
            calib = self.getCalib(evtNumber)
            if calib is not None:
                self.pf.shape = self.parent.calib.shape
                calib = self.rb.subtract_bkgd(calib * self.pf)
        elif image_property == disp_adu:  # gain and hybrid gain corrected
            calib = self.getCalib(evtNumber)
        elif image_property == disp_commonModeCorrected:  # common mode corrected
            calib = self.getCommonModeCorrected(evtNumber)
        elif image_property == disp_pedestalCorrected:  # pedestal corrected
            calib = self.det.raw(self.evt).astype('float32')
            if calib is not None: calib -= self.det.pedestals(self.evt)
        elif image_property == disp_raw:  # raw
            calib = self.det.raw(self.evt)
        elif image_property == disp_photons:  # photon counts
            calib = self.det.photons(
                self.evt,
                mask=self.parent.mk.userMask,
                adu_per_photon=self.parent.exp.aduPerPhoton)
            if calib is None:
                calib = np.zeros_like(self.parent.exp.detGuaranteed,
                                      dtype='int32')
        elif image_property == disp_pedestal:  # pedestal
            calib = self.parent.det.pedestals(self.parent.evt)
        elif image_property == disp_status:  # status
            calib = self.parent.det.status(self.parent.evt)
        elif image_property == disp_rms:  # rms
            calib = self.parent.det.rms(self.parent.evt)
        elif image_property == disp_commonMode:  # common mode
            calib = self.getCommonMode(evtNumber)
        elif image_property == disp_gain:  # gain
            calib = self.parent.det.gain(self.parent.evt)
        elif image_property == disp_gainMask:  # gain_mask
            calib = self.parent.det.gain_mask(self.parent.evt)
        elif image_property == disp_coordx:  # coords_x
            calib = self.parent.det.coords_x(self.parent.evt)
        elif image_property == disp_coordy:  # coords_y
            calib = self.parent.det.coords_y(self.parent.evt)

        shape = self.parent.det.shape(self.parent.evt)
        if len(shape) == 3:
            if image_property == disp_quad:  # quad ind
                calib = np.zeros(shape)
                for i in range(shape[0]):
                    # FIXME: handle detectors properly
                    if shape[0] == 32:  # cspad
                        calib[i, :, :] = int(i) % 8
                    elif shape[0] == 2:  # cspad2x2
                        calib[i, :, :] = int(i) % 2
                    elif shape[0] == 4:  # pnccd
                        calib[i, :, :] = int(i) % 4
            elif image_property == disp_seg:  # seg ind
                calib = np.zeros(shape)
                if shape[0] == 32:  # cspad
                    for i in range(32):
                        calib[i, :, :] = int(i) // 8
                elif shape[0] == 2:  # cspad2x2
                    for i in range(2):
                        calib[i, :, :] = int(i)
                elif shape[0] == 4:  # pnccd
                    for i in range(4):
                        calib[i, :, :] = int(i)
            elif image_property == disp_row:  # row ind
                calib = np.zeros(shape)
                if shape[0] == 32:  # cspad
                    for i in range(185):
                        calib[:, i, :] = i
                elif shape[0] == 2:  # cspad2x2
                    for i in range(185):
                        calib[:, i, :] = i
                elif shape[0] == 4:  # pnccd
                    for i in range(512):
                        calib[:, i, :] = i
            elif image_property == disp_col:  # col ind
                calib = np.zeros(shape)
                if shape[0] == 32:  # cspad
                    for i in range(388):
                        calib[:, :, i] = i
                elif shape[0] == 2:  # cspad2x2
                    for i in range(388):
                        calib[:, :, i] = i
                elif shape[0] == 4:  # pnccd
                    for i in range(512):
                        calib[:, :, i] = i
Exemplo n.º 59
0
def rewinder_likelihood(dt,
                        nsteps,
                        potential,
                        prog_xv,
                        star_xv,
                        m0,
                        mdot,
                        alpha,
                        betas,
                        theta,
                        selfgravity=False):
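    """Per-star, per-step log-likelihood terms for a tidal-stream model.

    Summary inferred from the code below: the progenitor and star orbits are
    integrated in ``potential``, star offsets are projected onto an
    instantaneous orbital-plane basis, and Gaussian log-densities are evaluated
    with scales set by the estimated tidal radius and velocity dispersion.
    Returns the summed log-density per (timestep, star) plus the orbits.
    """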

    # full array of initial conditions for progenitor and stars
    w0 = np.vstack((prog_xv, star_xv))

    # integrate orbits
    t, w = potential.integrate_orbit(w0, dt=dt, nsteps=nsteps)
    t = t[:-1]
    w = w[:-1]

    # satellite mass
    sat_mass = -mdot * t + m0
    GMprog = potential.parameters['G'] * sat_mass

    # compute approximations of tidal radius and velocity dispersion from mass enclosed
    menc = potential.mass_enclosed(w[:, 0, :3])  # progenitor position orbit
    E_scale = (sat_mass / menc)**(1 / 3.)

    # compute naive tidal radius and velocity dispersion
    rtide = E_scale * np.linalg.norm(w[:, 0, :3],
                                     axis=-1)  # progenitor orbital radius
    vdisp = E_scale * np.linalg.norm(w[:, 0, 3:],
                                     axis=-1)  # progenitor orbital velocity

    # get the instantaneous orbital plane basis vectors (x1,x2,x3)
    basis = get_basis(w[:, 0], theta)

    # star orbits relative to progenitor
    dw = w[:, 1:] - w[:, 0:1]

    # project orbits into new basis
    w123 = np.zeros_like(dw)
    for i in range(3):
        w123[..., i] = np.sum(dw[..., :3] * basis[..., i][:, np.newaxis],
                              axis=-1)
        w123[..., i + 3] = np.sum(dw[..., 3:] * basis[..., i][:, np.newaxis],
                                  axis=-1)

    w123[..., i] += alpha * betas[np.newaxis] * rtide[:, np.newaxis]

    # write like this to allow for more general dispersions...probably want a covariance matrix
    sigmas = np.zeros((nsteps, 1, 6))
    sigmas[:, 0, 0] = rtide
    sigmas[:, 0, 1] = rtide
    sigmas[:, 0, 2] = rtide

    sigmas[:, 0, 3] = vdisp
    sigmas[:, 0, 4] = vdisp
    sigmas[:, 0, 5] = vdisp
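
    # per-coordinate log density of a zero-mean Gaussian:
    # log N(w | 0, sigma) = -0.5*log(2*pi) - log(sigma) - w**2 / (2*sigma**2)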

    g = -0.5 * np.log(2 * np.pi) - np.log(sigmas) - 0.5 * (w123 / sigmas)**2

    # compute an estimate of the jacobian
    Rsun = 8.
    R2 = (w[:, 1:, 0] + Rsun)**2 + w[:, 1:, 1]**2 + w[:, 1:, 2]**2
    x2 = w[:, 1:, 2]**2 / R2
    log_jac = np.log(R2 * R2 * np.sqrt(1. - x2))

    return g.sum(axis=-1) + log_jac, w
#       'V5AxleArticHGV', 'V6orMoreAxleArticHGV', 'AllHGVs', 'AllMotorVehicles',
#       'Lat', 'Lon'])
#           
#accidents_train = pd.merge(accidents_train, traffic_data, on = ['Location_Easting_OSGR','Location_Northing_OSGR'])


accidents_train['B_Latitude'] = np.array(pd.qcut(accidents_train['Latitude'], q=15, precision=1).astype(str))
accidents_train['B_Longitude'] = np.array(pd.qcut(accidents_train['Longitude'], q=10, precision=1).astype(str))

accidents_train['M_LAT_LON'] = accidents_train[['B_Latitude', 'B_Longitude']].apply(lambda x: '-'.join(x), axis=1)

accidents_train.drop(['B_Latitude', 'B_Longitude'], axis=1, inplace=True)

corr = accidents_train.corr()
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True

# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11,9))

# Generate a custom diverging colormap
cmap = sns.diverging_palette(220,10,as_cmap=True)

# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={"shrink":.5})

# Drop null columns
accidents_train = accidents_train.drop(['Junction_Detail'], axis=1)
# Drop string variables with a lot of different values
accidents_train.drop(['LSOA_of_Accident_Location', 'Local_Authority_(District)', 'Local_Authority_(Highway)', '1st_Road_Number', '2nd_Road_Number'], axis=1, inplace=True)