Example #1
def chicalc(Keplerlc, analyticallc, sigma, T_dur):
    # Calculates the chi^2 and reduced chi^2 values using only the data points
    # that fall within a transit.
    count = 0
    chi2 = 0.0
    for i in range(len(Keplerlc['Foldtime'])):  # loop over the Kepler (and analytical) light curve
        if np.abs(Keplerlc['Foldtime'][i]) <= T_dur:  # the data point falls within the transit
            count += 1
            chi2 += ((Keplerlc['Flux'][i] - analyticallc[1][i]) ** 2) / sigma ** 2
    if count == 0:
        return np.nan, np.nan  # np.nan is a constant, not a callable
    print('The chi^2 value is {} (intran)'.format(chi2))  # testing
    chi2reduced = chi2 / (count + 3)  # now calculate the reduced chi^2 value for 4 independent variables
    print('The reduced chi^2 value is {} (intran)'.format(chi2reduced))  # testing
    return chi2, chi2reduced  # return both chi^2 values
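A minimal usage sketch with a synthetic folded light curve; the dictionary keys and the list layout of analyticallc are assumptions that simply mirror how chicalc indexes its arguments above.

import numpy as np

foldtime = np.linspace(-0.5, 0.5, 11)                    # made-up folded time values
keplerlc = {'Foldtime': foldtime, 'Flux': np.full(11, 0.99)}
analyticallc = [foldtime, np.ones(11)]                   # analyticallc[1] holds the model flux
chi2, chi2_red = chicalc(keplerlc, analyticallc, sigma=0.01, T_dur=0.2)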
Example #2
    def compute_radians(self, df, units):
        if units == 'degrees':
            df['lat_rad'] = np.radians(df[self.flds['y']])
            df['lng_rad'] = np.radians(df[self.flds['x']])
        elif units == 'radians':
            # No conversion necessary
            df['lat_rad'] = df[self.flds['y']]
            df['lng_rad'] = df[self.flds['x']]
        else:
            df['lat_rad'] = np.nan
            df['lng_rad'] = np.nan

        return df
def sliding_window_correct(R, window_size=10):
    morphOrder = R['morph']
    lLick, rLick = np.full(morphOrder.shape, np.nan), np.full(morphOrder.shape, np.nan)
    lLick[np.where((R['first lick'] == 1) & (R['morph'] == 0))[0]] = 1.
    rLick[np.where((R['first lick'] == 2) & (R['morph'] == 1))[0]] = 1.

    boxcar = np.ones(window_size)
    lLick_smooth = astconv.convolve(lLick.ravel(), boxcar)
    rLick_smooth = astconv.convolve(rLick.ravel(), boxcar)

    return lLick_smooth, rLick_smooth
Example #4
def align(movie_data, options, args, lrh):
    print 'pICA(scikit-learn)'
    nvoxel = movie_data.shape[0]
    nTR    = movie_data.shape[1]
    nsubjs = movie_data.shape[2]

    align_algo = args.align_algo
    nfeature = args.nfeature
    randseed = args.randseed

    # zscore the data
    bX = np.full((nsubjs*nvoxel, nTR), np.nan)
    for m in xrange(nsubjs):
        bX[m*nvoxel:(m+1)*nvoxel, :] = stats.zscore(movie_data[:, :, m].T, axis=0, ddof=1).T
    del movie_data

    np.random.seed(randseed)
    A = np.mat(np.random.random((nfeature, nfeature)))

    ica = FastICA(n_components=nfeature, max_iter=500, w_init=A, random_state=randseed)
    St = ica.fit_transform(bX.T)
    S = St.T
    #bW = ica.transform(bX.T)
    bW = ica.mixing_
    print S.shape
    print bW.shape

    niter = 10
    # initialization when first time run the algorithm
    np.savez_compressed(options['working_path']+align_algo+'_'+lrh+'_'+str(niter)+'.npz',\
                                  bW = bW,  niter=niter)
    return niter
Example #5
    def viterbi(self, y, suppx):
        """
        Calculate the maximum a-posteriori assignment of x's.
        :param y: a sequence of words
        :param suppx: the support of x (what values it can attain)
        :param t: the transition distributions of the model
        :param e: the emission distributions of the model
        :return: xhat, the most likely sequence of hidden states (parts of speech).
        """
        n = len(y)
        num_tags = len(self.tags)

        prob_v = np.zeros((n, num_tags))
        track_v = np.zeros((n, num_tags), dtype=int)

        prob_v[0, :] = self.q
        for i in range(1, n):
            for j in range(num_tags):
                temp = np.multiply(np.multiply(prob_v[i - 1, :], self.t[:, j]), self.e[:, self.words_dict[y[i]]])
                prob_v[i, j] = np.max(temp)
                track_v[i, j] = np.argmax(temp)

        # backtrack the most likely route (as indices into the tag set; map through
        # self.tags_dict afterwards if tag names are needed)
        route = np.full(n, np.nan)
        route[n - 1] = np.argmax(prob_v[n - 1, :])
        for i in range(n - 2, -1, -1):
            route[i] = track_v[i + 1, int(route[i + 1])]

        return route
Example #6
 def __normalised_true_range(row: Se):
     try:
         # https://www.investopedia.com/terms/a/atr.asp -
         # we use the formula for TR and then divide by close
         return max(row[0] - row[1], abs(row[0] - row[2]), abs(row[1] - row[2])) / row[2]
     except RuntimeError:
         return np.nan
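A rough usage sketch, treating the method as a free function; the column order here is an assumption matching the row[0]/row[1]/row[2] indexing above, and the prices are made up.

import pandas as pd

prices = pd.DataFrame({'high': [10.5, 11.0, 10.8],
                       'low':  [10.0, 10.4, 10.2],
                       'close': [10.2, 10.9, 10.5]})
# each row is passed as [high, low, close]
ntr = [__normalised_true_range(row) for row in prices[['high', 'low', 'close']].to_numpy()]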
Example #7
def align(movie_data, options, args, lrh):
    print 'pPCA(scikit-learn)'
    nvoxel = movie_data.shape[0]
    nTR    = movie_data.shape[1]
    nsubjs = movie_data.shape[2]

    align_algo = args.align_algo
    nfeature   = args.nfeature

    # zscore the data
    bX = np.full((nsubjs*nvoxel, nTR), np.nan)

    for m in xrange(nsubjs):
        bX[m*nvoxel:(m+1)*nvoxel,:] = stats.zscore(movie_data[:, :, m].T, axis=0, ddof=1).T
    del movie_data

    U, s, VT = np.linalg.svd(bX, full_matrices=False)

    bW = np.zeros((nsubjs*nvoxel,nfeature))
    for m in xrange(nsubjs):
        bW[m*nvoxel:(m+1)*nvoxel,:] = U[m*nvoxel:(m+1)*nvoxel,:nfeature]

    niter = 10
    # initialization when first time run the algorithm
    np.savez_compressed(options['working_path']+align_algo+'_'+lrh+'_'+str(niter)+'.npz',\
                                  bW = bW,  niter=niter)
    return niter
def Tsurv(nu12,nu23,masses,m0=1.,fudge=1,res=False):
    """
    Main result from the paper. Return the survival time estimate as a function of the initial period ratios and masses (eq. 81).
    In units of of the innermost orbit
    Returns np.inf if separation wide enough that 3-body MMRs don't overlap.
    
    nu12, nu23 : Initial period ratios
    masses : planet masses
    m0 : star mass
    fudge : fudge factor to adjust the number of resonances taken into account for more than three planets. Usually 1.
    res : Boolean. True means that the exact distance to the closest resonance is used. However, since we do not take into account the actual shape of the two planet MMR the results are less good than by assuming a constant distance $\nu/2$ to the MMR.
    """
    plsepov = get_plsep_ov(nu12,nu23,masses,m0)*fudge**.25
    al12 = nu12**(2/3)
    al23 = nu23**(2/3)
    eta = nu12*(1-nu23)/(1-nu12*nu23)
    plsep = (1-al12)*(1-al23)/(2-al12-al23)
    

    Tnorm = 2**1.5/9*(plsep/plsepov)**6/(1-(plsep/plsepov)**4)*10**(-np.log(1-(plsep/plsepov)**4))
        
    A = np.sqrt(38/pi)
    Mfac = get_Mfac(nu12,nu23,masses,m0)
    PrefacD = Mfac*nu12*A*np.sqrt(eta*(1-eta))*fudge**-2
    if res:
        Deta,u0=distance_eta_diffusion_direction(nu12,nu23,masses)
        Detanorm = np.maximum(Deta,np.sum(masses)**(2/3)/plsep**(1/3))/plsep # the Deta is in unit of plsep since plsep already in Tnorm
        # Minimum distance is comparable to resonance size
        u0 = np.minimum(0.7,np.maximum(0.3,u0)) #Bounds to somewhat take into account the time spent inside the MMR
        Tsurv = ((Detanorm)**2/(PrefacD)*Tnorm*3/2*u0**2*(1-u0)**2)
    else:
        Tsurv = (3/2)**2/PrefacD*Tnorm*3/32 #Deta=3/2 in units of plsep
    return np.nan_to_num(Tsurv, nan=np.inf)
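A hypothetical call, for illustration only; it assumes the helpers used inside Tsurv (get_plsep_ov, get_Mfac, distance_eta_diffusion_direction) and pi are importable from the same module, and the planet masses are in units of the stellar mass.

import numpy as np

masses = np.array([3e-6, 3e-6, 3e-6])   # three roughly Earth-mass planets around a solar-mass star
t_est = Tsurv(0.74, 0.76, masses, m0=1.0)
print('estimated survival time (in innermost orbital periods): {:.3g}'.format(t_est))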
Example #9
    def house_data_extraction(house_url):
        """
        Returns an numpy array containing the scrapped data from a Zoopla house URL
            Parameters:
                    house_url(list): A string of the house URL 

            Returns:
                    data(array): A 1D numpy array containing the extracted data from each house
        """
        # Open the house link to extract parameters
        html = urlopen(house_url)
        soup = BeautifulSoup(html, 'html.parser')
        pagesource = str(soup).split()

        try:
            # Number of beds
            bed_find = pagesource.index('num_beds:')
            beds = pagesource[bed_find + 1][0]
            # Number of baths
            bath_find = pagesource.index('num_baths:')
            baths = pagesource[bath_find + 1][0]
            # House price
            price_find = pagesource.index('price_actual:')
            price_rough = pagesource[price_find + 1]
            price = price_rough.split(',')[0]
            # Property type
            type_find = pagesource.index('property_type:')
            prop_type_rough = pagesource[type_find + 1]
            prop_type = prop_type_rough.split('"')[1]
            # Latitude
            lat_find = pagesource.index('"latitude":')
            lat_rough = pagesource[lat_find + 1]
            lat = float(lat_rough.split(',')[0])
            # Longitude
            lon_find = pagesource.index('"longitude":')
            lon_rough = pagesource[lon_find + 1]
            lon = float(lon_rough.split(',')[0])
            # First published date
            price_history = pagesource.index('class="dp-price-history__item">')
            year = pagesource[price_history + 4].split('<')[0]
            # Distance to nearest attraction/train station
            station_find = pagesource.index('miles')
            station = float(pagesource[station_find - 1])
        except Exception:
            beds = baths = price = prop_type = lat = lon = year = station = np.nan

        #Is there a mention of a Loft?
        if 'loft' in str(soup): loft = 1
        else: loft = 0

        #Is there a mention of a Garden?
        if 'garden' in str(soup): garden = 1
        else: garden = 0

        data_extract = np.array([
            price, beds, baths, loft, garden, station, year, lat, lon,
            prop_type
        ])
        return data_extract
Example #11
 def safe_check(value):
     """Check for finite value and replace with np.nan if does not exist."""
     try:
         if np.isfinite(value):
             return value
         else:
             return np.nan
     except ValueError:
         return np.nan
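A couple of quick checks (sketch): finite inputs pass through unchanged and non-finite ones come back as np.nan.

import numpy as np

print(safe_check(1.5))      # 1.5
print(safe_check(np.inf))   # nan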
Example #12
def ncc(seg, width):

    # Local Variables: B, hash, y, i, siz, s, N, seg, width, S, cacheDir, u, x, Nu, xs, ys, uniq, o2, o1
    # Function calls: load, hashMat, unique, false, floor, sum, nan, uint8, ceil, uniqfilt, length, save, ncc, find, bwlabel, size
    #% N = ncc( seg, w )
    #%
    #% N(i,j) = number of connected components in the
    #%   w-by-w window centered on location (i,j) of seg
    #%
    #% What do I mean by "number of connected components"?
    #% Here are some example segmentation patches:
    #%
    #%   1 1 2 2 1 1
    #%   1 1 2 2 1 1      this patch has THREE segments, even
    #%   1 1 2 2 1 1      though unique() only returns [1;2].
    #%   1 1 2 2 1 1
    #%
    #%   3 3 7 7 7 7
    #%   3 3 7 7 7 7      this patch has FOUR segments, even
    #%   7 7 3 3 3 3      though unique() is simply [3;7].
    #%   7 7 3 3 3 3
    #%
    #% THIS CODE IS VERY SLOW, hence all results are cached.
    cacheDir = '/home/ashishmenon/labpc/oef/cache/clust/nccCache/'
    hash = hashMat(
        np.array(np.vstack((np.hstack((seg.flatten(1))), np.hstack((width))))))
    try:
        np.load(np.array(np.hstack((cacheDir, hash))), 'N')
    except:
        siz = matcompat.size(seg)
        o1 = np.floor(((width - 1.) / 2.))
        o2 = np.ceil(((width - 1.) / 2.))
        #% This takes ~80 seconds per image
        N = np.full(siz, np.nan)
        B = np.zeros(siz, dtype=bool)
        B[int(o1 + 1.) - 1:0 - o2, int(o1 + 1.) - 1:0 - o2] = 1.
        Nu = uniqfilt(seg, o2)
        N[int(np.logical_and(B, Nu == 1.)) - 1] = 1.
        [ys, xs] = nonzero(np.logical_and(B, Nu > 1.))
        for i in np.arange(1., (length(ys)) + 1):
            y = ys[int(i) - 1]
            x = xs[int(i) - 1]
            S = seg[int(y - o1) - 1:y + o2, int(x - o1) - 1:x + o2]
            uniq = np.unique(S)
            N[int(y) - 1, int(x) - 1] = 0.
            for s in uniq.flatten(0).conj():
                u = np.unique(bwlabel((S == s), 4.))
                N[int(y) - 1,
                  int(x) - 1] = N[int(y) - 1, int(x) - 1] + np.sum((u != 0.))

        #% convert to uint8 to save disk space
        #% (but this converts NaNs to 0s..)
        N = np.uint8(N)
        plt.save(np.array(np.hstack((cacheDir, hash))), 'N')

    return [N]
Example #13
    def _predict_single_sample(self, internal_ids, sample_name):
        try:
            model = self._model_dict[sample_name]
            pred = model.predict(
                self._peptide_df.learned_rt.values[internal_ids].reshape(
                    *self._target_shape))

            return pred.flatten()

        except KeyError:
            return np.nan
Example #14
def dataCleaner(dataframe):
  """
  Removes the empty rows
  :arg: DataFrame dataframe
  :return: DataFrame dataframe
  """
  dataframe = dataframe.dropna(how='all')
  for col in dataframe:
    dataframe[col] = dataframe[col].apply(lambda x: np.nan if str(x).isspace() else x)
    dataframe[col] = dataframe[col].fillna(dataframe[col].mean())
  return dataframe
Example #15
 def __init__(self, illuminance_transfer_matrix, hyp_parameters, num_points):
     self.num_user, self.num_LED = illuminance_transfer_matrix.shape
     self.illuminance = np.zeros((self.num_user, 1))
     self.dimming = np.zeros((self.num_LED, 1))
     self.hyp_parameters = hyp_parameters
     self.illuminance_set = self.feasible_illuminance(illuminance_transfer_matrix, num_points)
     self.acq_func = np.ones((self.illuminance_set.size, 1)) / self.illuminance_set.size
     self.est_sat_fun = np.array([])
     self.est_pref_illuminance = np.full((self.num_user, 1), np.nan)
     self.observed_illuminance = np.array([])
     self.observed_feedback = np.array([])
def quater(mn,yr):
	if mn in [1,2,3]:
		return "Q1-"+str(yr)
	elif mn in [4,5,6]:
		return "Q2-"+str(yr)
	elif mn in [7,8,9]:
		return "Q3-"+str(yr)
	elif mn in [10,11,12]:
		return "Q4-"+str(yr)
	else:
		return np.nan
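For instance, month 5 of 2021 maps to the second quarter (the nan fallback assumes numpy is imported as np in this module):

print(quater(5, 2021))    # 'Q2-2021'
print(quater(13, 2021))   # nan for an invalid month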
Example #17
def get_data_for_EEMD(sym, column, per, perend):
    datasetname = 'cmcdataset'
    store = StoreDF.select_HDFstore(datasetname)
    [window, period, periodend] = mungeData.conv_win_2_block(0, per, perend)
    df = store.get(sym)
    df = df.loc[~df.index.duplicated(keep='first')]
    try:
        datablock = df[column].iloc[-(period + periodend):-periodend]
    except Exception as e:
        print('Period outside of length of block', e)
        datablock = np.nan
    return datablock
Example #18
def getSimilarsByDistance(signal, tolerance=0, maxSimilarNumber=np.inf):
    # Select elements by their closeness first, select required number of the closest ones in groups.
    groupedIndexes=[]
    groupedValues=[]
    while not np.all(np.isnan(signal)):
        elemIndex = np.nonzero(~np.isnan(signal))[0][0]  # first element that has not been grouped yet
        elem = signal[elemIndex]
        indexes = np.nonzero(signal - elem <= tolerance)
        groupedValues.append(np.mean(signal[indexes]))
        signal[indexes] = np.nan
        groupedIndexes.append(indexes)
    return groupedValues, groupedIndexes
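A hypothetical run; note that the function marks grouped entries as NaN in place, so pass a copy if the original values are still needed.

import numpy as np

signal = np.array([1.0, 1.05, 3.0, 3.02, 7.0])
values, index_groups = getSimilarsByDistance(signal.copy(), tolerance=0.1)
print(values)   # means of the three groups: 1.025, 3.01, 7.0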
Example #19
def get_dataset_metafeature_from_openml(task_id):
    task = openml.tasks.get_task(task_id)
    dataset = openml.datasets.get_dataset(task.dataset_id)
    features = []
    for f in list_metafeatures:
        try:
            val = dataset.qualities[f]
            if not np.isnan(val):
                features.append(val)
            else:
                features.append(0)
        except:
            features.append(0)
    return features
    def costheta_wrt_reference(self,reference=None, DEBUG=False): #second3vector=(0,0,1),NewTriadFromLorentzVector=NewTriadFromLorentzVector):
        if reference is not None:
            _beta=self.beta_vector() # is an np.array
            _beta_u=u.versor(_beta)
            _reference=reference.beta_vector() # is an np.array
            _reference_u=u.versor(_reference)

            cos = contract_tuples(_beta_u, _reference_u, metric = None)
            if DEBUG:
                print('*******','costheta_wrt_reference','*******')
                print('cos',cos,' theta:',np.arccos(cos))
            return cos
        else:
            print('The named parameter *reference* needs to be specified')
            return np.nan
def get_current_season(date_):
    if isinstance(date_, datetime.datetime):
        date_fisrt_season = datetime.datetime(2016, 1, 1)

        # delta_month = (date_.year - date_fisrt_season.year) * 12 + date_.month - date_fisrt_season.month

        delta_season = (date_.year - date_fisrt_season.year) * 2
        if date_.month <= 6:
            season = delta_season + 1
        else:
            season = delta_season + 2
    else:
        print('Should be datetime')
        season = np.nan
    return season
    def phi_wrt_reference(self,reference=None,second3vector=(0,0,1), DEBUG=False,NewTriadFromLorentzVector=NewTriadFromLorentzVector):
        if reference is not None:
            newBasis=reference.NewTriadFromLorentzVector(second3vector=second3vector )
            print('*******','phi_wrt_reference','*******')
            if DEBUG: print(newBasis['x2prime'])
            if DEBUG: print(newBasis['vectors'])
            newpl=self.Change3DBasis(newBasis['vectors'])

            newreference=reference.Change3DBasis(newBasis['vectors'])
            if DEBUG: print('newreference')
            if DEBUG: newreference.print_fv()
            return newpl.phi()
        else:
            print('The named parameter *reference* needs to be specified')
            return np.nan
 def calc_median_manual(self, loc=0.0, scale=1.0):
     shape = (self.a + self.b).shape
     i1 = np.ones((1,))
     alpha = self.alpha * i1
     beta = self.beta * i1
     id = np.logical_and(self.alpha > 1.0, self.beta > 1.0)
     if np.any(id == False):
         self.logger.warning("No closed form of median for beta distribution for alpha or beta <= 1.0!" + "\nReturning nan!")
         median = np.full((alpha + beta).shape, np.nan)
         id = np.where(id)[0]
         median[id] = (alpha[id] - 1.0/3) / (alpha[id] + beta[id] - 2.0/3)
         return np.reshape(median, shape) + loc
     else:
         self.logger.warning("Approximate calculation for median of beta distribution!")
         return (self.alpha - 1.0/3) / (self.alpha + self.beta - 2.0/3) + loc
def _count_fullres_per_lowres_bead(multiscale_factor, lengths, ploidy,
                                   fullres_torm=None):
    """Count the number of full-res beads corresponding to each low-res bead.
    """

    if multiscale_factor == 1:
        return None

    fullres_indices = _get_struct_indices(
        ploidy=ploidy, multiscale_factor=multiscale_factor,
        lengths=lengths).reshape(multiscale_factor, -1)

    if fullres_torm is not None and fullres_torm.sum() != 0:
        fullres_indices[np.isin(fullres_indices, np.where(fullres_torm)[0])] = np.nan

    return (~ np.isnan(fullres_indices)).sum(axis=0)
    def sintheta_wrt_reference(self,reference=None, DEBUG=False): #second3vector=(0,0,1),NewTriadFromLorentzVector=NewTriadFromLorentzVector):
        if reference is not None:
            _beta=self.beta_vector() # is an np.array
            _beta_u=u.versor(_beta)
            _reference=reference.beta_vector() # is an np.array
            _reference_u=u.versor(_reference)

            _ort = np.cross(_beta_u, _reference_u)

            sin = np.sqrt( contract_tuples(_ort, _ort, metric = None) )

            if DEBUG:
                print('*******','sintheta_wrt_reference','*******')
                print('sin',sin,' theta:',np.arcsin(sin))
            return sin
        else:
            print('The named parameter *reference* needs to be specified')
            return np.nan
Example #26
def coeffs_line2(R):
    x1 = R[0]
    y1 = R[1]
    x2 = R[2]
    y2 = R[3]
    B = None
    if (x2 - x1) == 0:
        A = 1
        B = 0
    else:
        A = 1 / (x2 - x1)
    if y2 - y1 == 0:
        B = 1
        A = 0
    elif B is None:  # B was never assigned above, so derive it from the slope
        B = -1 / (y2 - y1)
    C = y1 * (-B) - x1 * A
    coeffs = [A, B, C]
    return coeffs
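A quick check of coeffs_line2 (sketch): the segment from (0, 0) to (1, 1) should give the line x - y = 0.

print(coeffs_line2([0, 0, 1, 1]))   # [1.0, -1.0, 0.0]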
Example #27
def get_current_season(date_):
    '''
    Return the season of the indicated date
    :param date_: datetime.datetime
        date
    :return: int
        number of the season
    '''
    if isinstance(date_, datetime.datetime):
        date_fisrt_season = datetime.datetime(2016, 1, 1)
        delta_season = (date_.year - date_fisrt_season.year) * 2
        if date_.month <= 6:
            season = delta_season + 1
        else:
            season = delta_season + 2
    else:
        print('Should be datetime')
        season = np.nan
    return season
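For example, October 2019 is the 8th half-year season counted from the first half of 2016:

import datetime

print(get_current_season(datetime.datetime(2019, 10, 1)))   # 8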
def get_delta_theta_e_sol(bpr):
    """
    Appendix B.7 in [prEN 15316-2:2014]

    delta_theta_e_sol = 8K -- for medium window fraction or internal loads (e.g. residential)
    delta_theta_e_sol = 12K -- for large window fraction or internal loads (e.g. office)

    :param bpr:
    :return:
    """

    if 0 <= bpr.architecture.win_wall < 0.5:  # TODO fix criteria
        delta_theta_e_sol = 8  # (K)
    elif 0.5 <= bpr.architecture.win_wall < 1.0:
        delta_theta_e_sol = 12  # (K)
    else:
        delta_theta_e_sol = np.nan
        print('Error! Unknown window to wall ratio')

    return delta_theta_e_sol
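A sketch of a call; bpr here is a hypothetical stand-in object exposing only the architecture.win_wall attribute the function reads.

from types import SimpleNamespace

bpr = SimpleNamespace(architecture=SimpleNamespace(win_wall=0.3))   # hypothetical stand-in
print(get_delta_theta_e_sol(bpr))   # 8 (K), medium window-to-wall fraction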
Example #29
def binary_search(f, lower, upper, error):
    """
    Perform binary search to find a root of f within the bounds
    [lower, upper] to the specified error.
    
    Parameters
    ----------
    f : (float -> float)
        A continuous real-valued scalar function defined in
        the range [lower, upper].
    lower: float
        Lower limit of binary search
    upper: float
        Upper limit of binary search
    error : float
        Maximum error of result

    Returns
    -------
    float:
        root of f
    float:
        number of iterations
    """
    a = lower
    b = upper
    count = 0
    if sign(f(a)) == sign(f(b)):  # This shouldn't happen if I choose my limits right.
        print("No root detected. Choose different limits.", file=sys.stderr)
        return np.nan, 0
    while b - a > error:
        m = (b + a) / 2
        count += 1
        if sign(f(a)) == sign(f(m)):  # If both f(a) and f(m) have the same sign, the zero is between m and b
            a = m
        else:
            b = m
    return m, count
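A quick sketch: find sqrt(2) as the root of x**2 - 2 on [0, 2]. The function looks up a module-level `sign` (and sys), so the import below reflects that assumption.

import numpy as np
from numpy import sign  # assumption: binary_search resolves `sign` from the module namespace

root, iterations = binary_search(lambda x: x**2 - 2, 0.0, 2.0, 1e-6)
print(root, iterations)   # ~1.414214 after about 21 bisections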
Example #30
    def extract_from_indices(self, idxs, check_bounds=False):
        '''
        Helper function to extract the points referred
        to by the 3D indices 'idxs'
        If check_bound is true, then will check all the idxs to see if they are valid
        invalid idxs will get a nan returned
        (Could also make it so invalid idxs don't get anything returned...)
        '''
        assert idxs.shape[1] == 3

        if check_bounds:
            print "Warning - not tested this bit yet"
            # create output array, find which are valid idxs and look up their values
            output_array = np.nan(idxs.shape[0], 3)
            valid_idxs = self.find_valid_idx(idx)
            output_array[valid_idxs] = self.V[idxs[valid_idxs,
                                                   0], idxs[valid_idxs, 1],
                                              idxs[valid_idxs, 2]]
            return output_array
        else:
            return self.V[idxs[:, 0], idxs[:, 1], idxs[:, 2]]
Example #31
	def getEvents(events):
		'''
		This function gets all the events from a half-time element and return them as dictonaries
		'''

		eventList=[]
		
		for n in range(1,len(events)):
			kind = events[n].span.get('title')
			if events[n].div.get('style') == 'float:left':
			    time = events[n].td.text
			    hometeam = True
			elif events[n].div.get('style') == 'float:right':
			    time = events[n].findAll('td', 'match-sum-wd-minute')[1].text
			    hometeam = False
			else:
			    hometeam = np.nan
			if kind != 'Substitute in':
			    name = events[n].div.text.strip()
			    eventDict = {'time':time,
			                'kind':kind,
			                'hometeam':hometeam,
			                'player':name
			                }
			else:
			    playerIn = events[n].div.text.strip()
			    playerOut = 'PLAYER OUT NOT DEFINED'
			    if (len(events[n].findAll('a')) == 2):
    				    playerOut = events[n].findAll('a')[1].text.strip()
				
			    eventDict = {'time':time,
			                'kind':kind,
			                'hometeam':hometeam,
			                'playerIn':playerIn,
			                'playerOut':playerOut
			                }
			eventList.append(eventDict)
		return eventList
Example #32
def extract_wc_over_semantic_classes(directory):
    for f in os.listdir(os.fsencode(directory)):
        if f.endswith(b'.csv'):
            file_name = f.decode('utf-8')
            df = pd.read_csv(directory + '/' + file_name)
            print(df.shape)

            try:
                df['wc_over_semantic_classes'] = df['WC'].values / df[
                    'semantic_classes'].values
                print(np.isnan(df['semantic_classes']))
            except Exception as e:
                print(e)
            finally:
                df['wc_over_semantic_classes'].replace(np.inf, 0, inplace=True)
                df['wc_over_semantic_classes'].replace(np.nan, 0, inplace=True)

            print(df['WC'].head())
            print(df['semantic_classes'].head())
            print(df['wc_over_semantic_classes'].head())

            print(df.shape)

            df.to_csv(directory + '/' + file_name, index=False)
Example #33
def run_model(id):
    from patsy import dmatrix
    from pandas import Series
    import numpy as np
    import hddm
    dataHDDM = hddm.load_csv('DDM/dataHDDM_pmt.csv')
    dataHDDM["subj_idx"] = dataHDDM["participant"]
    del dataHDDM["participant"]
    dataHDDM["SAT"] = dataHDDM.apply(lambda row: 0 if row['SAT'] == "Accuracy" else 1, axis=1)
    dataHDDM["FC"] = dataHDDM.apply(lambda row: -0.5 if row['FC'] == "low" else 0.5, axis=1)
    dataHDDM["contrast"] = dataHDDM.contrast.replace([1,2,3,4,5,6], [-.5,-.3,-.1,.1,.3,.5])
    dataHDDM["givenResp"] = dataHDDM["response"]
    dataHDDM["stim"] = dataHDDM.apply(lambda row: 1 if row['stim'] == 'Right' else 0, axis=1)
    dataHDDM["response"] = dataHDDM.apply(lambda row: 1 if row['givenResp'] == 'Right' else 0, axis=1)

    def v_link_func(x, data=dataHDDM):
        stim = (np.asarray(dmatrix('0 + C(s, [[1], [-1]])',
                               {'s': data.stim.ix[x.index]})))
        return x*stim
    if id < 4:
        ############## M1
        LM = [{'model': 't ~ SAT  + FC + contrast + SAT:FC + SAT:contrast + FC:contrast + SAT:FC:contrast', 'link_func': lambda x: x},
              {'model': 'v ~ contrast', 'link_func': v_link_func},
              {'model': 'a ~ FC + SAT + SAT:FC', 'link_func': lambda x: x}]
        deps = {'sz': 'SAT'}
        inc = ['sv', 'sz', 'st', 'z']
        model_name = "Joint_t0"
    else:
        return np.nan
    name = 'light_reg_PMT_%s' %str(id)
    m = hddm.HDDMRegressor(dataHDDM, LM , depends_on = deps,
            include=inc, group_only_nodes=['sv', 'sz','st', "sz_SAT"], group_only_regressors=False, keep_regressor_trace=True)
    m.find_starting_values()
    m.sample(iter=10000, burn=8500, thin=1, dbname='DDM/traces/db_%s'%name, db='pickle')
    m.save('DDM/Fits/%s'%name)
    return m
def labeling_vertebrae_T2(
    label, input_anat, input_centerline, input_surface, output_centerline_vertebra, output_surface_vertebra, surface_do
):

    command = "fslhd " + input_anat

    result = commands.getoutput(command)
    orientation = (
        result[result.find("qform_xorient") + 15]
        + result[result.find("qform_yorient") + 15]
        + result[result.find("qform_zorient") + 15]
    )

    if orientation != "ASR":

        print "\nReorient input volume to AP SI RL orientation..."
        sct.run(sct.fsloutput + "fslswapdim tmp.anat AP SI RL " + label.input_path + "/tmp.anat_orient")

        sct.run(sct.fsloutput + "fslswapdim tmp.centerline AP SI RL " + label.input_path + "/tmp.centerline_orient")

        # load_images
        anat_file = nibabel.load(label.input_path + "/tmp.anat_orient")
        anat = anat_file.get_data()
        hdr = anat_file.get_header()
        dims = hdr["dim"]
        scales = hdr["pixdim"]
        # if surface_do==1:
        # surface_file = nibabel.load(input_surface_reorient)
        # surface = surface_file.get_data()

        centerline_file = nibabel.load(label.input_path + "/tmp.centerline_orient")
        centerline = centerline_file.get_data()

    else:
        # loading images
        print "\nLoading Images..."
        anat_file = nibabel.load(input_anat)
        anat = anat_file.get_data()
        hdr = anat_file.get_header()
        dims = hdr["dim"]
        scales = hdr["pixdim"]

        # if surface_do==1:
        # surface_file = nibabel.load(input_surface)
        # surface = surface_file.get_data()

        centerline_file = nibabel.load(input_centerline)
        centerline = centerline_file.get_data()

    # ==================================================
    # Calculation of the profile intensity
    # ==================================================
    print "\nCalculation of the profile intensity..."
    shift_AP = label.shift_AP * scales[1]
    size_AP = label.size_AP * scales[1]
    size_RL = label.size_RL * scales[3]

    np.uint16(anat)

    X, Y, Z = np.where(centerline > 0)

    j = np.argsort(Y)
    y = Y[j]
    x = X[j]
    z = Z[j]

    # eliminating double in y
    index = 0
    for i in range(len(y) - 1):
        if y[i] == y[i + 1]:
            if index == 0:
                index_double = i
            else:
                index_double = np.resize(index_double, index + 1)
                index_double[index] = i
            index = index + 1

    mask = np.ones(len(y), dtype=bool)
    mask[index_double] = False

    y = y[mask]
    x = x[mask]
    z = z[mask]

    # shift the centerline to the spine of shift_AP
    x1 = np.round(x - shift_AP / scales[1])

    # build intensity profile along the centerline
    I = np.zeros((len(y), 1))

    for index in range(len(y)):
        lim_plus = index + 5
        lim_minus = index - 5

        if lim_minus < 0:
            lim_minus = 0
        if lim_plus >= len(x1):
            lim_plus = len(x1) - 1

        # normal vector of the orthogonal plane to the centerline i.e tangent vector to the centerline
        Vx = x1[lim_plus] - x1[lim_minus]
        Vz = z[lim_plus] - z[lim_minus]
        Vy = y[lim_plus] - y[lim_minus]

        d = Vx * x1[index] + Vy * y[index] + Vz * z[index]

        for i_slice_RL in range(2 * np.int(round(size_RL / scales[3]))):
            for i_slice_AP in range(2 * np.int(round(size_AP / scales[1]))):
                result = (d - Vx * (x1[index] + i_slice_AP - size_AP - 1) - Vz * z[index]) / Vy

                if result > anat.shape[1]:
                    result = anat.shape[1]
                I[index] = (
                    I[index]
                    + anat[
                        np.int(round(x1[index] + i_slice_AP - size_AP - 1)),
                        np.int(round(result)),
                        np.int(round(z[index] + i_slice_RL - size_RL - 1)),
                    ]
                )

    # Detrending Intensity
    print "\nDetrending Intensity..."
    start_centerline_y = y[0]
    X = np.where(I == 0)
    mask2 = np.ones((len(y), 1), dtype=bool)
    mask2[X, 0] = False
    # I = I[mask2]

    if label.verbose == 1:
        pl.plot(I)
        pl.xlabel("direction superior-inferior")
        pl.ylabel("intensity")
        pl.title("Intensity profile along the shifted spinal cord centerline")
        pl.show(block=False)

    frequency = scipy.fftpack.fftfreq(len(I[:, 0]), d=1)
    spectrum = np.abs(scipy.fftpack.fft(I[:, 0], n=None, axis=-1, overwrite_x=False))

    Wn = np.amax(frequency) / 10
    N = 5  # Order of the filter
    b, a = scipy.signal.iirfilter(N, Wn, rp=None, rs=None, btype="low", analog=False, ftype="bessel", output="ba")
    I_fit = scipy.signal.filtfilt(b, a, I[:, 0], axis=-1, padtype="constant", padlen=None)

    if label.verbose == 1:
        pl.plot(I[:, 0])
        pl.plot(I_fit)
        pl.show()

    I_detrend = np.zeros((len(I[:, 0]), 1))
    I_detrend[:, 0] = I[:, 0] - I_fit

    I_detrend = I_detrend / abs((np.amin(I_detrend)))
    if label.verbose == 1:
        pl.plot(I_detrend)
        pl.xlabel("direction superior-inferior")
        pl.ylabel("intensity")
        pl.title("Intensity profile along the shifted spinal cord centerline after detrending and basic normalization")
        pl.show(block=False)

    info_1 = input("Is the more rostral vertebrae the C1 or C2 one? if yes, enter 1 otherwise 0:")
    if info_1 == 0:
        level_start = input(
            "enter the level of the more rostral vertebra - choice of the more rostral vertebral level of the field of view:"
        )
    else:
        level_start = 2

    mean_distance_dict = scipy.io.loadmat(label.mean_distance_mat)
    mean_distance = (mean_distance_dict.values()[2]).T
    C1C2_distance = mean_distance[0:2]
    mean_distance = mean_distance[level_start + 1 : len(mean_distance)]

    space = np.linspace(-5 / scales[2], 5 / scales[2], round(11 / scales[2]), endpoint=True)
    pattern = (np.sinc((space * scales[2]) / 15)) ** (20)
    xmax_pattern = np.argmin(pattern)
    pixend = len(pattern) - xmax_pattern

    # ==================================================
    # step 1 : find the first peak
    # ==================================================
    print "\nFinding the First Peak..."
    pattern1 = np.concatenate((pattern, np.zeros(len(I_detrend[:, 0]) - len(pattern))))
    corr_all = scipy.signal.correlate(I_detrend[:, 0], pattern1)
    loc_corr = np.arange(-np.round((len(corr_all) / 2)), np.round(len(corr_all) / 2) + 2)
    index_fp = 0
    count = 0
    for i in range(len(corr_all)):
        if corr_all[i] > 0.1:
            if i == 0:
                if corr_all[i] < corr_all[i + 1]:
                    index_fp = i
                    count = count + 1
            elif i == (len(corr_all) - 1):
                if corr_all[i] < corr_all[i - 1]:
                    index_fp = np.resize(index_fp, count + 1)
                    index_fp[len(index_fp) - 1] = i
            else:
                if corr_all[i] < corr_all[i + 1]:
                    index_fp = np.resize(index_fp, count + 1)
                    index_fp[len(index_fp) - 1] = i
                    count = count + 1
                elif corr_all[i] < corr_all[i - 1]:
                    index_fp = np.resize(index_fp, count + 1)
                    index_fp[len(index_fp) - 1] = i
                    count = count + 1
        else:
            if i == 0:
                index_fp = i
                count = count + 1
            else:
                index_fp = np.resize(index_fp, count + 1)
                index_fp[len(index_fp) - 1] = i
                count = count + 1

    mask_fp = np.ones(len(corr_all), dtype=bool)
    mask_fp[index_fp] = False
    value = corr_all[mask_fp]
    loc_corr = loc_corr[mask_fp]

    loc_corr = loc_corr - I_detrend.shape[0]

    loc_first_peak = xmax_pattern - loc_corr[np.amax(np.where(value > 0.6))]
    Mcorr1 = value[np.amax(np.where(value > 0.6))]

    # building the pattern that has to be added at each iteration in step 2
    if loc_first_peak >= 0:
        template_truncated = pattern[(loc_first_peak + 1) :]
    else:
        template_truncated = np.concatenate((np.zeros(abs(loc_first_peak)), pattern))

    xend = len(template_truncated)

    if label.verbose == 1:
        pl.plot(template_truncated)
        pl.plot(I_detrend)
        pl.title("Detection of First Peak")
        pl.xlabel("direction anterior-posterior (mm)")
        pl.ylabel("intensity")
        pl.show(block=False)

    # smoothing the intensity curve----
    I_detrend[:, 0] = scipy.ndimage.filters.gaussian_filter1d(I_detrend[:, 0], 10)

    loc_peak_I = np.arange(len(I_detrend[:, 0]))
    count = 0
    index_p = 0
    for i in range(len(I_detrend[:, 0])):
        if I_detrend[i] > 0.05:
            if i == 0:
                if I_detrend[i, 0] < I_detrend[i + 1, 0]:
                    index_p = i
                    count = count + 1
            elif i == (len(I_detrend[:, 0]) - 1):
                if I_detrend[i, 0] < I_detrend[i - 1, 0]:
                    index_p = np.resize(index_p, count + 1)
                    index_p[len(index_p) - 1] = i
            else:
                if I_detrend[i, 0] < I_detrend[i + 1, 0]:
                    index_p = np.resize(index_p, count + 1)
                    index_p[len(index_p) - 1] = i
                    count = count + 1
                elif I_detrend[i, 0] < I_detrend[i - 1, 0]:
                    index_p = np.resize(index_p, count + 1)
                    index_p[len(index_p) - 1] = i
                    count = count + 1
        else:
            if i == 0:
                index_p = i
                count = count + 1
            else:
                index_p = np.resize(index_p, count + 1)
                index_p[len(index_p) - 1] = i
                count = count + 1

    mask_p = np.ones(len(I_detrend[:, 0]), dtype=bool)
    mask_p[index_p] = False
    value_I = I_detrend[mask_p]
    loc_peak_I = loc_peak_I[mask_p]

    count = 0
    for i in range(len(loc_peak_I) - 1):
        if i == 0:
            if loc_peak_I[i + 1] - loc_peak_I[i] < round(10 / scales[1]):
                index = i
                count = count + 1
        else:
            if (loc_peak_I[i + 1] - loc_peak_I[i]) < round(10 / scales[1]):
                index = np.resize(index, count + 1)
                index[len(index) - 1] = i
                count = count + 1
            elif (loc_peak_I[i] - loc_peak_I[i - 1]) < round(10 / scales[1]):
                index = np.resize(index, count + 1)
                index[len(index) - 1] = i
                count = count + 1

    mask_I = np.ones(len(value_I), dtype=bool)
    mask_I[index] = False
    value_I = -value_I[mask_I]
    loc_peak_I = loc_peak_I[mask_I]

    from scipy.interpolate import UnivariateSpline

    fit = UnivariateSpline(loc_peak_I, value_I)
    P = fit(np.arange(len(I_detrend)))

    if label.verbose == 1:
        pl.xlim(0, len(I_detrend) - 1)
        pl.plot(loc_peak_I, value_I)
        pl.plot(I_detrend)
        pl.plot(P)
        pl.title("Setting values of peaks at one by fitting a smoothing spline")
        pl.xlabel("direction superior-inferior (mm)")
        pl.ylabel("normalized intensity")
        pl.show(block=False)

    for i in range(len(I_detrend)):
        if P[i] > 0.1:
            I_detrend[i, 0] = I_detrend[i, 0] / abs(P[i])

    # ===================================================================================
    # step 2 : Cross correlation between the adjusted template and the intensity profile
    #          local moving of template's peak from the first peak already found
    # ===================================================================================

    mean_distance_new = mean_distance
    mean_ratio = np.zeros(len(mean_distance))
    L = int(np.round(1.2 * max(mean_distance)) - np.round(0.8 * min(mean_distance)))
    corr_peak = np.full((L, len(mean_distance)), np.nan)

    for i_peak in range(len(mean_distance)):
        scale_min = np.round(0.80 * mean_distance_new[i_peak]) - xmax_pattern - pixend
        if scale_min < 0:
            scale_min = 0

        scale_max = np.round(1.2 * mean_distance_new[i_peak]) - xmax_pattern - pixend
        scale_peak = np.arange(scale_min, scale_max + 1)

        for i_scale in range(len(scale_peak)):
            template_resize_peak = np.concatenate([template_truncated, np.zeros(scale_peak[i_scale]), pattern])
            if len(I_detrend[:, 0]) > len(template_resize_peak):
                template_resize_peak1 = np.concatenate(
                    (template_resize_peak, np.zeros(len(I_detrend[:, 0]) - len(template_resize_peak)))
                )
            corr_template = scipy.signal.correlate(I_detrend[:, 0], template_resize_peak)

            if len(I_detrend[:, 0]) > len(template_resize_peak):
                val = np.dot(I_detrend[:, 0], template_resize_peak1.T)
            else:
                I_detrend_2 = np.concatenate(
                    (I_detrend[:, 0], np.zeros(len(template_resize_peak) - len(I_detrend[:, 0])))
                )
                val = np.dot(I_detrend_2, template_resize_peak.T)
            corr_peak[i_scale, i_peak] = val

            if label.verbose == 1:
                pl.xlim(0, len(I_detrend[:, 0]))
                pl.plot(I_detrend[:, 0])
                pl.plot(template_resize_peak)
                pl.show(block=False)

                pl.plot(corr_peak[:, i_peak], marker="+", linestyle="None", color="r")
                pl.title("correlation value against the displacement of the peak (px)")
                pl.show(block=False)

        max_peak = np.amax(corr_peak[:, i_peak])
        index_scale_peak = np.where(corr_peak[:, i_peak] == max_peak)
        good_scale_peak = scale_peak[index_scale_peak][0]
        Mcorr = Mcorr1
        Mcorr = np.resize(Mcorr, i_peak + 2)
        Mcorr[i_peak + 1] = np.amax(corr_peak[:, 0 : (i_peak + 1)])
        flag = 0

        if i_peak > 0:
            if (Mcorr[i_peak + 1] - Mcorr[i_peak]) < 0.4 * np.mean(Mcorr[1 : i_peak + 2] - Mcorr[0 : i_peak + 1]):
                test = i_peak
                template_resize_peak = np.concatenate(
                    (template_truncated, np.zeros(round(mean_distance[i_peak]) - xmax_pattern - pixend), pattern)
                )
                good_scale_peak = np.round(mean_distance[i_peak]) - xmax_pattern - pixend
                flag = 1
        if i_peak == 0:
            if (Mcorr[i_peak + 1] - Mcorr[i_peak]) < 0.4 * Mcorr[0]:
                template_resize_peak = np.concatenate(
                    (template_truncated, np.zeros(round(mean_distance[i_peak]) - xmax_pattern - pixend), pattern)
                )
                good_scale_peak = round(mean_distance[i_peak]) - xmax_pattern - pixend
                flag = 1
        if flag == 0:
            template_resize_peak = np.concatenate((template_truncated, np.zeros(good_scale_peak), pattern))

        mean_distance_new[i_peak] = good_scale_peak + xmax_pattern + pixend
        mean_ratio[i_peak] = np.mean(mean_distance_new[:, 0:i_peak] / mean_distance[:, 0:i_peak])

        template_truncated = template_resize_peak

        if label.verbose == 1:
            pl.plot(I_detrend[:, 0])
            pl.plot(template_truncated)
            pl.xlim(0, (len(I_detrend[:, 0]) - 1))
            pl.show(block=False)

    minpeakvalue = 0.5
    loc_disk = np.arange(len(template_truncated))
    count = 0
    index_disk = 0
    for i in range(len(template_truncated)):
        if template_truncated[i] >= minpeakvalue:
            if i == 0:
                if template_truncated[i] < template_truncated[i + 1]:
                    index_disk = i
                    count = count + 1
            elif i == (len(template_truncated) - 1):
                if template_truncated[i] < template_truncated[i - 1]:
                    index_disk = np.resize(index_disk, count + 1)
                    index_disk[len(index_disk) - 1] = i
            else:
                if template_truncated[i] < template_truncated[i + 1]:
                    index_disk = np.resize(index_disk, count + 1)
                    index_disk[len(index_disk) - 1] = i
                    count = count + 1
                elif template_truncated[i] < template_truncated[i - 1]:
                    index_disk = np.resize(index_disk, count + 1)
                    index_disk[len(index_disk) - 1] = i
                    count = count + 1
        else:
            if i == 0:
                index_disk = i
                count = count + 1
            else:
                index_disk = np.resize(index_disk, count + 1)
                index_disk[len(index_disk) - 1] = i
                count = count + 1

    mask_disk = np.ones(len(template_truncated), dtype=bool)
    mask_disk[index_disk] = False
    loc_disk = loc_disk[mask_disk]
    X1 = np.where(loc_disk > I_detrend.shape[0])
    mask_disk1 = np.ones(len(loc_disk), dtype=bool)
    mask_disk1[X1] = False
    loc_disk = loc_disk[mask_disk1]
    loc_disk = loc_disk + start_centerline_y - 1

    # =====================================================================
    # Step 3: Building of labeled centerline and surface
    # =====================================================================
    print "\nBuilding of Labeled Centerline and Surface..."
    for i in range(len(loc_disk)):

        Index = np.array(np.where(y == loc_disk[i])).T
        lim_plus = Index + 5
        lim_minus = Index - 5

        if lim_minus < 1:
            lim_minus = 1
        if lim_plus > len(x):
            lim_plus = len(x)

        Vx = x[lim_plus] - x[lim_minus]
        Vz = z[lim_plus] - z[lim_minus]
        Vy = y[lim_plus] - y[lim_minus]

        d = Vx * x1[Index] + Vy * y[Index] + Vz * z[Index]

        intersection = np.ones(len(x))
        for j in range(len(x)):
            intersection[j] = np.abs((Vx * x[j] + Vy * y[j] + Vz * z[j] - d))

        min_intersection = np.amin(intersection)
        index_intersection = np.where(min_intersection == intersection)
        loc_disk[i] = y[index_intersection]

    center_disk = np.array(centerline)
    for i in range(len(loc_disk) - 1):
        tmp = center_disk[loc_disk[i] : loc_disk[i + 1]]
        tmp[np.where(tmp == 1)] = i + level_start
        center_disk[loc_disk[i] : loc_disk[i + 1]] = tmp

    center_disk[np.where(center_disk == 1)] = 0

    if level_start == 2:
        center_disk[x[0], (int(round(loc_disk[0] - C1C2_distance[1])) - 1) : loc_disk[0], z[0]] = 2
        center_disk[
            x[0],
            (int(round(loc_disk[0] - C1C2_distance[0] - C1C2_distance[1])) - 1) : int(
                round(loc_disk[0] - C1C2_distance[1] - 1)
            ),
            z[0],
        ] = 1

    # Write NIFTI volumes
    hdr.set_data_dtype("uint8")  # set imagetype to uint8
    print "\nWrite NIFTI volumes..."
    img = nibabel.Nifti1Image(center_disk, None, hdr)
    file_name = output_centerline_vertebra
    nibabel.save(img, file_name)
    print ".. File created:" + file_name
tup = (1, 2, 3, 4, 5)

#%%
#Numpy arrays
import numpy as np
a = np.array([1, 2, 3, 4])
a.mean
a.mean()
a.std()
np.std(a)
np.mean(a)
a = np.append(a, 6)  # ndarray has no .append; np.append returns a new array
c = np.array([1, 2, np.nan, 3, 4])
c.ndim
c.shape
c[~np.isnan(c)]
np.mean(c)
np.mean(c[~np.isnan(c)])
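#np.nanmean gives the same NaN-skipping mean in one call
np.nanmean(c)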

#%%
#Fibonacci sequence
def fib(n):
    '''Print a Fibonacci series up to n'''
    a, b = 0, 1
    while a < n:
        print(a, end=' ')
        a, b = b, a + b
fib(1000)

#%%
 def nestData(self, universalFirstDate, universalLastDate):
     frontLength = self.firstDate - universalFirstDate
     backLength = universalLastDate - self.lastDate
     self.allData = np.hstack([np.full((1, frontLength), np.nan), self.currentData, np.full((1, backLength), np.nan)])
Example #37
 def __init__(self):
     self.gamma = np.nan  # unit weight
     self.E = np.nan  # Young's modulus
     self.name = 'None'  #name of material
     self.poisson_ratio = 0.25
Example #38
def test_weird_data():
    with raises(TypeError):
        binary_search([np.nan, None, 0.1, 0.5], 0.5)