def peak_find_3d(f):
    # Strict local maxima/minima of a 3-D array, compared against all
    # 26 neighbours; np.roll wraps around, so boundaries are periodic.
    peak_p = np.full(np.shape(f), True)
    peak_n = np.full(np.shape(f), True)
    neighbours = [-1, 0, 1]
    for i in neighbours:
        for j in neighbours:
            for k in neighbours:
                if not (i == 0 and j == 0 and k == 0):
                    temp = np.roll(f, (i, j, k), axis=(-3, -2, -1))
                    peak_p = np.where(f > temp, peak_p, False)
                    peak_n = np.where(f < temp, peak_n, False)
    peak_p_ind = np.nonzero(peak_p)
    peak_n_ind = np.nonzero(peak_n)
    return peak_p, peak_n, peak_p_ind, peak_n_ind
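A quick sanity check (a minimal sketch; the import and the sinusoidal test field below are mine, not part of the example):

import numpy as np

x = np.linspace(0, 2 * np.pi, 16, endpoint=False)
f = (np.sin(x)[:, None, None]
     + np.sin(2 * x)[None, :, None]
     + np.cos(3 * x)[None, None, :])
peak_p, peak_n, peak_p_ind, peak_n_ind = peak_find_3d(f)
print("maxima:", peak_p_ind[0].size, "minima:", peak_n_ind[0].size)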
Example #2
    def saveInferenceFile(self):
        metadatafile = self.inferenceFile.rpartition('.')[0] + '_metadata.txt'

        try:
            flNum = int(
                re.search(r'fl\d', self.inferenceFile,
                          re.IGNORECASE).group()[-1])
        except AttributeError:
            getLogger(__name__).warning(
                'Could not guess feedline from filename.')
            flNum = 0

        ws_good_inds = self.goodPeakIndices
        ws_bad_inds = self.badPeakIndices
        freqs = np.append(self.inferenceData.freqs[ws_good_inds],
                          self.inferenceData.freqs[ws_bad_inds])
        sort_inds = np.argsort(freqs)
        resIds = np.arange(freqs.size) + flNum * 10000

        # good freqs were appended first above, so the leading entries are good
        flag = np.full(freqs.size, sweepdata.ISBAD)
        flag[:len(ws_good_inds)] = sweepdata.ISGOOD
        smd = sweepdata.SweepMetadata(resid=resIds,
                                      flag=flag[sort_inds],
                                      wsfreq=freqs[sort_inds],
                                      file=metadatafile)
        smd.save()
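The np.full-then-overwrite flag pattern can be checked in isolation (the ISGOOD/ISBAD values and frequencies below are hypothetical stand-ins for the sweepdata constants):

import numpy as np

ISGOOD, ISBAD = 1, 0
freqs = np.array([5.1e9, 4.9e9, 5.0e9])
flag = np.full(freqs.size, ISBAD)
flag[:2] = ISGOOD  # suppose the first two entries are the "good" ones
sort_inds = np.argsort(freqs)
print(freqs[sort_inds], flag[sort_inds])  # flags travel with their freqs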
Example #3
def get_semantic_voxel_from_point(point, voxel_size, voxels_per_side):
    grid_size = voxel_size * voxels_per_side
    grid_size_inv = 1 / grid_size
    block_coordinate = get_grid_index_from_point(point, grid_size_inv)
    point_local = point - block_coordinate * np.full(3, grid_size)
    local_coordinate = get_grid_index_from_point(point_local, 1 / voxel_size)
    return block_coordinate, local_coordinate
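The snippet calls a get_grid_index_from_point helper that is not shown. A minimal sketch consistent with how it is used (multiply by the inverse cell size and floor), plus a sample call; both are assumptions, not the original code:

import numpy as np

def get_grid_index_from_point(point, grid_size_inv):
    # Integer index of the grid cell containing `point`, given 1 / cell_size.
    return np.floor(np.asarray(point) * grid_size_inv).astype(np.int64)

block, local = get_semantic_voxel_from_point(
    np.array([0.35, -0.2, 1.7]), voxel_size=0.1, voxels_per_side=8)
print(block, local)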
Example #4
    def __init__(self, chrom_name, chrom_size, chunk_size, dtype):
        self.name = chrom_name
        self.chrom_size = chrom_size
        self.index_size = chrom_size // chunk_size + 1
        self.dtype = dtype
        self.index = np.full(self.index_size, -1, dtype='int32')
        self.values = []
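For instance, with hypothetical sizes (a 1,000,000 bp chromosome indexed in 4,000 bp chunks), the sentinel-filled index looks like:

import numpy as np

chrom_size, chunk_size = 1_000_000, 4_000
index = np.full(chrom_size // chunk_size + 1, -1, dtype='int32')
print(index.size, index[:3])  # 251 [-1 -1 -1]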
Example #5
def find_duplicate_columns(A):
    N = A.shape[0]
    P = A.shape[1]
    indices_duplicated = np.full((N, 1, P), 1, dtype=np.int32)
    for idx in range(N):
        _, indices = np.unique(A[idx], return_index=True, axis=0)
        indices_duplicated[idx, :, indices] = 0
    return indices_duplicated
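A small check of the behaviour (the test matrix is mine, and this assumes the function above is in scope): the mask is 1 at later repeats of a value within a row and 0 at first occurrences.

import numpy as np

A = np.array([[1, 2, 1, 3],
              [4, 4, 4, 5]])
print(find_duplicate_columns(A)[:, 0, :])
# [[0 0 1 0]
#  [0 1 1 0]]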
Example #6
    def get_incoming_edges(self, tail: int) -> Iterable[Edge]:
        tail_col = self.adj_mat[:, tail]
        with_edge = np.isfinite(tail_col)
        edge_costs = tail_col[with_edge]
        heads = np.argwhere(with_edge).flatten()
        edges = np.transpose(
            np.array([heads, np.full(heads.shape, tail), edge_costs]))
        return [Edge(*values) for values in edges]
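A standalone run of the same logic (the Edge record and the np.inf-for-missing-edges convention are assumptions about the surrounding class):

import numpy as np
from typing import NamedTuple

class Edge(NamedTuple):
    head: float
    tail: float
    cost: float

adj_mat = np.array([[np.inf, 2.0],
                    [1.0, np.inf]])   # finite entries are edge costs
tail = 1
tail_col = adj_mat[:, tail]
with_edge = np.isfinite(tail_col)
heads = np.argwhere(with_edge).flatten()
edges = np.transpose(np.array([heads, np.full(heads.shape, tail),
                               tail_col[with_edge]]))
print([Edge(*values) for values in edges])
# [Edge(head=0.0, tail=1.0, cost=2.0)]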
Example #7
    def __init__(self, input_len, hidden_len, init_fctn=None):
        self.input_len = input_len
        self.hidden_len = hidden_len
        use_default_init = init_fctn is None
        if use_default_init:
            init_fctn = lambda shape: 0.1 * np.random.randn(*shape) - 0.05
        self.Wz = init_fctn([hidden_len, input_len + hidden_len])
        self.Wr = init_fctn([hidden_len, input_len + hidden_len])
        self.Wp = init_fctn([hidden_len, input_len + hidden_len])
        # The reset and weighting signals need special initialization.
        if use_default_init:
            self.br = np.full(hidden_len, -1.0)
            self.bz = np.full(hidden_len, 0.5)
        else:
            self.br = init_fctn([hidden_len])
            self.bz = init_fctn([hidden_len])
        self.bp = init_fctn([hidden_len])
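The special defaults push the gates' initial activations away from 0.5; a quick look at what they produce (a sketch, assuming sigmoid gates):

import numpy as np

hidden_len = 4
br = np.full(hidden_len, -1.0)  # reset-gate bias
bz = np.full(hidden_len, 0.5)   # update-gate bias
sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
print(sigmoid(br)[0], sigmoid(bz)[0])  # ~0.27 and ~0.62 at zero input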
Example #8
    def filled(self):
        if self._fld is not None:
            # if a window extends beyond image limits, the fill value for the
            # out-of-bounds pixels is set to zero (i.e. those patch pixels are
            # not "filled")
            fill, _ = copyutils.getWindow(self._fld, self._coords,
                                          self._w, outofboundsvalue=False)
        else:
            fill = np.full((2 * self._w + 1, 2 * self._w + 1), True,
                           dtype=np.uint8)
        return fill
Example #10
def convertData():
    global DOC_TRAIN, CLASS_TRAIN  # module-level accumulators (assumed)
    print("Converting...")

    WINDOW_SIZE = 10  # number of items to fetch per query
    WINDOW_INDEX = 0
    NUMBER_OF_DOC = 0
    db = DB()
    # STEP 1: compute the total weight of each class.
    # Read the whole db, stopping once no rows remain.
    while True:
        start = WINDOW_SIZE * WINDOW_INDEX + 1
        stop = WINDOW_SIZE * (WINDOW_INDEX + 1)
        # things = query.slice(start, stop).all()
        query = ("select id, cate_id, tf from " + TABLE + " order by id limit "
                 + str(start) + ", " + str(WINDOW_SIZE))
        logger.info(query)

        cursor = db.cursor()
        cursor.execute(query)
        rows = cursor.fetchall()

        if rows is None or len(rows) == 0:
            break
        else:
            logger.info("Query size: " + str(len(rows)))

            for row in rows:
                content = row['tf']
                cateId = row['cate_id']
                docId = row["id"]
                try:
                    mapWeightInDoc = json.loads(content)
                except ValueError:
                    continue
                trainItem = np.array([])
                for word in mapWeightInDoc:
                    if word in WORDS:
                        trainItem = np.append(trainItem, [mapWeightInDoc[word]])
                    else:
                        trainItem = np.append(trainItem, [0])

                DOC_TRAIN = np.append(DOC_TRAIN, [trainItem])
                CLASS_TRAIN = np.append(CLASS_TRAIN, [cateId])
            print(CLASS_TRAIN)
            return None
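As an aside, np.append copies the whole array on every call; the usual idiom is to accumulate Python lists and convert once at the end. A minimal sketch (the WORDS vocabulary and documents are hypothetical):

import numpy as np

WORDS = {"alpha": 0, "beta": 1}
docs = [{"alpha": 0.5}, {"beta": 1.2, "gamma": 0.3}]

doc_train = [[d.get(word, 0) for word in WORDS] for d in docs]
DOC_TRAIN = np.array(doc_train)
print(DOC_TRAIN)
# [[0.5 0. ]
#  [0.  1.2]]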
Example #11
    def binarize_dicts(self, Y_dicts, *, default=0.0, **kwargs):
        binarized = np.full((Y_dicts.shape[0], len(self.classes_)),
                            default,
                            dtype=float)
        classes_map = dict((c, i) for i, c in enumerate(self.classes_))

        for i in range(Y_dicts.shape[0]):
            d = Y_dicts[i]
            for k, p in d.items():
                try:
                    binarized[i, classes_map[k]] = p
                except KeyError:
                    pass
            #end for
        #end for

        return binarized
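The same logic, runnable standalone (the class list and probability dicts are my own test data):

import numpy as np

classes_ = ["a", "b", "c"]
Y_dicts = np.array([{"a": 0.9}, {"b": 0.2, "c": 0.7}], dtype=object)
binarized = np.full((Y_dicts.shape[0], len(classes_)), 0.0, dtype=float)
classes_map = {c: i for i, c in enumerate(classes_)}
for i, d in enumerate(Y_dicts):
    for k, p in d.items():
        if k in classes_map:
            binarized[i, classes_map[k]] = p
print(binarized)
# [[0.9 0.  0. ]
#  [0.  0.2 0.7]]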
Example #12
def galaxy_bias(power_k, redshift=1.):
    """ Fiducial galaxy bias for given k bins and redshift

    Parameters
    ----------
    power_k: numpy array of floats
        k bins
    redshift: float
        redshift (default 1.)

    Returns
    -------
    numpy array of floats
        fiducial galaxy bias
    """

    bias = np.zeros(power_k.size)
    bias.fill(1.5)
    return bias
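Example call (assuming the function above is in scope):

import numpy as np

k = np.logspace(-3, 0, 5)
print(galaxy_bias(k))  # [1.5 1.5 1.5 1.5 1.5]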
Example #13
def propensity_score(data: pd.DataFrame,
                     X: str,
                     Y: str,
                     Z: str = None,
                     estimator: str = "logit") -> float:
    # Try get value from estimators
    estimator = _try_get(estimator, estimators)

    Z = _as_set(Z)
    if Z:
        # Build the formula
        formula = f"{X} ~ " + " + ".join(Z)
        # Fit the estimator
        estimator = estimator(formula, data)
        estimator = estimator.fit()
        # Compute the propensity given Z
        propensity = estimator.predict(data)
    else:
        # Compute the propensity without Z
        propensity = np.mean(data[X])
        propensity = np.full((len(data), ), propensity)

    return propensity
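The Z-free branch just broadcasts the sample mean of the treatment column; in isolation (toy data, mine):

import numpy as np
import pandas as pd

data = pd.DataFrame({"X": [0, 1, 1, 0, 1]})
propensity = np.full((len(data),), np.mean(data["X"]))
print(propensity)  # [0.6 0.6 0.6 0.6 0.6]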
Example #14
    def encode(self, label_str, on_value=1, off_value=0):  # pylint: disable=arguments-differ
        e = np.full(self.vocab_size, off_value, dtype=np.int32)
        e[self._class_labels.index(label_str)] = on_value
        return e.tolist()
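The same one-hot pattern outside the class (the label set is hypothetical):

import numpy as np

class_labels = ["cat", "dog", "bird"]
e = np.full(len(class_labels), 0, dtype=np.int32)
e[class_labels.index("dog")] = 1
print(e.tolist())  # [0, 1, 0]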
Example #15
    def visit_array(self, struct, attr, meta):
        if "__has_minValue" in meta and meta["__has_minValue"]:
            v = np.full(getattr(struct, attr).shape,
                        meta["minValue"](),
                        dtype=meta["_get_type"](meta["minValue"]()))
            setattr(struct, attr, v)
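What the meta protocol presumably looks like in use (the meta dict below is a guess at the expected keys, not the library's actual schema):

import numpy as np

meta = {"__has_minValue": True,
        "minValue": lambda: 0.5,
        "_get_type": lambda v: np.float32}
shape = (2, 3)
v = np.full(shape, meta["minValue"](),
            dtype=meta["_get_type"](meta["minValue"]()))
print(v.dtype, v[0])  # float32 [0.5 0.5 0.5]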
Example #16
ed2 = cv2.adaptiveThreshold(edges,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
            cv2.THRESH_BINARY,11,2)


def get_dark_point(im, threshold=50):
    while True:
        # randint is inclusive on both ends, so cap at shape - 1
        x = random.randint(0, im.shape[0] - 1)
        y = random.randint(0, im.shape[1] - 1)
        if im[x, y] < threshold:
            return (x, y)


def get_dp_within(im, p, threshold=50, dist2=900):
    while True:
        x = random.randint(0, im.shape[0] - 1)
        y = random.randint(0, im.shape[1] - 1)
        if (x - p[0]) * (x - p[0]) + (y - p[1]) * (y - p[1]) < dist2:
            if im[x, y] < threshold:
                return (x, y)


blank_image = np.full(img.shape, 255, dtype=np.uint8)
for i in range(4):
    ps = []
    ps.append(get_dark_point(ed2))
    for j in range(3):
        ps.append(get_dp_within(ed2, ps[0]))
        cv2.line(blank_image, ps[j], ps[j + 1], 0, 2)

cv2.imshow("edges", ed2)
cv2.imshow("lines", blank_image)
cv2.waitKey()
Example #17
def get_pt(mean, stddev=0.0, size=1):
    if stddev:
        pt = np.random.normal(loc=mean, scale=stddev, size=size)
        return pt.astype(np.int64)
    return np.full(size, mean, dtype=np.int64)
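Example calls (assuming the function above is in scope):

import numpy as np

print(get_pt(5, size=3))              # deterministic: [5 5 5]
print(get_pt(5, stddev=2.0, size=3))  # three noisy integer draws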
Example #18
    def __init__(self,
                 y,
                 nseasons: int,
                 season_duration: int = 1,
                 initial_state_prior=None,
                 innovation_sd_prior: R.SdPrior = None,
                 sdy: float = None):
        """
        Args:
          y: The time series being modeled.  This can be omitted if either (a)
            initial_state_prior and sdy and initial_y are passed, or (b) sdy
            and initial_y are passed.
          nseasons: The number of seasons in a cycle.
          season_duration:  The number of time periods in each season.  See
            below.

          initial_state_prior: A multivariate normal distribution of dimension
            nseasons - 1.  This is a distribution on the seasonal value at time
            0 and on the nseasons-2 previous values.  If None is passed then a
            default prior will be assumed.

          innovation_sd_prior: Prior distribution on the standard deviation of
            the innovation terms.  If None, then a default prior will be
            assumed.
          sdy: The standard deviation of the time series being modeled.

        Details:

        """
        self._nseasons = nseasons
        self._season_duration = season_duration

        if initial_state_prior is None:
            if sdy is None:
                if y is None:
                    raise Exception("One of 'y', 'sdy', or "
                                    "'initial_state_prior' must be supplied.")
                sdy = np.nanstd(y, ddof=1)
            initial_state_prior = self._default_initial_state_prior(sdy)

        if isinstance(initial_state_prior, R.NormalPrior):
            dim = nseasons - 1
            mu = initial_state_prior.mean
            sigma = initial_state_prior.sd
            initial_state_prior = R.MvnPrior(
                mu=np.full(dim, mu),
                Sigma=np.diag(np.full(dim, sigma * sigma)))

        if not isinstance(initial_state_prior, R.MvnPrior):
            raise Exception("Unexpected type for 'initial_state_prior'.  "
                            "Acceptable types include R.NormalPrior or "
                            "R.MvnPrior.")
        self._initial_state_prior = initial_state_prior

        if innovation_sd_prior is None:
            if sdy is None:
                if y is None:
                    raise Exception("One of 'y', 'sdy', or "
                                    "'innovation_sd_prior' must be supplied.")
                sdy = np.nanstd(y, ddof=1)
            innovation_sd_prior = self._default_sigma_prior(sdy)
        if not isinstance(innovation_sd_prior, R.SdPrior):
            raise Exception("Expected an R.SdPrior for innovation_sd_prior.")
        self._innovation_sd_prior = innovation_sd_prior

        self._build_model()
        self._state_contribution = None
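The NormalPrior branch expands a scalar prior into an nseasons - 1 dimensional one; the expansion on its own (illustrative numbers):

import numpy as np

nseasons, mu, sigma = 4, 0.0, 1.5
dim = nseasons - 1
mean_vec = np.full(dim, mu)
Sigma = np.diag(np.full(dim, sigma * sigma))
print(mean_vec)  # [0. 0. 0.]
print(Sigma)     # 3x3 diagonal with 2.25 on the diagonal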
Example #19
import numpy as np
import matplotlib.pyplot as plt

# Simulation constants
alpha = 0.00001  # Laplacian multiplier
n = 8000  # number of iterations
s = 100  # size of the matrix
dx = 2/s  # Simulation speed/accuracy

# Matrix initialization
M = np.full((s, s), 0.0)
# Hotspot Seeding
M[20:30, 45:50] = 90
M[70:80, 45:50] = 90

# Implementation of the discretized Laplacian operator
def laplacian(Z):
    Ztop = Z[0:-2, 1:-1]
    Zleft = Z[1:-1, 0:-2]
    Zbottom = Z[2:, 1:-1]
    Zright = Z[1:-1, 2:]
    Zcenter = Z[1:-1, 1:-1]
    return (Ztop + Zleft + Zbottom + Zright - (4 * Zcenter)) / dx**2

# Iterations through PDE
for _ in range(n):
    Mn = M[1:-1, 1:-1]
    M[1:-1, 1:-1] = Mn + np.multiply(laplacian(M), alpha)

# Graph Data
plt.imshow(M, cmap=plt.cm.coolwarm)
plt.show()