Example #1
def dot(a, b, out=None):
    out = asarray(np.dot(a, b, out=out))
    if a._valid is not None or b._valid is not None:
        valid = np.logical_not(np.dot(np.logical_not(a._effective_valid()),
                                     np.logical_not(b._effective_valid())))
        out._add_valid(valid)
    return out
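A minimal standalone sketch of the validity-propagation rule above, using plain NumPy boolean arrays (the names a_valid and b_valid are made up for illustration): an output element is flagged invalid only when some inner index pairs an invalid element of a with an invalid element of b.

import numpy as np

a_valid = np.array([[True, False],
                    [True, True]])
b_valid = np.array([[True, True],
                    [False, True]])

# np.dot of the inverted masks is True at (i, j) whenever some k pairs an
# invalid a[i, k] with an invalid b[k, j]; negating that marks the rest valid.
out_valid = np.logical_not(np.dot(np.logical_not(a_valid),
                                  np.logical_not(b_valid)))
print(out_valid)
# [[False  True]
#  [ True  True]]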
Example #2
 def observe(self,cur_line):
     
     # Get the right RSS out
     self.rss_obj.observe(cur_line)
     cur_obs = self.rss_obj.get_rss()
     
     # if we are in calibration time, add the current observation to off 
     # buffer
     if np.logical_not(self.__is_ltb_full()):
         self.__add_obs_to_off_buff(cur_obs)
     
     # if we are done with calibration, and the pmfs have not been set, then
     # set them
     elif np.logical_not(self.is_updated):
         self.__set_static_gaus_pmfs()
         self.is_updated = 1
     
     # if we are done with calibration, and the pmfs are set, then go!
     if self.is_updated:
         
         # Get likelihoods of current vector observation
         self.__update_b_vec(cur_obs)
 
         # make a function call to update alpha
         self.__update_alpha()
         
         # update pmfs if necessary
         self.__update_pmfs(cur_obs)
Example #3
def circleNum(imageMatrix):
    """
    Number of circles (enclosed regions) of a digit image.
    :param imageMatrix: digit image
    :return: number of circles
    """
    M, N = imageMatrix.shape
    visited = np.zeros((M, N), dtype = int)
    stack = [(0, 0)]
    visited[0][0] = 1
    circle = 0
    while True:
        while len(stack) != 0:# do DFS to find connected component
            current = stack[-1]
            available = reachable(current, imageMatrix, visited)
            if len(available) == 0:
                stack.pop()
            else:
                chosen = available[0]
                visited[chosen[0]][chosen[1]] = 1
                stack.append(chosen)
        temp = np.logical_xor(visited, imageMatrix)
        if np.logical_not(temp.all()):# if there are components unvisited
            circle += 1
            i, j = np.unravel_index(np.argmax(np.logical_not(temp)), temp.shape)  # start DFS in one of the unvisited components
            stack.append((i, j))
            visited[i][j] = 1
        else:# all components visited
            return circle
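A self-contained alternative sketch of the same idea (not the author's reachable/DFS helper, which is assumed to be defined elsewhere): each "circle" of a digit is an enclosed background region, so labelling background components with SciPy and subtracting the single outer component gives the hole count.

import numpy as np
from scipy import ndimage

digit = np.array([[0, 1, 1, 1, 0],
                  [0, 1, 0, 1, 0],
                  [0, 1, 1, 1, 0],
                  [0, 1, 0, 1, 0],
                  [0, 1, 1, 1, 0]])          # a crude "8" with two holes

# Pad with background so the outer region is one connected component,
# then count background components: everything except the outer one is a hole.
background = np.pad(np.logical_not(digit), 1, constant_values=True)
_, n_components = ndimage.label(background)
print(n_components - 1)                       # -> 2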
Example #4
File: misc.py  Project: B-Rich/nipype
    def _run_interface(self, runtime):
        nii1 = nb.load(self.inputs.volume1)
        nii2 = nb.load(self.inputs.volume2)

        origdata1 = np.logical_not(np.logical_or(nii1.get_data() == 0, np.isnan(nii1.get_data())))
        origdata2 = np.logical_not(np.logical_or(nii2.get_data() == 0, np.isnan(nii2.get_data())))

        if isdefined(self.inputs.mask_volume):
            maskdata = nb.load(self.inputs.mask_volume).get_data()
            maskdata = np.logical_not(np.logical_or(maskdata == 0, np.isnan(maskdata)))
            origdata1 = np.logical_and(maskdata, origdata1)
            origdata2 = np.logical_and(maskdata, origdata2)

        for method in ("dice", "jaccard"):
            setattr(self, "_" + method, self._bool_vec_dissimilarity(origdata1, origdata2, method=method))

        self._volume = int(origdata1.sum() - origdata2.sum())

        both_data = np.zeros(origdata1.shape)
        both_data[origdata1] = 1
        both_data[origdata2] += 2

        nb.save(nb.Nifti1Image(both_data, nii1.get_affine(), nii1.get_header()), self.inputs.out_file)

        return runtime
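The _bool_vec_dissimilarity helper is not shown above; a minimal sketch of the Dice and Jaccard overlap between two boolean masks looks like this (note the nipype helper may return dissimilarities rather than similarities).

import numpy as np

mask1 = np.array([True, True, False, True, False])
mask2 = np.array([True, False, False, True, True])

intersection = np.logical_and(mask1, mask2).sum()
union = np.logical_or(mask1, mask2).sum()

jaccard = intersection / union                          # 2 / 4 = 0.5
dice = 2 * intersection / (mask1.sum() + mask2.sum())   # 4 / 6 ≈ 0.667
print(jaccard, dice)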
Example #5
    def __dectree_train(self, X, Y, L, R, F, T, next, depth, minParent, maxDepth, minScore, nFeatures):
        """
        This is a recursive helper method that trains the decision tree. Used in:
            train

        TODO:
            compare for numerical tolerance
        """
        n,d = mat(X).shape

        # check leaf conditions...
        if n < minParent or depth >= maxDepth or np.var(Y) < minScore:
            assert n != 0, ('TreeRegress.__dectree_train: tried to create size zero node')
            return self.__output_leaf(Y, n, L, R, F, T, next)

        best_val = np.inf
        best_feat = -1
        try_feat = np.random.permutation(d)

        # ...otherwise, search over (allowed) features
        for i_feat in try_feat[0:nFeatures]:
            dsorted = arr(np.sort(X[:,i_feat].T)).ravel()                       # sort data...
            pi = np.argsort(X[:,i_feat].T)                                      # ...get sorted indices...
            tsorted = Y[pi].ravel()                                             # ...and sort targets by feature ID
            can_split = np.append(arr(dsorted[:-1] != dsorted[1:]), 0)          # which indices are valid split points?

            if not np.any(can_split):          # no way to split on this feature?
                continue

            # find min weighted variance among split points
            val,idx = self.__min_weighted_var(tsorted, can_split, n)

            # save best feature and split point found so far
            if val < best_val:
                best_val = val
                best_feat = i_feat
                best_thresh = (dsorted[idx] + dsorted[idx + 1]) / 2

        # if no split possible, output leaf (prediction) node
        if best_feat == -1:         
            return self.__output_leaf(Y, n, L, R, F, T, next)

        # split data on best_feat at threshold best_thresh
        F[next] = best_feat
        T[next] = best_thresh
        go_left = X[:,F[next]] < T[next]
        my_idx = next
        next += 1

        # recur left
        L[my_idx] = next    
        L,R,F,T,next = self.__dectree_train(X[go_left,:], Y[go_left], L, R, F, T, 
            next, depth + 1, minParent, maxDepth, minScore, nFeatures)

        # recur right
        R[my_idx] = next    
        L,R,F,T,next = self.__dectree_train(X[np.logical_not(go_left),:], Y[np.logical_not(go_left)], L, R, F, T, 
            next, depth + 1, minParent, maxDepth, minScore, nFeatures)

        return (L,R,F,T,next)
Example #6
def test_multilabel_hamming_loss():
    # Dense label indicator matrix format
    y1 = np.array([[0, 1, 1], [1, 0, 1]])
    y2 = np.array([[0, 0, 1], [1, 0, 1]])

    assert_equal(hamming_loss(y1, y2), 1 / 6)
    assert_equal(hamming_loss(y1, y1), 0)
    assert_equal(hamming_loss(y2, y2), 0)
    assert_equal(hamming_loss(y2, np.logical_not(y2)), 1)
    assert_equal(hamming_loss(y1, np.logical_not(y1)), 1)
    assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
    assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)

    with ignore_warnings():  # sequence of sequences is deprecated
        # List of tuple of label
        y1 = [(1, 2,), (0, 2,)]
        y2 = [(2,), (0, 2,)]

        assert_equal(hamming_loss(y1, y2), 1 / 6)
        assert_equal(hamming_loss(y1, y1), 0)
        assert_equal(hamming_loss(y2, y2), 0)
        assert_equal(hamming_loss(y2, [(), ()]), 0.75)
        assert_equal(hamming_loss(y1, [tuple(), (10, )]), 0.625)
        assert_almost_equal(hamming_loss(y2, [tuple(), (10, )],
                                         classes=np.arange(11)), 0.1818, 2)
Example #7
def evolution_of_votes_singleMP(dates, votes, wa_all, wa_party, name, asciiname):
    if not do_plots: return
    f = plt.figure(figsize=figsize_long)
    f.suptitle(u'Гласове и отсъствия на %s през годините.'%name)
    absences = f.add_subplot(3,1,3)
    with_all = f.add_subplot(3,1,1, sharex=absences)
    with_party = f.add_subplot(3,1,2, sharex=absences)

    all_votes_no_abs = np.sum(votes[:,:3], 1)
    all_votes = np.sum(votes, 1)
    mask_no_abs = np.logical_not(all_votes_no_abs)
    mask = np.logical_not(all_votes)
    with_all_array = np.ma.masked_array(100*wa_all[:,0], mask=mask_no_abs)/all_votes_no_abs
    with_party_array = np.ma.masked_array(100*wa_party[:,0], mask=mask_no_abs)/all_votes_no_abs
    absences_array = np.ma.masked_array(100*votes[:,3], mask=mask)/all_votes

    with_all.plot(dates, with_all_array, '.-', alpha=0.3, linewidth=0.1)
    with_all.legend([u'% съгласие с мнозинството (без отсъствия)'])
    with_party.plot(dates, with_party_array, '.-', alpha=0.3, linewidth=0.1)
    with_party.legend([u'% съгласие с партията (без отсъствия)'])
    absences.plot(dates, absences_array, '.-', alpha=0.3, linewidth=0.1)
    absences.legend([u'% отсъствия'])

    with_all.set_yticks([25, 50, 75])
    with_party.set_yticks([25, 50, 75])
    absences.set_yticks([25, 50, 75])
    with_all.set_ylim(0, 100)
    with_party.set_ylim(0, 100)
    absences.set_ylim(0, 100)
    absences.set_xlim(dates[0], dates[-1])
    f.autofmt_xdate()
    f.savefig('generated_html/vote_evol_%s.png'%asciiname)
    plt.close()
Example #8
def splitFullSparse(X, Y):
    indexSparse = Y[:, 10] == -1
    xFull = X[np.logical_not(indexSparse), :]
    yFull = Y[np.logical_not(indexSparse), :]
    xSparse = X[indexSparse, :]
    ySparse = Y[indexSparse, :]
    return xFull, yFull, xSparse, ySparse
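A small usage sketch (assuming splitFullSparse above is in scope): rows whose 11th column of Y equals -1 are split off as the "sparse" part.

import numpy as np

X = np.arange(8).reshape(4, 2)
Y = np.zeros((4, 11))
Y[[1, 3], 10] = -1                    # mark rows 1 and 3 as sparsely labelled

xFull, yFull, xSparse, ySparse = splitFullSparse(X, Y)
print(xFull.shape, xSparse.shape)     # -> (2, 2) (2, 2)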
Example #9
def test_multilabel_accuracy_score_subset_accuracy():
    # Dense label indicator matrix format
    y1 = np.array([[0, 1, 1], [1, 0, 1]])
    y2 = np.array([[0, 0, 1], [1, 0, 1]])

    assert_equal(accuracy_score(y1, y2), 0.5)
    assert_equal(accuracy_score(y1, y1), 1)
    assert_equal(accuracy_score(y2, y2), 1)
    assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
    assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
    assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
    assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)

    with ignore_warnings():  # sequence of sequences is deprecated
        # List of tuple of label
        y1 = [(1, 2,), (0, 2,)]
        y2 = [(2,), (0, 2,)]

        assert_equal(accuracy_score(y1, y2), 0.5)
        assert_equal(accuracy_score(y1, y1), 1)
        assert_equal(accuracy_score(y2, y2), 1)
        assert_equal(accuracy_score(y2, [(), ()]), 0)
        assert_equal(accuracy_score(y1, y2, normalize=False), 1)
        assert_equal(accuracy_score(y1, y1, normalize=False), 2)
        assert_equal(accuracy_score(y2, y2, normalize=False), 2)
        assert_equal(accuracy_score(y2, [(), ()], normalize=False), 0)
Example #10
 def test_insert_nan(self):
     """Test fetching of null values"""
     b = self.rel.fetch('value', order_by='id')
     assert_true((np.isnan(self.a) == np.isnan(b)).all(),
                 'incorrect handling of Nans')
     assert_true(np.allclose(self.a[np.logical_not(np.isnan(self.a))], b[np.logical_not(np.isnan(b))]),
                 'incorrect storage of floats')
Example #11
def _extrapolate_out_mask(data, mask, iterations=1):
    """ Extrapolate values outside of the mask.
    """
    if iterations > 1:
        data, mask = _extrapolate_out_mask(data, mask,
                                          iterations=iterations - 1)
    new_mask = ndimage.binary_dilation(mask)
    larger_mask = np.zeros(np.array(mask.shape) + 2, dtype=np.bool)
    larger_mask[1:-1, 1:-1, 1:-1] = mask
    # Use nans as missing value: ugly
    masked_data = np.zeros(larger_mask.shape + data.shape[3:])
    masked_data[1:-1, 1:-1, 1:-1] = data.copy()
    masked_data[np.logical_not(larger_mask)] = np.nan
    outer_shell = larger_mask.copy()
    outer_shell[1:-1, 1:-1, 1:-1] = np.logical_xor(new_mask, mask)
    outer_shell_x, outer_shell_y, outer_shell_z = np.where(outer_shell)
    extrapolation = list()
    for i, j, k in [(1, 0, 0), (-1, 0, 0), 
                    (0, 1, 0), (0, -1, 0),
                    (0, 0, 1), (0, 0, -1)]:
        this_x = outer_shell_x + i
        this_y = outer_shell_y + j
        this_z = outer_shell_z + k
        extrapolation.append(masked_data[this_x, this_y, this_z])

    extrapolation = np.array(extrapolation)
    extrapolation = (np.nansum(extrapolation, axis=0)
                     / np.sum(np.isfinite(extrapolation), axis=0))
    extrapolation[np.logical_not(np.isfinite(extrapolation))] = 0
    new_data = np.zeros_like(masked_data)
    new_data[outer_shell] = extrapolation
    new_data[larger_mask] = masked_data[larger_mask]
    return new_data[1:-1, 1:-1, 1:-1], new_mask
Example #12
def is_leap_year(year, gregorian=True):
    """Return True if this is a leap year in the Julian or Gregorian calendars

    Arguments:
      - `year` : (int) year

    Keywords:
      - `gregorian` : (bool, default=True) If True, use Gregorian calendar,
        else use Julian calendar

    Returns:
      - (bool) True is this is a leap year, else False.

    """
    year = np.atleast_1d(year).astype(np.int64)
    x = np.fmod(year, 4)
    if gregorian:
        x = np.fmod(year, 4)
        y = np.fmod(year, 100)
        z = np.fmod(year, 400)
        return _scalar_if_one(
            np.logical_and(np.logical_not(x),
                           np.logical_or(y, np.logical_not(z))))
    else:
        return _scalar_if_one(x == 0)
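A self-contained sketch of the vectorized Gregorian rule used above (divisible by 4, and either not divisible by 100 or divisible by 400), checked against the standard library; the _scalar_if_one helper is omitted here.

import calendar
import numpy as np

years = np.array([1900, 2000, 2020, 2021, 2100])
x, y, z = np.fmod(years, 4), np.fmod(years, 100), np.fmod(years, 400)
leap = np.logical_and(np.logical_not(x), np.logical_or(y, np.logical_not(z)))
print(leap)                                        # [False  True  True False False]
print([calendar.isleap(int(yr)) for yr in years])  # [False, True, True, False, False]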
Example #13
def binary_hit_or_miss(input, structure1 = None, structure2 = None,
                       output = None, origin1 = 0, origin2 = None):
    """Multi-dimensional binary hit-or-miss transform.

    An output array can optionally be provided. The origin parameters
    control the placement of the structuring elements. If the first
    structuring element is not given, one is generated with a squared
    connectivity equal to one. If the second structuring element is
    not provided, it is set equal to the inverse of the first structuring
    element. If the origin for the second structure is equal to None
    it is set equal to the origin of the first.
    """
    input = numpy.asarray(input)
    if structure1 is None:
        structure1 = generate_binary_structure(input.ndim, 1)
    if structure2 is None:
        structure2 = numpy.logical_not(structure1)
    origin1 = _ni_support._normalize_sequence(origin1, input.ndim)
    if origin2 is None:
        origin2 = origin1
    else:
        origin2 = _ni_support._normalize_sequence(origin2, input.ndim)

    tmp1 = _binary_erosion(input, structure1, 1, None, None, 0, origin1,
                           0, False)
    inplace = isinstance(output, numpy.ndarray)
    result = _binary_erosion(input, structure2, 1, None, output, 0,
                             origin2, 1, False)
    if inplace:
        numpy.logical_not(output, output)
        numpy.logical_and(tmp1, output, output)
    else:
        numpy.logical_not(result, result)
        return numpy.logical_and(tmp1, result)
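A short usage sketch of the public SciPy wrapper for this transform. With the default structuring elements (a 3x3 cross and its logical complement), the transform fires only where the cross fits entirely inside the foreground and the diagonal neighbours are all background, e.g. the centre of a plus-shaped blob.

import numpy as np
from scipy import ndimage

a = np.zeros((5, 5), dtype=int)
a[2, 1:4] = 1
a[1:4, 2] = 1                 # a plus-shaped blob
hit = ndimage.binary_hit_or_miss(a)
print(hit.astype(int))        # a single 1 at the centre of the plus, (2, 2)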
Example #14
def rms_diff(vec1, vec2, tol):
    mask = np.logical_and(np.logical_not(np.isnan(vec1)), np.logical_not(np.isnan(vec2)))
    vec1 = vec1[mask]
    vec2 = vec2[mask]
    err = np.sqrt(np.mean(((vec1 - vec2) / vec2) ** 2))
    print "RMS Error: ", err, "(> ", tol, ")"
    return err < tol
Example #15
def max_diff(vec1, vec2, tol):
    mask = np.logical_and(np.logical_not(np.isnan(vec1)), np.logical_not(np.isnan(vec2)))
    vec1 = vec1[mask]
    vec2 = vec2[mask]
    err = np.max(np.abs((vec1 - vec2)))
    print "Max Diff: ", err, "(> ", tol, ")"
    return err < tol
Example #16
 def applyMorphologicalCleaning(self, image):
 	"""
 	Applies a variety of morphological operations to improve the detection
 	of worms in the image.
 	Takes 0.030 s on MUSSORGSKY for a typical frame region
 	Takes 0.030 s in MATLAB too
 	"""
     # start with worm == 1
     image = image.copy()
     segmentation.clear_border(image)  # remove objects at edge (worm == 1)
     # fix defects in the thresholding by closing with a worm-width disk
     # worm == 1
     wormSE = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                        (self.wormDiskRadius+1,
                                        	self.wormDiskRadius+1))
     imcl = cv2.morphologyEx(np.uint8(image), cv2.MORPH_CLOSE, wormSE)
     imcl = np.equal(imcl, 1)
     # fix defects by filling holes
     imholes = ndimage.binary_fill_holes(imcl)
     imcl = np.logical_or(imholes, imcl)
     # fix barely touching regions
     # majority with worm pixels == 1 (median filter same?)
     imcl = nf.median_filter(imcl, footprint=[[1, 1, 1],
                                              [1, 0, 1],
                                              [1, 1, 1]])
     # diag with worm pixels == 0
     imcl = np.logical_not(bwdiagfill(np.logical_not(imcl)))
     # open with worm pixels == 1
     openSE = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))
     imcl = cv2.morphologyEx(np.uint8(imcl), cv2.MORPH_OPEN, openSE)
     return np.equal(imcl, 1)
Example #17
File: paper1.py  Project: wafels/rednoise
def fix_nonfinite(data):
    bad_indexes = np.logical_not(np.isfinite(data))
    good_indexes = np.logical_not(bad_indexes)
    good_data = data[good_indexes]
    interpolated = np.interp(bad_indexes.nonzero()[0], good_indexes.nonzero()[0], good_data)
    data[bad_indexes] = interpolated
    return data
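A quick usage sketch (assuming fix_nonfinite above is in scope): non-finite samples are replaced, in place, by linear interpolation between their finite neighbours.

import numpy as np

signal = np.array([1.0, 2.0, np.nan, 4.0, np.inf, 6.0])
print(fix_nonfinite(signal))   # -> [1. 2. 3. 4. 5. 6.]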
Example #18
def load_data():
    # Read file content
    training_file_content = pd.read_csv(TRAINING_FILE_PATH)
    testing_file_content = pd.read_csv(TESTING_FILE_PATH)
    combined_file_content = pd.concat([training_file_content, testing_file_content])

    # Manipulate file content
    X = combined_file_content.drop([ID_COLUMN_NAME, LABEL_COLUMN_NAME], axis=1).as_matrix()
    categorical_features_mask_list = []
    for column_vector in X.T:
        valid_elements_mask = np.logical_not(pd.isnull(column_vector))
        if np.can_cast(type(column_vector[valid_elements_mask][0]), np.float):
            categorical_features_mask_list.append(False)
            min_value = np.min(column_vector[valid_elements_mask])
            column_vector[np.logical_not(valid_elements_mask)] = min_value - 1
        else:
            categorical_features_mask_list.append(True)
            column_vector[np.logical_not(valid_elements_mask)] = "Missing"
            column_vector[:] = perform_categorization(column_vector)
    encoder = OneHotEncoder(categorical_features=categorical_features_mask_list)
    X = encoder.fit_transform(X).toarray()

    # Separate the data set
    Y = combined_file_content[LABEL_COLUMN_NAME].as_matrix()
    ID = combined_file_content[ID_COLUMN_NAME].as_matrix()
    test_data_mask = pd.isnull(Y)
    X_train = X[np.logical_not(test_data_mask)]
    Y_train = Y[np.logical_not(test_data_mask)]
    X_test = X[test_data_mask]
    ID_test = ID[test_data_mask]

    return X_train, Y_train, X_test, ID_test
Example #19
File: case.py  Project: LucMiaz/KG
 def compare(self, result, t , noiseType = 'Z', sum = True, full=True):
     """Compares the discretization of this case with the one of an algorithm whose results are given in otherdisc. timeparam variable contains the variables for the discretization. Returns a dictionnary with the number of True positives, True negatives, False positives and False negatives"""
     #restrict comparation between Tb and Te
     
     otherdisc=result
     try:
         fulldisc=self.case[noiseType].discretize(t)
         intdisc=[int(b) for b in fulldisc]
         assert( len(otherdisc) == len(fulldisc) )
         #assert(not any([i==None for i in disc]))
     except AssertionError:
         print('something wrong in function of ', self)
     if full:
         disc=fulldisc
     else: 
         mask = np.logical_and(t >= self.case['Tb'], t <= self.case['Te'])
         otherdisc = result[mask]
         disc = fulldisc[mask]
         t=t[mask]
     retTF={}
     retTF['TP'] = np.logical_and(otherdisc,disc)
     retTF['TN'] = np.logical_and(np.logical_not(otherdisc), np.logical_not(disc))
     retTF['FP'] = np.logical_and(otherdisc, np.logical_not(disc))
     retTF['FN'] = np.logical_and(np.logical_not(otherdisc),  disc)
     if sum:
         for k, v in retTF.items():
             retTF[k]= int(v.sum())
     else:
         retTF['t'] = t
         retTF['disc'] = disc
     return retTF, intdisc
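A self-contained sketch of the four confusion counts assembled in compare(), using two toy boolean discretizations in place of the case and algorithm results.

import numpy as np

other = np.array([True, True, False, False, True])   # algorithm discretization
ref = np.array([True, False, False, True, True])     # reference discretization

counts = {
    'TP': int(np.logical_and(other, ref).sum()),
    'TN': int(np.logical_and(np.logical_not(other), np.logical_not(ref)).sum()),
    'FP': int(np.logical_and(other, np.logical_not(ref)).sum()),
    'FN': int(np.logical_and(np.logical_not(other), ref).sum()),
}
print(counts)   # -> {'TP': 2, 'TN': 1, 'FP': 1, 'FN': 1}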
Example #20
def fitAndPredict(X, y, train_mask):
    #  partition into train/test set
    Xtrain = X[train_mask]
    ytrain = y[train_mask]
    Xtest = X[np.logical_not(train_mask)]
    ytest = y[np.logical_not(train_mask)]
    
    # Fit model
    linreg = lm.LinearRegression(fit_intercept = False)
    linreg.fit(Xtrain, ytrain)
    
    # Extract parameters
    coef = linreg.coef_
    names = X.columns.tolist()
    print([name + ':' + str(round(w,3)) for name, w in zip(names, coef)])
    
    # Measure train error
    yhat = linreg.predict(Xtrain)
    (mse, stderr) = L2loss(yhat, ytrain) 
    print 'Train mse {0:0.3f}, stderr {1:0.3f}'.format(mse, stderr) 
    
    # Measure test error
    yhat = linreg.predict(Xtest)
    (mse, stderr) = L2loss(yhat, ytest) 
    print 'Test mse {0:0.3f}, stderr {1:0.3f}'.format(mse, stderr) 
Example #21
  def _compute_health_pill(self, x):
    x_clean = x[np.where(
        np.logical_and(
            np.logical_not(np.isnan(x)), np.logical_not(np.isinf(x))))]
    if np.size(x_clean):
      x_min = np.min(x_clean)
      x_max = np.max(x_clean)
      x_mean = np.mean(x_clean)
      x_var = np.var(x_clean)
    else:
      x_min = np.inf
      x_max = -np.inf
      x_mean = np.nan
      x_var = np.nan

    return np.array([
        1.0,  # Assume is initialized.
        np.size(x),
        np.sum(np.isnan(x)),
        np.sum(x == -np.inf),
        np.sum(np.logical_and(x < 0.0, x != -np.inf)),
        np.sum(x == 0.0),
        np.sum(np.logical_and(x > 0.0, x != np.inf)),
        np.sum(x == np.inf),
        x_min,
        x_max,
        x_mean,
        x_var,
        float(tf.as_dtype(x.dtype).as_datatype_enum),
        float(len(x.shape)),
    ] + list(x.shape))
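A small standalone sketch of the sign/NaN/inf bucketing used in the health pill, on a toy array.

import numpy as np

x = np.array([np.nan, -np.inf, -2.0, 0.0, 3.0, np.inf])
buckets = [
    int(np.sum(np.isnan(x))),                            # NaN count
    int(np.sum(x == -np.inf)),                           # -inf count
    int(np.sum(np.logical_and(x < 0.0, x != -np.inf))),  # finite negatives
    int(np.sum(x == 0.0)),                               # zeros
    int(np.sum(np.logical_and(x > 0.0, x != np.inf))),   # finite positives
    int(np.sum(x == np.inf)),                            # +inf count
]
print(buckets)   # -> [1, 1, 1, 1, 1, 1]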
Example #22
 def run(self, outputs_requested, **kwargs):
     # TODO find some interface that doesn't involve string parsing
     # modeled after pandas.Dataframe.query:
     #     http://pandas.pydata.org/pandas-docs/dev/generated/pandas.DataFrame.query.html
     # which implements its own computation engine:
     #     http://pandas.pydata.org/pandas-docs/dev/generated/pandas.eval.html
     # supports numpy arithmetic comparison operators:
     #     http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html#arithmetic-and-comparison-operations
     in_table = kwargs['input'].to_np()
     col_names = in_table.dtype.names
     query = self.__get_ast(col_names)
     mask = eval(compile(query, '<string>', 'eval'))
     ret = {}
     if 'output' in outputs_requested:
         uo_out = UObject(UObjectPhase.Write)
         uo_out.from_np(in_table[mask])
         ret['output'] = uo_out
     if 'complement' in outputs_requested:
         uo_comp = UObject(UObjectPhase.Write)
         uo_comp.from_np(in_table[np.logical_not(mask)])
         ret['complement'] = uo_comp
     if 'output_inds' in outputs_requested:
         uo_out_inds = UObject(UObjectPhase.Write)
         uo_out_inds.from_np(np.where(mask)[0])
         ret['output_inds'] = uo_out_inds
     if 'complement_inds' in outputs_requested:
         uo_comp_inds = UObject(UObjectPhase.Write)
         uo_comp_inds.from_np(np.where(np.logical_not(mask))[0])
         ret['complement_inds'] = uo_comp_inds
     return ret
Example #23
File: tmfit.py  Project: wj2/vplt-analysis
def compare_fixlens(samp_fixlen, fixlendist, eps=.000000001):
    nonan_samp_fixlen = samp_fixlen[np.logical_not(np.isnan(samp_fixlen))]
    nonan_fixlendist = fixlendist[np.logical_not(np.isnan(fixlendist))]
    print nonan_samp_fixlen, nonan_fixlendist
    ks, p = sts.ks_2samp(nonan_samp_fixlen, nonan_fixlendist)
    print ks, p
    return np.log(p + eps)
Example #24
File: pelvis.py  Project: rprospero/PelVis
 def updateSingle(self,event=None):
     """Update the 2D data for the region of interest and intensity"""
     (vMin,vMax) = self.opPanel.getIntensityRange()
     (xMin,xMax,yMin,yMax) = self.opPanel.getRoi()
     data = self.flatdata[:,:]
   
     #Mask to zero during the summing parts
     data[np.logical_not(self.mask)] = 0
     self.posPanel.data = data
     self.posPanel.setRange(xMin,yMin,xMax,yMax)
     x=np.arange(128,0,-1)
     y=np.sum(data[:,xMin:xMax],axis=1)
     self.yPanel.SetPlot(x,y)
     #handle the x-plot
     x=np.arange(0,16,1)
     y=np.sum(data[yMin:yMax,:],axis=0)
     self.xPanel.SetPlot(x,y)
     if vMin is None:
         vMin = np.min(data)
     if vMax is None:
         vMax = np.max(data)
     self.colorbar.setRange(vMin,vMax)
     self.colorbar.update()
     #mask to vmin for the plotting
     data[np.logical_not(self.mask)] = vMin
     self.imPanel.update(self.flatdata,vMin,vMax)
Example #25
 def build_tree_vector(points_r,points_c,levels_left,local_out_array):
     
     tile_rs = tile[points_r,points_c].reshape( -1,fs);
     local_out_array[0,:] = ma.mean(tile_rs,axis=0)
     
         #plt.plot(points_r,points_c,'o')
     if levels_left > 1:
         remaining_out_array = local_out_array[1:,:]
         mean_r = np.mean(points_r);
         mean_c = np.mean(points_c)
         
         offset_size = remaining_out_array.shape[0]/4
 
         top = points_r < mean_r
         bottom = np.logical_not(top)
         left = points_c < mean_c
         right = np.logical_not(left)
         
         quadrents = [ (top,right),(top,left),(bottom,left),(bottom,right)  ]
         
          # Fill the solution for all 4 quadrants
         for idx,quadrent in enumerate(quadrents):
             q = np.logical_and(quadrent[0],quadrent[1])
             q_out = remaining_out_array[ idx*offset_size : (idx+1)*offset_size, : ]
             build_tree_vector(points_r[q],points_c[q],levels_left - 1,q_out)
          # renormalize
         remaining_out_array *= .25
Example #26
    def resample(self):
        """
        :return:
            Return the data with majority samples that form a Tomek link
            removed.
        """

        from sklearn.neighbors import NearestNeighbors

        # Find the nearest neighbour of every point
        nn = NearestNeighbors(n_neighbors=2)
        nn.fit(self.x)
        nns = nn.kneighbors(self.x, return_distance=False)[:, 1]

        # Send the information to is_tomek function to get boolean vector back
        if self.verbose:
            print("Looking for majority Tomek links...")
        links = self.is_tomek(self.y, nns, self.minc, self.verbose)

        if self.verbose:
            print("Under-sampling "
                  "performed: " + str(Counter(self.y[logical_not(links)])))

        # Return data set without majority Tomek links.
        return self.x[logical_not(links)], self.y[logical_not(links)]
Example #27
    def data(self, t=None, extrapolate=np.nan, return_indices=False):
        if t is None:
            d = self.D
            ix = np.arange(len(d))
        else:
            t = np.array(t)
            t0 = self.starttime()
            t1 = self.endtime()
            ix = np.array(np.round((t-t0)/self.dT)).astype(int)
            in_range = np.logical_and(t>=t0, t<= t1)

            if extrapolate is None:
                ix = ix[in_range]
            elif extrapolate is False:
                ix[t<t0] = 0
                ix[t>t1] = self.nD
            else:
                if any(np.logical_not(in_range)):
                    ix[np.logical_not(in_range)] = extrapolate

            d = selectalonglastdimension(self.D,ix)
        if return_indices:
            return (d,ix)
        else:
            return d
Example #28
File: metrics.py  Project: Conxz/nipype
    def _eucl_max(self, nii1, nii2):
        origdata1 = nii1.get_data()
        origdata1 = np.logical_not(
            np.logical_or(origdata1 == 0, np.isnan(origdata1)))
        origdata2 = nii2.get_data()
        origdata2 = np.logical_not(
            np.logical_or(origdata2 == 0, np.isnan(origdata2)))

        if isdefined(self.inputs.mask_volume):
            maskdata = nb.load(self.inputs.mask_volume).get_data()
            maskdata = np.logical_not(
                np.logical_or(maskdata == 0, np.isnan(maskdata)))
            origdata1 = np.logical_and(maskdata, origdata1)
            origdata2 = np.logical_and(maskdata, origdata2)

        if origdata1.max() == 0 or origdata2.max() == 0:
            return np.NaN

        border1 = self._find_border(origdata1)
        border2 = self._find_border(origdata2)

        set1_coordinates = self._get_coordinates(border1, nii1.affine)
        set2_coordinates = self._get_coordinates(border2, nii2.affine)
        distances = cdist(set1_coordinates.T, set2_coordinates.T)
        mins = np.concatenate(
            (np.amin(distances, axis=0), np.amin(distances, axis=1)))

        return np.max(mins)
Example #29
def variable_recomb_example():
    infile = "../hapmap/genetic_map_GRCh37_chr22.txt"
    # Read in the recombination map using the read_hapmap method,
    recomb_map = msprime.RecombinationMap.read_hapmap(infile)

    # Now we get the positions and rates from the recombination
    # map and plot these using 500 bins.
    positions = np.array(recomb_map.get_positions()[1:])
    rates = np.array(recomb_map.get_rates()[1:])
    num_bins = 500
    v, bin_edges, _ = scipy.stats.binned_statistic(
        positions, rates, bins=num_bins)
    x = bin_edges[:-1][np.logical_not(np.isnan(v))]
    y = v[np.logical_not(np.isnan(v))]
    fig, ax1 = pyplot.subplots(figsize=(16, 6))
    ax1.plot(x, y, color="blue")
    ax1.set_ylabel("Recombination rate")
    ax1.set_xlabel("Chromosome position")

    # Now we run the simulation for this map. We assume Ne=10^4
    # and have a sample of 100 individuals
    tree_sequence = msprime.simulate(
        sample_size=100,
        Ne=10**4,
        recombination_map=recomb_map)
    # Now plot the density of breakpoints along the chromosome
    breakpoints = np.array(list(tree_sequence.breakpoints()))
    ax2 = ax1.twinx()
    v, bin_edges = np.histogram(breakpoints, num_bins, density=True)
    ax2.plot(bin_edges[:-1], v, color="green")
    ax2.set_ylabel("Breakpoint density")
    ax2.set_xlim(1.5e7, 5.3e7)
    fig.savefig("_static/hapmap_chr22.svg")
Example #30
def find_large_empty_regions(grayscale_vol, min_background_voxel_count=100):
    """
    Returns mask that excludes large background (0-valued) regions, if any exist.
    """    
    if not (grayscale_vol == 0).any():
        # No background pixels.
        # We could return all ones, but we are also allowed
        # by convention to return 'None', which is faster.
        return None

    # Produce a mask that excludes 'background' pixels
    # (typically zeros around the volume edges)
    background_mask = numpy.zeros(grayscale_vol.shape, dtype=numpy.uint8)
    background_mask[grayscale_vol == 0] = 1

    # Compute connected components (cc) and toss out the small components
    cc = scipy.ndimage.label(background_mask)[0]
    cc_sizes = numpy.bincount(cc.ravel())
    small_cc_selections = cc_sizes < min_background_voxel_count
    small_cc_locations = small_cc_selections[cc]
    background_mask[small_cc_locations] = 0

    if not background_mask.any():
        # No background pixels.
        # We could return all ones, but we are also allowed
        # by convention to return 'None', which is faster.
        return None
    
    # Now background_mask == 1 for background and 0 elsewhere, so invert.
    numpy.logical_not(background_mask, out=background_mask)
    return background_mask.view(numpy.bool_)
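A small usage sketch (assuming the function above and its numpy / scipy.ndimage imports are in scope): a volume whose first slab is zero-valued background yields a mask that is False over that slab and True elsewhere.

import numpy

vol = numpy.full((5, 20, 20), 128, dtype=numpy.uint8)
vol[0] = 0                                    # 400 background voxels in slab 0
mask = find_large_empty_regions(vol, min_background_voxel_count=100)
print(mask[0].any(), mask[1:].all())          # -> False True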
Example #31
 def get_predictions(self, itype, image_id,
                     return_imgs = False, avoid_read_weights=False, return_score = False, 
                     use_cache=None, force_save=False, verbose=True):
     
     start_time_L1 = time.time()
     use_cache = self.Data.exec_settings['cache'] == "True" if use_cache is None else use_cache
     pred = None
     score = None
     score_txt = 'R2_score'
     
     if use_cache & (not force_save):
         try:
             file_to_load = os.path.join(self.path_predictions, itype, '{}_{}_pred.npy.gz'.format(itype, image_id))
             with gzip.open(file_to_load, 'rb') as f:
                 pred = np.load(f)
             if not return_imgs:
                 if verbose:
                     print("Read prediction {}_{} in {:.2f} s".format(itype, image_id, 
                           (time.time() - start_time_L1)/1))
                 return pred
         except:
             if verbose:
                 print("File not in cache")
                 
     imgs, labels, info = self.read_image(itype, image_id, frame = 'all', split_wrap_imgs = True,
                                      read_labels=(itype=='train'), verbose=verbose)
     
     if pred is None:
         
         #get weights
         if (self.weights_file is None) or not avoid_read_weights:
             self.dsetID = ld.read_dsetID() if self.dsetID is None else self.dsetID
             fold_id = self.dsetID.loc[(self.dsetID.video_id == image_id) & (self.dsetID.itype == itype), 
                                       self.fold_column]
             fold_id = fold_id.values[0]
             if self.prev_foldID != fold_id:
                 weight_file = self.weights_format.format(fold_id=fold_id)
                 self.load_weights(weight_file, verbose=verbose)
                 self.prev_foldID = fold_id            
         
         # predict
         pred = self.predict_BATCH(imgs)
         
         # Save cache
         if use_cache|force_save:
             if not os.path.exists(os.path.join(self.path_predictions, itype)):
                 os.makedirs(os.path.join(self.path_predictions, itype))
             file_to_save = os.path.join(self.path_predictions, itype, '{}_{}_pred.npy'.format(itype, image_id))    
             np.save(file_to_save, pred)
             with open(file_to_save, 'rb') as f_in, gzip.open(file_to_save + '.gz', 'wb') as f_out:
                 shutil.copyfileobj(f_in, f_out)
             os.remove(file_to_save)
                     
     
     # evaluate
     if labels is not None:
         from sklearn.metrics import r2_score
         np_labels = np.vstack(labels)[:,0]
         np_preds = pred[:,0]
         y_true = (np_labels[np.logical_not(np.isnan(np_labels))]).astype(np.float32)
         y_pred = (np_preds[np.logical_not(np.isnan(np_labels))]).astype(np.float32)
         score = r2_score(y_true, y_pred)
     
     if verbose: 
         if score is not None:
             print("Read prediction {}_{} ({}: {:.5f}) in {:.2f} s".format(itype, image_id, score_txt, score, 
                   (time.time() - start_time_L1)/1))        
         else:
             print("Read prediction {}_{} in {:.2f} s".format(itype, image_id, (time.time() - start_time_L1)/1))        
     
     if return_imgs:
         if return_score:
             return pred, imgs, labels, score
         else:
             return pred, imgs, labels
         
     if return_score:
         return pred,  score
     else:
         return pred
Example #32
    else:
        print(
            " +              Computing static S0 and t2s maps from the data.")
        # Do a non-linear optimization to fit the original curve (no log-linear transformation)
        # using an optimization algorithm that takes boundaries.
        S0, t2s, SSE, mask_bad_staticFit = meb.make_static_maps_opt(
            SME_mean,
            tes,
            Ncpu,
            So_init=So_init,
            T2s_init=T2s_init,
            So_min=So_min,
            So_max=So_max,
            T2s_min=T2s_min,
            T2s_max=T2s_max)
        mask_bad_staticFit = np.logical_not(mask_bad_staticFit)
        meb.niiwrite_nv(S0, mask, stFit_S0_path, mepi_aff, mepi_head)
        meb.niiwrite_nv(t2s, mask, stFit_t2s_path, mepi_aff, mepi_head)
        meb.niiwrite_nv(SSE, mask, stFit_SSE_path, mepi_aff, mepi_head)
        meb.niiwrite_nv(
            mask_bad_staticFit, mask, stFit_bVx_path, mepi_aff, mepi_head
        )  # In mask_bad_staticFit, voxels that should be discarded are = 1 both in Python and in the associated AFNI brik
        # There is a simpler, yet less accurate way to do this.
        # Do a log-linear fit. The lack of boundaries and the log-linear transformation leads to
        # some voxels having completely wrong values for TE and S0. The benefit of this is that
        # this method is really fast.
        # S0,t2s,_,_ = meb.make_static_maps(S,tes)

    # Compute Linear Fit to Remove non-S0/T2* signals
    # -----------------------------------------------
    print("++ INFO [Main]: Remove non-S0/T2* signals....")
Example #33
    def generate_fva_warmup(self):
        """Generate the warmup points for the sampler.

        Generates warmup points by setting each flux as the sole objective
        and minimizing/maximizing it. Also caches the projection of the
        warmup points into the nullspace for non-homogeneous problems (only
        if necessary).
        """
        self.n_warmup = 0
        reactions = self.model.reactions
        self.warmup = np.zeros((2 * len(reactions), len(self.model.variables)))
        self.model.objective = Zero
        for sense in ("min", "max"):
            self.model.objective_direction = sense
            for i, r in enumerate(reactions):
                variables = (self.model.variables[self.fwd_idx[i]],
                             self.model.variables[self.rev_idx[i]])
                # Omit fixed reactions if they are non-homogeneous
                if r.upper_bound - r.lower_bound < bounds_tol:
                    LOGGER.info("skipping fixed reaction %s" % r.id)
                    continue
                self.model.objective.set_linear_coefficients({
                    variables[0]: 1,
                    variables[1]: -1
                })
                self.model.slim_optimize()
                if not self.model.solver.status == OPTIMAL:
                    LOGGER.info("can not maximize reaction %s, skipping it" %
                                r.id)
                    continue
                primals = self.model.solver.primal_values
                sol = [primals[v.name] for v in self.model.variables]
                self.warmup[self.n_warmup, ] = sol
                self.n_warmup += 1
                # Reset objective
                self.model.objective.set_linear_coefficients({
                    variables[0]: 0,
                    variables[1]: 0
                })
        # Shrink to measure
        self.warmup = self.warmup[0:self.n_warmup, :]
        # Remove redundant search directions
        keep = np.logical_not(self._is_redundant(self.warmup))
        self.warmup = self.warmup[keep, :]
        self.n_warmup = self.warmup.shape[0]

        # Catch some special cases
        if len(self.warmup.shape) == 1 or self.warmup.shape[0] == 1:
            raise ValueError("Your flux cone consists only of a single point!")
        elif self.n_warmup == 2:
            if not self.problem.homogeneous:
                raise ValueError("Can not sample from an inhomogenous problem"
                                 " with only 2 search directions :(")
            LOGGER.info("All search directions on a line, adding another one.")
            newdir = self.warmup.T.dot([0.25, 0.25])
            self.warmup = np.vstack([self.warmup, newdir])
            self.n_warmup += 1

        # Shrink warmup points to measure
        self.warmup = shared_np_array(
            (self.n_warmup, len(self.model.variables)), self.warmup)
Example #34
    def evaluate(self) -> Tuple[TrackingMetrics, TrackingMetricDataList]:
        """
        Performs the actual evaluation.
        :return: A tuple of high-level and the raw metric data.
        """
        start_time = time.time()
        metrics = TrackingMetrics(self.cfg)

        # -----------------------------------
        # Step 1: Accumulate metric data for all classes and distance thresholds.
        # -----------------------------------
        if self.verbose:
            print('Accumulating metric data...')
        metric_data_list = TrackingMetricDataList()

        def accumulate_class(curr_class_name):
            curr_ev = TrackingEvaluation(self.tracks_gt, self.tracks_pred, curr_class_name, self.cfg.dist_fcn_callable,
                                         self.cfg.dist_th_tp, self.cfg.min_recall,
                                         num_thresholds=TrackingMetricData.nelem,
                                         metric_worst=self.cfg.metric_worst,
                                         verbose=self.verbose,
                                         output_dir=self.output_dir,
                                         render_classes=self.render_classes)
            curr_md = curr_ev.accumulate()
            metric_data_list.set(curr_class_name, curr_md)

        for class_name in self.cfg.class_names:
            accumulate_class(class_name)

        # -----------------------------------
        # Step 2: Aggregate metrics from the metric data.
        # -----------------------------------
        if self.verbose:
            print('Calculating metrics...')
        for class_name in self.cfg.class_names:
            # Find best MOTA to determine threshold to pick for traditional metrics.
            # If multiple thresholds have the same value, pick the one with the highest recall.
            md = metric_data_list[class_name]
            if np.all(np.isnan(md.mota)):
                best_thresh_idx = None
            else:
                best_thresh_idx = np.nanargmax(md.mota)

            # Pick best value for traditional metrics.
            if best_thresh_idx is not None:
                for metric_name in MOT_METRIC_MAP.values():
                    if metric_name == '':
                        continue
                    value = md.get_metric(metric_name)[best_thresh_idx]
                    metrics.add_label_metric(metric_name, class_name, value)

            # Compute AMOTA / AMOTP.
            for metric_name in AVG_METRIC_MAP.keys():
                values = np.array(md.get_metric(AVG_METRIC_MAP[metric_name]))
                assert len(values) == TrackingMetricData.nelem

                if np.all(np.isnan(values)):
                    # If no GT exists, set to nan.
                    value = np.nan
                else:
                    # Overwrite any nan value with the worst possible value.
                    np.all(values[np.logical_not(np.isnan(values))] >= 0)
                    values[np.isnan(values)] = self.cfg.metric_worst[metric_name]
                    value = float(np.nanmean(values))
                metrics.add_label_metric(metric_name, class_name, value)

        # Compute evaluation time.
        metrics.add_runtime(time.time() - start_time)

        return metrics, metric_data_list
Example #35
def coupling_layer(x,
                   layer_name,
                   layer_index,
                   is_training,
                   n_layers,
                   layer_width,
                   parameters=None,
                   inverse=False,
                   use_first_layer_feats=False,
                   net_type='mlp',
                   use_coord_offset=False,
                   dim=3):

    with tf.variable_scope(layer_name, reuse=inverse):
        n_channels = x.shape[1]
        split = n_channels // 2
        if use_coord_offset:
            n_channels = dim
            split = n_channels // 2
            mask1 = (np.arange(n_channels) + layer_index) % n_channels < split
            mask = np.arange(n_channels)[mask1]
            not_mask = np.arange(n_channels)[np.logical_not(mask1)]
            x_unstacked = tf.unstack(x, axis=1)
            xA = tf.stack([x_unstacked[t] for t in mask], axis=1)
            xB = tf.stack([x_unstacked[t] for t in not_mask], axis=1)
        else:
            odd = layer_index % 2 == 0
            if odd:
                xA = x[:, :split]
                xB = x[:, split:]
            else:
                xA = x[:, split:]
                xB = x[:, :split]

        # 1. Pass through the first part
        yA = xA

        # 2. Remap second part
        if net_type == 'mlp':
            net_fun = models.nn.multilayer_fcn
        elif net_type == 'resnet':
            net_fun = models.nn.multilayer_resnet
        elif net_type == 'legacyresnet':
            net_fun = models.nn.multilayer_resnet_legacy

        if use_first_layer_feats:
            net_result = net_fun(tf.concat([xA, parameters], axis=1),
                                 is_training,
                                 None,
                                 n_layers,
                                 layer_width,
                                 use_batch_norm=False,
                                 reuse=inverse,
                                 name='m_net')
        else:
            net_result = net_fun(xA,
                                 is_training,
                                 parameters,
                                 n_layers,
                                 layer_width,
                                 use_batch_norm=False,
                                 reuse=inverse,
                                 name='m_net')

        n_params = int(xB.shape[1])
        # TODO: These could maybe also be depending on parameters?
        s = tf.contrib.layers.fully_connected(net_result,
                                              n_params,
                                              scope='s_fun',
                                              reuse=inverse,
                                              activation_fn=None)
        t = tf.contrib.layers.fully_connected(net_result,
                                              n_params,
                                              scope='t_fun',
                                              reuse=inverse,
                                              activation_fn=None)

        # Rescale s before applying the exponential to prevent overflow issues
        scale = tf.get_variable("rescaling_scale", [],
                                initializer=tf.constant_initializer(0.),
                                trainable=True)
        s = scale * tf.tanh(s)

        if inverse:
            yB = (xB - t) * tf.exp(-s)
        else:
            yB = xB * tf.exp(s) + t

        # Recombine data into one vector
        if use_coord_offset:
            yA_unstacked = tf.unstack(yA, axis=1)
            yB_unstacked = tf.unstack(yB, axis=1)
            out = [0] * dim
            for i, m in enumerate(mask):
                out[m] = yA_unstacked[i]
            for i, m in enumerate(not_mask):
                out[m] = yB_unstacked[i]
            out = tf.stack(out, axis=1)
        else:
            if odd:
                out = tf.concat([yA, yB], axis=1)
            else:
                out = tf.concat([yB, yA], axis=1)

        if inverse:  # Also return the exponential term which is part of the Jacobian of the sampling strategy
            return out, tf.reduce_sum(s, axis=1)
        else:
            return out
Example #36
def numeric_summary(tensor):
    """Get a text summary of a numeric tensor.

  This summary is only available for numeric (int*, float*, complex*) and
  Boolean tensors.

  Args:
    tensor: (`numpy.ndarray`) the tensor value object to be summarized.

  Returns:
    The summary text as a `RichTextLines` object. If the type of `tensor` is not
    numeric or Boolean, a single-line `RichTextLines` object containing a
    warning message will reflect that.
  """
    def _counts_summary(counts, skip_zeros=True, total_count=None):
        """Format values as a two-row table."""
        if skip_zeros:
            counts = [(count_key, count_val) for count_key, count_val in counts
                      if count_val]
        max_common_len = 0
        for count_key, count_val in counts:
            count_val_str = str(count_val)
            common_len = max(len(count_key) + 1, len(count_val_str) + 1)
            max_common_len = max(common_len, max_common_len)

        key_line = debugger_cli_common.RichLine("|")
        val_line = debugger_cli_common.RichLine("|")
        for count_key, count_val in counts:
            count_val_str = str(count_val)
            key_line += _pad_string_to_length(count_key, max_common_len)
            val_line += _pad_string_to_length(count_val_str, max_common_len)
        key_line += " |"
        val_line += " |"

        if total_count is not None:
            total_key_str = "total"
            total_val_str = str(total_count)
            max_common_len = max(len(total_key_str) + 1, len(total_val_str))
            total_key_str = _pad_string_to_length(total_key_str,
                                                  max_common_len)
            total_val_str = _pad_string_to_length(total_val_str,
                                                  max_common_len)
            key_line += total_key_str + " |"
            val_line += total_val_str + " |"

        return debugger_cli_common.rich_text_lines_from_rich_line_list(
            [key_line, val_line])

    if not isinstance(tensor, np.ndarray) or not np.size(tensor):
        return debugger_cli_common.RichTextLines(
            ["No numeric summary available due to empty tensor."])
    elif (np.issubdtype(tensor.dtype, np.floating)
          or np.issubdtype(tensor.dtype, np.complex)
          or np.issubdtype(tensor.dtype, np.integer)):
        counts = [("nan", np.sum(np.isnan(tensor))),
                  ("-inf", np.sum(np.isneginf(tensor))),
                  ("-",
                   np.sum(
                       np.logical_and(tensor < 0.0,
                                      np.logical_not(np.isneginf(tensor))))),
                  ("0", np.sum(tensor == 0.0)),
                  ("+",
                   np.sum(
                       np.logical_and(tensor > 0.0,
                                      np.logical_not(np.isposinf(tensor))))),
                  ("+inf", np.sum(np.isposinf(tensor)))]
        output = _counts_summary(counts, total_count=np.size(tensor))

        valid_array = tensor[np.logical_not(
            np.logical_or(np.isinf(tensor), np.isnan(tensor)))]
        if np.size(valid_array):
            stats = [("min", np.min(valid_array)),
                     ("max", np.max(valid_array)),
                     ("mean", np.mean(valid_array)),
                     ("std", np.std(valid_array))]
            output.extend(_counts_summary(stats, skip_zeros=False))
        return output
    elif tensor.dtype == np.bool:
        counts = [
            ("False", np.sum(tensor == 0)),
            ("True", np.sum(tensor > 0)),
        ]
        return _counts_summary(counts, total_count=np.size(tensor))
    else:
        return debugger_cli_common.RichTextLines([
            "No numeric summary available due to tensor dtype: %s." %
            tensor.dtype
        ])
Example #37
File: base.py  Project: chrinide/pyiron
 def _set_selective_dynamics(self):
     if "selective_dynamics" in self.structure._tag_list.keys():
         if self.structure.selective_dynamics._default is None:
             self.structure.selective_dynamics._default = [True, True, True]
         sel_dyn = np.logical_not(self.structure.selective_dynamics.list())
         # Enter loop only if constraints present
         if len(np.argwhere(np.any(sel_dyn, axis=1)).flatten()) != 0:
             all_indices = np.arange(len(self.structure), dtype=int)
             constraint_xyz = np.argwhere(np.all(sel_dyn, axis=1)).flatten()
             not_constrained_xyz = np.setdiff1d(all_indices, constraint_xyz)
             # LAMMPS starts counting from 1
             constraint_xyz += 1
             ind_x = np.argwhere(sel_dyn[not_constrained_xyz, 0]).flatten()
             ind_y = np.argwhere(sel_dyn[not_constrained_xyz, 1]).flatten()
             ind_z = np.argwhere(sel_dyn[not_constrained_xyz, 2]).flatten()
             constraint_xy = not_constrained_xyz[np.intersect1d(
                 ind_x, ind_y)] + 1
             constraint_yz = not_constrained_xyz[np.intersect1d(
                 ind_y, ind_z)] + 1
             constraint_zx = not_constrained_xyz[np.intersect1d(
                 ind_z, ind_x)] + 1
             constraint_x = (not_constrained_xyz[np.setdiff1d(
                 np.setdiff1d(ind_x, ind_y), ind_z)] + 1)
             constraint_y = (not_constrained_xyz[np.setdiff1d(
                 np.setdiff1d(ind_y, ind_z), ind_x)] + 1)
             constraint_z = (not_constrained_xyz[np.setdiff1d(
                 np.setdiff1d(ind_z, ind_x), ind_y)] + 1)
             if len(constraint_xyz) > 0:
                 self.input.control[
                     "group___constraintxyz"] = "id " + " ".join(
                         [str(ind) for ind in constraint_xyz])
                 self.input.control[
                     "fix___constraintxyz"] = "constraintxyz setforce 0.0 0.0 0.0"
                 if self._generic_input["calc_mode"] == "md":
                     self.input.control[
                         "velocity___constraintxyz"] = "set 0.0 0.0 0.0"
             if len(constraint_xy) > 0:
                 self.input.control[
                     "group___constraintxy"] = "id " + " ".join(
                         [str(ind) for ind in constraint_xy])
                 self.input.control[
                     "fix___constraintxy"] = "constraintxy setforce 0.0 0.0 NULL"
                 if self._generic_input["calc_mode"] == "md":
                     self.input.control[
                         "velocity___constraintxy"] = "set 0.0 0.0 NULL"
             if len(constraint_yz) > 0:
                 self.input.control[
                     "group___constraintyz"] = "id " + " ".join(
                         [str(ind) for ind in constraint_yz])
                 self.input.control[
                     "fix___constraintyz"] = "constraintyz setforce NULL 0.0 0.0"
                 if self._generic_input["calc_mode"] == "md":
                     self.input.control[
                         "velocity___constraintyz"] = "set NULL 0.0 0.0"
             if len(constraint_zx) > 0:
                 self.input.control[
                     "group___constraintxz"] = "id " + " ".join(
                         [str(ind) for ind in constraint_zx])
                 self.input.control[
                     "fix___constraintxz"] = "constraintxz setforce 0.0 NULL 0.0"
                 if self._generic_input["calc_mode"] == "md":
                     self.input.control[
                         "velocity___constraintxz"] = "set 0.0 NULL 0.0"
             if len(constraint_x) > 0:
                 self.input.control[
                     "group___constraintx"] = "id " + " ".join(
                         [str(ind) for ind in constraint_x])
                 self.input.control[
                     "fix___constraintx"] = "constraintx setforce 0.0 NULL NULL"
                 if self._generic_input["calc_mode"] == "md":
                     self.input.control[
                         "velocity___constraintx"] = "set 0.0 NULL NULL"
             if len(constraint_y) > 0:
                 self.input.control[
                     "group___constrainty"] = "id " + " ".join(
                         [str(ind) for ind in constraint_y])
                 self.input.control[
                     "fix___constrainty"] = "constrainty setforce NULL 0.0 NULL"
                 if self._generic_input["calc_mode"] == "md":
                     self.input.control[
                         "velocity___constrainty"] = "set NULL 0.0 NULL"
             if len(constraint_z) > 0:
                 self.input.control[
                     "group___constraintz"] = "id " + " ".join(
                         [str(ind) for ind in constraint_z])
                 self.input.control[
                     "fix___constraintz"] = "constraintz setforce NULL NULL 0.0"
                 if self._generic_input["calc_mode"] == "md":
                     self.input.control[
                         "velocity___constraintz"] = "set NULL NULL 0.0"
Example #38
             BAfull.add_pixel_points(
                 [[cam_ids[id0 + idx], lmk_id,
                   pxl_pt]])  # pose_id, point_id, measurement
             if args.record_log:
                 log_str = '\t\tedge {} {} {}\n'.format(
                     cam_ids[id0 + idx], lmk_id,
                     nparray2string(pxl_pt,
                                    with_bracket=False,
                                    suppress_small=True))
                 flog.write(log_str)
     print_str += '[Debug]{:2d} long term points, {:3d} for BA_data '.format(
         np.sum(mask_hist_curr), len(BA_data))
     # print('[Debug]{:2d} long term points, {:3d} for BA_data'.format(np.sum(mask_hist_curr), len(BA_data)))
 else:
     mask_hist_curr = np.zeros(points1_curr.shape[0], dtype=bool)
 points1_curr_rest = points1_curr[np.logical_not(mask_hist_curr)]
 points2_curr_rest = points2_curr[np.logical_not(mask_hist_curr)]
 if points1_prev is not None:
     mask_prev, mask_curr = get_match_masks(points1_prev,
                                            points1_curr_rest, dim)
     if args.show_img:
         for pt in points1_curr_rest[mask_curr]:
             img_disp[pt[1], pt[0]] = np.array([1, 1, 1])
     # print('[Debug]{} new points'.format(np.sum(mask_curr)))
     new_points = match_points(points0_prev[mask_prev],
                               points1_prev[mask_prev],
                               points1_curr_rest[mask_curr],
                               points2_curr_rest[mask_curr], dim)
     point_tracks.extend(new_points)
     hist_match = np.vstack(
         (points2_curr[mask_hist_curr], points2_curr_rest[mask_curr]))
Example #39
0
 def plot(ax, x, y, **kwargs):
     mask = np.logical_not(np.logical_or(np.isnan(x), np.isnan(y)))
     x = x[mask]
     y = y[mask]
     ax.plot(x, y, **kwargs)
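
# Hypothetical usage sketch (not part of the original example): it simply
# demonstrates the NaN-masking helper above on toy data, assuming numpy and
# matplotlib are available and `plot` is defined as in the snippet.
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
x = np.array([0.0, 1.0, np.nan, 3.0, 4.0])
y = np.array([1.0, np.nan, 2.0, 3.0, 5.0])
plot(ax, x, y, marker='o')  # only pairs where both x and y are finite are drawn
plt.show()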
Example #40
0
    def predict(self,
                xvalv,
                predict_CI=[],
                save_posteriors=False,
                drawplots=False,
                output_scores=False):
        if not (len(self.preselected_features) == 0):
            xvalv = xvalv[self.preselected_features]
        xvalv = pd.DataFrame(self.poly.fit_transform(xvalv))
        xvalv = xvalv.get_values()
        yval = []
        yval_CI = []
        yval_posteriors = []
        for xvali in range(xvalv.shape[0]):
            xval = xvalv[xvali, :]
            xval_r = (
                np.nanmean(self.x_train.get_values() < xval, axis=0) +
                0.5 * np.nanmean(self.x_train.get_values() == xval, axis=0)
            )  # rank feature vector relative to training data
            #print('clipping from below:',self.x_train.columns[xval_r==0])
            xval_r[xval_r == 0] = 1 / (
                self.x_train.shape[0] + 1
            )  # clip rankings to avoid out of bound infinities
            #print('clipping from above:',self.x_train.columns[xval_r==1])
            xval_r[xval_r == 1] = 1 - 1 / (self.x_train.shape[0] + 1)
            xval_z = norm.ppf(xval_r)  # compute equivalent Z value
            xval_z = xval_z.reshape(1, -1)

            label = self.logreg.predict_proba(xval_z)
            predict0 = self.model0.predict(xval_z)
            predict1 = self.model1.predict(xval_z)
            yval_z = predict0 * label[:, 0] + predict1 * label[:, 1]
            eps = np.sqrt(
                label[:, 0] * self.eps_std0**2 + label[:, 1] * self.eps_std1**2
            )  # pretend both predictions are independent and just average variances

            ypercentile = norm.cdf(yval_z)
            yval_arg = np.argmin(np.abs(self.y_train_z.get_values() - yval_z))
            yval.append(self.y_train.get_values()[yval_arg])

            if not (predict_CI == []):
                y_axis_values = np.linspace(
                    -2, 16, 500)  # reasonable limits for book log revenues
                dy = y_axis_values[1] - y_axis_values[0]
                KDE = stats.gaussian_kde(self.y_train.get_values().ravel(),
                                         bw_method=2 /
                                         np.sqrt(len(self.y_train)))
                y_pdf = KDE.pdf(y_axis_values)
                y_cdf = np.cumsum(y_pdf) * dy
                yz = norm.ppf(y_cdf)  # function that goes from Y to Z space
                posterior_yz = np.exp(-.5 * (
                    (yz - yval_z) / eps)**2) / np.sqrt(2 * np.pi * eps**2)
                posterior_y = posterior_yz * self.diff2(
                    yz
                ) / dy  # Change of variables between Z space and Y space through the function yz (d(yz)/dy is always positive)
                posterior_y[np.logical_not(np.isfinite(posterior_y))] = 0
                posterior_y = posterior_y / (
                    np.sum(posterior_y) * dy
                )  # correct numerical errors and make sure pdf sums to one
                posterior_y_cdf = np.cumsum(posterior_y) * dy
                expected_revenue = posterior_y.dot(np.exp(y_axis_values) - 1)

                ci_list = []
                for ci in predict_CI:
                    ci_list.append(
                        (max(y_axis_values[posterior_y_cdf < .5 - ci / 2]),
                         min(y_axis_values[posterior_y_cdf > (.5 + ci / 2)])))
                yval_CI.append(ci_list)
                yval_posteriors.append((y_axis_values, posterior_y))

                if drawplots:
                    #Drawing plots also implies computing full posterior distributions
                    #compute change of variables from posterior Zs to posterior Log Revenues

                    plt.hist(self.y_train.get_values(),
                             bins=int(4 *
                                      np.ceil(np.sqrt(self.y_train.shape[0]))),
                             normed=True,
                             alpha=.2)
                    plt.plot(y_axis_values,
                             y_pdf,
                             color=[.2, .2, .7],
                             linewidth=3)
                    plt.hlines([-.05],
                               ci_list[1][0],
                               ci_list[1][1],
                               'r',
                               linewidth=3,
                               alpha=.5)  # plot CIs of prediction
                    plt.hlines([-.05],
                               ci_list[0][0],
                               ci_list[0][1],
                               'r',
                               linewidth=5,
                               alpha=.8)
                    plt.vlines(yval[-1], -.05, -.025, 'r', linewidth=3)
                    plt.plot(y_axis_values, posterior_y, color=[1, 0, 0])
                    plt.hlines([0],
                               y_axis_values[1],
                               y_axis_values[-1],
                               'k',
                               linewidth=1,
                               alpha=1)
                    plt.xticks([
                        0,
                        np.log(10),
                        np.log(100),
                        np.log(1000),
                        np.log(10000),
                        np.log(100000)
                    ], ['1', '10', '100', '1k', '10k', '100k'])

        if not (predict_CI == []):
            if save_posteriors:
                if output_scores:
                    scores = (label[:, 0] * self.model0.coef_ * xval_z +
                              label[:, 1] * self.model1.coef_ * xval_z)
                    scores = pd.DataFrame(scores.ravel()[1:],
                                          index=self.preselected_features)
                    return np.array(yval).ravel(), np.array(
                        yval_CI), yval_posteriors, scores
                else:
                    return np.array(yval).ravel(), np.array(
                        yval_CI), yval_posteriors
            else:
                return np.array(yval).ravel(), np.array(yval_CI)
        else:
            return np.array(yval).ravel()
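
# A minimal, self-contained sketch (illustrative only, not taken from the
# original class) of the rank transform used in predict() above: a new value
# is ranked against the training column, the rank is clipped away from 0 and
# 1, and mapped to an equivalent Z value with the inverse normal CDF.
import numpy as np
from scipy.stats import norm

x_train = np.array([[1.0], [2.0], [4.0], [8.0]])  # toy training column
xval = np.array([3.0])                            # new observation
xval_r = (np.nanmean(x_train < xval, axis=0) +
          0.5 * np.nanmean(x_train == xval, axis=0))
xval_r = np.clip(xval_r, 1 / (x_train.shape[0] + 1),
                 1 - 1 / (x_train.shape[0] + 1))  # avoid infinities at 0 or 1
xval_z = norm.ppf(xval_r)
print(xval_r, xval_z)  # rank 0.5 maps to Z = 0.0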
Example #41
0
def eval_map(det_results,
             gt_bboxes,
             gt_labels,
             gt_ignore=None,
             scale_ranges=None,
             iou_thr=0.5,
             dataset=None,
             print_summary=True):
    """Evaluate mAP of a dataset.

    Args:
        det_results (list): a list of list, [[cls1_det, cls2_det, ...], ...]
        gt_bboxes (list): ground truth bboxes of each image, a list of K*4
            array.
        gt_labels (list): ground truth labels of each image, a list of K array
        gt_ignore (list): gt ignore indicators of each image, a list of K array
        scale_ranges (list, optional): [(min1, max1), (min2, max2), ...]
        iou_thr (float): IoU threshold
        dataset (None or str or list): dataset name or dataset classes, there
            are minor differences in metrics for different datasets, e.g.
            "voc07", "imagenet_det", etc.
        print_summary (bool): whether to print the mAP summary

    Returns:
        tuple: (mAP, [dict, dict, ...])
    """
    assert len(det_results) == len(gt_bboxes) == len(gt_labels)
    if gt_ignore is not None:
        assert len(gt_ignore) == len(gt_labels)
        for i in range(len(gt_ignore)):
            assert len(gt_labels[i]) == len(gt_ignore[i])
    area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges]
                   if scale_ranges is not None else None)
    num_scales = len(scale_ranges) if scale_ranges is not None else 1
    eval_results = []
    num_classes = len(det_results[0])  # positive class num
    gt_labels = [
        label if label.ndim == 1 else label[:, 0] for label in gt_labels
    ]
    for i in range(num_classes):
        # get gt and det bboxes of this class
        cls_dets, cls_gts, cls_gt_ignore = get_cls_results(
            det_results, gt_bboxes, gt_labels, gt_ignore, i)
        # calculate tp and fp for each image
        tpfp_func = (tpfp_imagenet
                     if dataset in ['det', 'vid'] else tpfp_default)
        tpfp = [
            tpfp_func(cls_dets[j], cls_gts[j], cls_gt_ignore[j], iou_thr,
                      area_ranges) for j in range(len(cls_dets))
        ]
        tp, fp = tuple(zip(*tpfp))
        # calculate gt number of each scale, gts ignored or beyond scale
        # are not counted
        num_gts = np.zeros(num_scales, dtype=int)
        for j, bbox in enumerate(cls_gts):
            if area_ranges is None:
                num_gts[0] += np.sum(np.logical_not(cls_gt_ignore[j]))
            else:
                gt_areas = (bbox[:, 2] - bbox[:, 0] + 1) * (bbox[:, 3] -
                                                            bbox[:, 1] + 1)
                for k, (min_area, max_area) in enumerate(area_ranges):
                    num_gts[k] += np.sum(
                        np.logical_not(cls_gt_ignore[j])
                        & (gt_areas >= min_area) & (gt_areas < max_area))
        # sort all det bboxes by score, also sort tp and fp
        cls_dets = np.vstack(cls_dets)
        num_dets = cls_dets.shape[0]
        sort_inds = np.argsort(-cls_dets[:, -1])
        tp = np.hstack(tp)[:, sort_inds]
        fp = np.hstack(fp)[:, sort_inds]
        # calculate recall and precision with tp and fp
        tp = np.cumsum(tp, axis=1)
        fp = np.cumsum(fp, axis=1)
        eps = np.finfo(np.float32).eps
        recalls = tp / np.maximum(num_gts[:, np.newaxis], eps)
        precisions = tp / np.maximum((tp + fp), eps)
        # calculate AP
        if scale_ranges is None:
            recalls = recalls[0, :]
            precisions = precisions[0, :]
            num_gts = num_gts.item()
        mode = 'area' if dataset != 'voc07' else '11points'
        ap = average_precision(recalls, precisions, mode)
        eval_results.append({
            'num_gts': num_gts,
            'num_dets': num_dets,
            'recall': recalls,
            'precision': precisions,
            'ap': ap
        })
    if scale_ranges is not None:
        # shape (num_classes, num_scales)
        all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results])
        all_num_gts = np.vstack(
            [cls_result['num_gts'] for cls_result in eval_results])
        mean_ap = []
        for i in range(num_scales):
            if np.any(all_num_gts[:, i] > 0):
                mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean())
            else:
                mean_ap.append(0.0)
    else:
        aps = []
        for cls_result in eval_results:
            if cls_result['num_gts'] > 0:
                aps.append(cls_result['ap'])
        mean_ap = np.array(aps).mean().item() if aps else 0.0
    if print_summary:
        print_map_summary(mean_ap, eval_results, dataset)

    return mean_ap, eval_results
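
# A tiny, self-contained sketch (illustrative only; the tpfp_* and
# average_precision helpers from the original module are not reproduced) of
# the core bookkeeping in eval_map: cumulative TP/FP counts, sorted by score,
# give the recall and precision arrays from which the AP is computed.
import numpy as np

scores = np.array([0.9, 0.8, 0.7, 0.6])
tp = np.array([[1, 0, 1, 1]], dtype=float)  # per-detection true positives
fp = 1 - tp                                 # per-detection false positives
num_gts = 3
order = np.argsort(-scores)
tp_cum = np.cumsum(tp[:, order], axis=1)
fp_cum = np.cumsum(fp[:, order], axis=1)
recalls = tp_cum / num_gts
precisions = tp_cum / np.maximum(tp_cum + fp_cum, np.finfo(np.float32).eps)
print(recalls[0])     # [0.333 0.333 0.667 1.0]
print(precisions[0])  # [1.0 0.5 0.667 0.75]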
Example #42
0
def combine_cubes(cubes,
                  masks,
                  regions=True,
                  final=False,
                  halfset=False,
                  halfsetfinal=False):
    """
    Combine a bunch of cubes using masks with CubeCombine
        
    cubes    -> a list of cubes to use in the combine
    masks    -> a list of goodpix masks from the pipeline
    regions  -> if True, code searches for ds9 region files inside path with same name as 
                pipeline mask (.reg), to mask additional area that one wants to clip
    final    -> if True, append final tag to name and prepare median cubes
    
    halfset       -> if set to tag name, append/uses suffix for coadding independent halves
    halfsetfinal  -> if set to tag name in final loop, append/uses suffix for coadding independent halves

    """
    import subprocess
    import os
    import numpy as np
    from astropy.io import fits
    from mypython.fits import pyregmask as msk

    #define some names for the cubes
    if (final):
        cname = "COMBINED_CUBE_FINAL.fits"
        iname = "COMBINED_IMAGE_FINAL.fits"
        cmed = "COMBINED_CUBE_MED_FINAL.fits"
        imed = "COMBINED_IMAGE_MED_FINAL.fits"
        expmap = "COMBINED_EXPMAP_FINAL.fits"
        scriptname = 'runcombine_final.sh'
    elif (halfset):
        cname = "COMBINED_CUBE_{}.fits".format(halfset)
        iname = "COMBINED_IMAGE_{}.fits".format(halfset)
        cmed = "COMBINED_CUBE_MED_{}.fits".format(halfset)
        imed = "COMBINED_IMAGE_MED_{}.fits".format(halfset)
        expmap = "COMBINED_EXPMAP_{}.fits".format(halfset)
        scriptname = 'runcombine_{}.sh'.format(halfset)
    elif (halfsetfinal):
        cname = "COMBINED_CUBE_FINAL_{}.fits".format(halfsetfinal)
        iname = "COMBINED_IMAGE_FINAL_{}.fits".format(halfsetfinal)
        cmed = "COMBINED_CUBE_MED_FINAL_{}.fits".format(halfsetfinal)
        imed = "COMBINED_IMAGE_MED_FINAL_{}.fits".format(halfsetfinal)
        expmap = "COMBINED_EXPMAP_FINAL_{}.fits".format(halfset)
        scriptname = 'runcombine_final_{}.sh'.format(halfsetfinal)
    else:
        cname = "COMBINED_CUBE.fits"
        iname = "COMBINED_IMAGE.fits"
        cmed = "COMBINED_CUBE_MED.fits"
        imed = "COMBINED_IMAGE_MED.fits"
        expmap = "COMBINED_EXPMAP.fits"
        scriptname = 'runcombine.sh'

    if (os.path.isfile(cname)):
        print('Cube {} already exists... skip!'.format(cname))
    else:
        print('Creating combined cube {}'.format(cname))

        if (regions):
            print("Updating the masks")

            #loads list
            listmask = np.loadtxt(masks, dtype=np.dtype('a'))

            #redefine new mask
            mask_new = "new_" + masks
            llms = open(mask_new, "w")

            #if scalar, make it 1 element list
            if (listmask.shape == ()):
                listmask = [listmask]

            #loop over and update with regions
            for i, cmask in enumerate(listmask):

                #Decode bytes-like object
                cmask = cmask.decode('utf-8')
                #create region name
                regname = (cmask.split(".fits")[0]) + ".reg"

                #search if file exist
                if (os.path.isfile(regname)):

                    #update the mask
                    print("Updating mask for {}".format(regname))

                    #open fits
                    cfits = fits.open(cmask)

                    #init reg mask
                    Mask = msk.PyMask(cfits[0].header["NAXIS1"],
                                      cfits[0].header["NAXIS2"], regname)
                    for ii in range(Mask.nreg):
                        Mask.fillmask(ii)
                        if (ii == 0):
                            totmask = Mask.mask
                        else:
                            totmask += Mask.mask

                    #update the mask
                    cfits[0].data = cfits[0].data * 1 * np.logical_not(totmask)
                    savename = cmask.split(".fits")[0] + '_wreg.fits'
                    cfits.writeto(savename, clobber=True)
                    llms.write(savename + '\n')

                else:
                    #keep current mask
                    llms.write(cmask + '\n')

            #done with new masks
            llms.close()

        else:
            print('Using original masks')
            mask_new = masks

        #now run combine
        print('Combine the cube...')

        #make mean cube - write this as a script that can be run independently
        scr = open(scriptname, 'w')
        scr.write("export OMP_NUM_THREADS=1\n")
        scr.write("CubeCombine -list " + cubes + " -out " + cname +
                  " -masklist " + mask_new + " -outexp " + expmap + "\n")
        scr.write("Cube2Im -cube " + cname + " -out " + iname + "\n")
        scr.write("CubeCombine -list " + cubes + " -out " + cmed +
                  " -masklist " + mask_new + " -comb median \n")
        scr.write("Cube2Im -cube " + cmed + " -out " + imed)
        scr.close()
        subprocess.call(["sh", scriptname])
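
# A short, self-contained sketch (toy arrays instead of the FITS goodpix masks
# and ds9 regions used above) of the mask-update step in combine_cubes: pixels
# covered by a region are dropped from the pipeline mask by multiplying with
# the inverted region mask.
import numpy as np

goodpix = np.ones((4, 4), dtype=int)  # 1 = good pixel according to the pipeline
totmask = np.zeros((4, 4), dtype=int)
totmask[1:3, 1:3] = 1                 # additional area to clip
updated = goodpix * 1 * np.logical_not(totmask)
print(updated)                        # the central 2x2 block is now zeroed out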
Example #43
0
File: stica.py Project: csn92/sima
def _find_useful_components(st_components, threshold, x_smoothing=4):
    """ finds ICA components with axons and brings them to the foreground

    Parameters
    ----------
    st_components : array
        stICA components
        Shape: (num_rows, num_columns, n_components)
    threshold : float
        threshold on gradient measures to cut off
    x_smoothing : int
        number of times to apply the gaussian blur smoothing process to
        each component. Default: 4

    Returns
    -------
    accepted : list
        stICA components which contain axons have been processed
        Shape: n_components
    accepted_components : list
        stICA components which are found to contain axons but without image
        processing applied
    rejected : list
        stICA components that are determined to have no axon information
        in them
    """

    accepted = []
    accepted_components = []
    rejected = []
    for i in xrange(st_components.shape[2]):

        # copy the component, remove pixels with low weights
        frame = st_components[:, :, i].copy()
        frame[frame < 2 * np.std(frame)] = 0

        # smooth the component via static removal and gaussian blur
        for n in xrange(x_smoothing):
            check = frame[1:-1, :-2] + frame[1:-1, 2:] + frame[:-2, 1:-1] + \
                frame[2:, 1:-1]
            z = np.zeros(frame.shape)
            z[1:-1, 1:-1] = check
            frame[np.logical_not(z)] = 0

            blurred = ndimage.gaussian_filter(frame, sigma=1)
            frame = blurred + frame

            frame = frame / np.max(frame)
            frame[frame < 2 * np.std(frame)] = 0

        # calculate the remaining static in the component
        static = np.sum(np.abs(frame[1:-1, 1:-1] - frame[:-2, 1:-1])) + \
            np.sum(np.abs(frame[1:-1, 1:-1] - frame[2:, 1:-1])) + \
            np.sum(np.abs(frame[1:-1, 1:-1] - frame[1:-1, :-2])) + \
            np.sum(np.abs(frame[1:-1, 1:-1] - frame[1:-1, 2:])) + \
            np.sum(np.abs(frame[1:-1, 1:-1] - frame[2:, 2:])) + \
            np.sum(np.abs(frame[1:-1, 1:-1] - frame[:-2, 2:])) + \
            np.sum(np.abs(frame[1:-1, 1:-1] - frame[2:, :-2])) + \
            np.sum(np.abs(frame[1:-1, 1:-1] - frame[:-2, :-2]))

        static = static * 2.0 / (frame.shape[0] * frame.shape[1])

        # decide if the component should be accepted or rejected
        if np.sum(static) < threshold:
            accepted.append(frame)
            accepted_components.append(st_components[:, :, i])
        else:
            rejected.append(frame)
    return accepted, accepted_components, rejected
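
# A compact, self-contained sketch (illustrative only) of the static-removal
# step above: check sums the four direct neighbours of every interior pixel,
# and pixels whose neighbourhood sum is zero are treated as isolated noise and
# zeroed before the gaussian blur is applied.
import numpy as np
from scipy import ndimage

frame = np.zeros((5, 5))
frame[2, 2] = 1.0  # isolated pixel with no lit neighbours
check = frame[1:-1, :-2] + frame[1:-1, 2:] + frame[:-2, 1:-1] + frame[2:, 1:-1]
z = np.zeros(frame.shape)
z[1:-1, 1:-1] = check
frame[np.logical_not(z)] = 0          # the isolated pixel is removed
frame = ndimage.gaussian_filter(frame, sigma=1) + frame
print(frame.max())                    # 0.0 -> nothing survives the cleanup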
Example #44
0
def legendrep(x, n):
    '''
    Inputs:
    x - np.array - The points at which the Legendre polynomial is evaluated
    n - int or np.array - The order to which this function calculates the Legendre Polynomial's first derivative
    
    If n is an int, every x will be calculated to the nth order
    If n is an array, it must be the same length as x and each x will be calculated to an order equal to the
    corresponding element in the n array
    
    Outputs:
    np.array - The sum of the terms in the Legendre Polynomial's first derivative, evaluated at each x
    '''
    x = x.copy()
    output = np.zeros(len(x))

    if isinstance(n, np.ndarray):
        n = n.copy()

        case1 = (n == 0)
        case2 = (n == 1)
        output[case2] = 1
        case3 = np.logical_and((x == 0), (n % 2 == 0))

        i = 0
        while True:
            needsaddition = ((np.floor(n[:]) / 2).astype(int) > i)
            alldone = np.logical_not(needsaddition).all()
            if (alldone):
                break
            add1 = np.logical_and(np.logical_not(case1), np.logical_not(case2))
            add2 = np.logical_and(np.logical_not(case3), needsaddition)
            add = np.logical_and(add1, add2)
            output = np.where(
                add, output[:] + (-1)**(i) * factorial(2 * n - 2 * i) /
                np.math.factorial(i) / factorial(n - i) /
                factorial(n - 2 * i) / 2**n[:] * x[:]**(n - 2 * i), output[:])
            i += 1

        output[np.abs(x) > 1.0] = 0

        return output

    elif (isinstance(n, (int, float))):
        if (n == 0):
            return output
        elif (n == 1):
            return output + 1

        for i in range(int((np.floor(n) / 2))):
            output += (-1)**(i) * np.math.factorial(
                2 * n - 2 * i) / np.math.factorial(i) / np.math.factorial(
                    n - i) / np.math.factorial(n - 2 * i) / 2**n * x[:]**(
                        n - 2 * i)

        case1 = np.logical_and((x == 0), (n % 2 == 0))
        output[case1] = 0
        output[np.abs(x) > 1.0] = 0

        return output

    else:
        raise TypeError("Input must be a numpy array, integer, or float")
Example #45
0
def load_casa_image(filename,
                    skipdata=False,
                    skipvalid=False,
                    skipcs=False,
                    **kwargs):
    """
    Load a cube (into memory?) from a CASA image. By default it will transpose
    the cube into a 'python' order and drop degenerate axes. These options can
    be suppressed. The object holds the coordsys object from the image in
    memory.
    """

    try:
        from taskinit import ia
    except ImportError:
        raise ImportError(
            "Could not import CASA (casac) and therefore cannot read CASA .image files"
        )

    # use the ia tool to get the file contents
    ia.open(filename)

    # read in the data
    if not skipdata:
        data = ia.getchunk()

    # CASA stores validity of data as a mask
    if not skipvalid:
        valid = ia.getchunk(getmask=True)

    # transpose is dealt with within the cube object

    # read in coordinate system object
    casa_cs = ia.coordsys()

    wcs = wcs_casa2astropy(casa_cs)

    unit = ia.brightnessunit()

    # don't need this yet
    # stokes = get_casa_axis(temp_cs, wanttype="Stokes", skipdeg=False,)

    #    if stokes == None:
    #        order = np.arange(self.data.ndim)
    #    else:
    #        order = []
    #        for ax in np.arange(self.data.ndim+1):
    #            if ax == stokes:
    #                continue
    #            order.append(ax)

    #    self.casa_cs = ia.coordsys(order)

    # This should work, but coordsys.reorder() has a bug
    # on the error checking. JIRA filed. Until then the
    # axes will be reversed from the original.

    # if transpose == True:
    #    new_order = np.arange(self.data.ndim)
    #    new_order = new_order[-1*np.arange(self.data.ndim)-1]
    #    print new_order
    #    self.casa_cs.reorder(new_order)

    # close the ia tool
    ia.close()

    meta = {'filename': filename, 'BUNIT': unit}

    if wcs.naxis == 3:
        mask = BooleanArrayMask(np.logical_not(valid), wcs)
        cube = SpectralCube(data, wcs, mask, meta=meta)

    elif wcs.naxis == 4:
        data, wcs = cube_utils._split_stokes(data.T, wcs)
        mask = {}
        for component in data:
            data[component], wcs_slice = cube_utils._orient(
                data[component], wcs)
            mask[component] = LazyMask(np.isfinite,
                                       data=data[component],
                                       wcs=wcs_slice)

        cube = StokesSpectralCube(data, wcs_slice, mask, meta=meta)

    return cube
Example #46
0
    def accumulate(self, p = None):
        '''
        Accumulate per image evaluation results and store the result in self.eval
        :param p: input params for evaluation
        :return: None
        '''
        print('Accumulating evaluation results...')
        tic = time.time()
        if not self.evalImgs:
            print('Please run evaluate() first')
        # allows input customized parameters
        if p is None:
            p = self.params
        p.catIds = p.catIds if p.useCats == 1 else [-1]
        T           = len(p.iouThrs)
        R           = len(p.recThrs)
        K           = len(p.catIds) if p.useCats else 1
        A           = len(p.areaRng)
        M           = len(p.maxDets)
        precision   = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories
        recall      = -np.ones((T,K,A,M))
        scores      = -np.ones((T,R,K,A,M))

        # create dictionary for future indexing
        _pe = self._paramsEval
        catIds = _pe.catIds if _pe.useCats else [-1]
        setK = set(catIds)
        setA = set(map(tuple, _pe.areaRng))
        setM = set(_pe.maxDets)
        setI = set(_pe.imgIds)
        # get inds to evaluate
        k_list = [n for n, k in enumerate(p.catIds)  if k in setK]
        m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
        a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]
        i_list = [n for n, i in enumerate(p.imgIds)  if i in setI]
        I0 = len(_pe.imgIds)
        A0 = len(_pe.areaRng)
        # retrieve E at each category, area range, and max number of detections
        for k, k0 in enumerate(k_list):
            Nk = k0*A0*I0
            for a, a0 in enumerate(a_list):
                Na = a0*I0
                for m, maxDet in enumerate(m_list):
                    E = [self.evalImgs[Nk + Na + i] for i in i_list]
                    E = [e for e in E if not e is None]
                    if len(E) == 0:
                        continue
                    dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])

                    # different sorting methods generate slightly different results;
                    # mergesort is used to be consistent with the Matlab implementation.
                    inds = np.argsort(-dtScores, kind='mergesort')
                    dtScoresSorted = dtScores[inds]

                    dtm  = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]
                    dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet]  for e in E], axis=1)[:,inds]
                    gtIg = np.concatenate([e['gtIgnore'] for e in E])
                    npig = np.count_nonzero(gtIg==0 )
                    if npig == 0:
                        continue
                    tps = np.logical_and(               dtm,  np.logical_not(dtIg) )
                    fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )

                    tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)
                    fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)
                    for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
                        tp = np.array(tp)
                        fp = np.array(fp)
                        nd = len(tp)
                        rc = tp / npig
                        pr = tp / (fp+tp+np.spacing(1))
                        q  = np.zeros((R,))
                        ss = np.zeros((R,))

                        if nd:
                            recall[t,k,a,m] = rc[-1]
                        else:
                            recall[t,k,a,m] = 0

                        # numpy is slow without cython optimization for accessing elements;
                        # using python lists gives a significant speed improvement
                        pr = pr.tolist(); q = q.tolist()

                        for i in range(nd-1, 0, -1):
                            if pr[i] > pr[i-1]:
                                pr[i-1] = pr[i]

                        inds = np.searchsorted(rc, p.recThrs, side='left')
                        try:
                            for ri, pi in enumerate(inds):
                                q[ri] = pr[pi]
                                ss[ri] = dtScoresSorted[pi]
                        except:
                            pass
                        precision[t,:,k,a,m] = np.array(q)
                        scores[t,:,k,a,m] = np.array(ss)

        self.eval = {
            'params': p,
            'counts': [T, R, K, A, M],
            'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'precision': precision,
            'recall':   recall,
            'scores': scores,
        }

        # accumulate and calculate final multi-label aesthetic f-score
        if p.multi_label:
            assert len(self.full_g_aes) == len(self.full_d_aes), 'Mismatch GT and Pred aesthetic samples.'
            if len(self.full_g_aes) > 0:
                m_precision, m_recall, f_beta, support = precision_recall_fscore_support(y_true=self.full_g_aes,
                       y_pred=self.full_d_aes, average='samples', beta=p.beta, zero_division=0)
                print('Evaluated multi-label f-{} score on {} samples'.format(p.beta, len(self.full_g_aes)))
            else:
                m_precision, m_recall, f_beta = 0, 0, 0
            self.eval['m_precision'] = m_precision
            self.eval['m_recall'] = m_recall
            self.eval['f_beta'] = f_beta

        toc = time.time()
        print('DONE (t={:0.2f}s).'.format(toc-tic))
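
# A minimal, self-contained sketch (illustrative only) of the precision
# interpolation loop in accumulate(): scanning the precision list from the end
# makes it monotonically non-increasing before it is sampled at the recall
# thresholds.
pr = [1.0, 0.5, 0.67, 0.75, 0.6]
for i in range(len(pr) - 1, 0, -1):
    if pr[i] > pr[i - 1]:
        pr[i - 1] = pr[i]
print(pr)  # [1.0, 0.75, 0.75, 0.75, 0.6]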
Example #47
0
    def construct_cnot_chain(self, diag_pauli_op1: PauliOp, diag_pauli_op2: PauliOp) -> PrimitiveOp:
        r"""
        Construct a ``CircuitOp`` (or ``PauliOp`` if equal to the identity) which takes the
        eigenvectors of ``diag_pauli_op1`` to the eigenvectors of ``diag_pauli_op2``,
        assuming both are diagonal (or performing this operation on their diagonalized Paulis
        implicitly if not). This works by the insight that the eigenvalue of a diagonal Pauli's
        eigenvector is -1 if the parity of its significant bits is 1 and +1 if the parity
        is 0, i.e. 1 - (2 * parity). Therefore, using CNOTs, we can write the parity of diag_pauli_op1's
        significant bits onto some qubit, and then write out that parity onto diag_pauli_op2's
        significant bits.

        Args:
            diag_pauli_op1: The origin ``PauliOp``.
            diag_pauli_op2: The destination ``PauliOp``.

        Returns:
            The ``PrimitiveOp`` performing the mapping.
        """
        # TODO be smarter about connectivity and actual distance between pauli and destination
        # TODO be smarter in general

        pauli_1 = (
            diag_pauli_op1.primitive if isinstance(diag_pauli_op1, PauliOp) else diag_pauli_op1
        )
        pauli_2 = (
            diag_pauli_op2.primitive if isinstance(diag_pauli_op2, PauliOp) else diag_pauli_op2
        )
        origin_sig_bits = np.logical_or(pauli_1.z, pauli_1.x)
        destination_sig_bits = np.logical_or(pauli_2.z, pauli_2.x)
        num_qubits = max(len(pauli_1.z), len(pauli_2.z))

        sig_equal_sig_bits = np.logical_and(origin_sig_bits, destination_sig_bits)
        non_equal_sig_bits = np.logical_not(origin_sig_bits == destination_sig_bits)
        # Equivalent to np.logical_xor(origin_sig_bits, destination_sig_bits)

        if not any(non_equal_sig_bits):
            return I ^ num_qubits

        # I am deeply sorry for this code, but I don't know another way to do it.
        sig_in_origin_only_indices = np.extract(
            np.logical_and(non_equal_sig_bits, origin_sig_bits), np.arange(num_qubits)
        )
        sig_in_dest_only_indices = np.extract(
            np.logical_and(non_equal_sig_bits, destination_sig_bits), np.arange(num_qubits)
        )

        if len(sig_in_origin_only_indices) > 0 and len(sig_in_dest_only_indices) > 0:
            origin_anchor_bit = min(sig_in_origin_only_indices)
            dest_anchor_bit = min(sig_in_dest_only_indices)
        else:
            # Set to lowest equal bit
            origin_anchor_bit = min(np.extract(sig_equal_sig_bits, np.arange(num_qubits)))
            dest_anchor_bit = origin_anchor_bit

        cnots = QuantumCircuit(num_qubits)
        # Step 3) Take the indices of bits which are sig_bits in
        # pauli but not in dest, and cnot them to the pauli anchor.
        for i in sig_in_origin_only_indices:
            if not i == origin_anchor_bit:
                cnots.cx(i, origin_anchor_bit)

        # Step 4)
        if not origin_anchor_bit == dest_anchor_bit:
            cnots.swap(origin_anchor_bit, dest_anchor_bit)

        # Need to do this or a Terra bug sometimes flips cnots. No time to investigate.
        cnots.i(0)

        # Step 6)
        for i in sig_in_dest_only_indices:
            if not i == dest_anchor_bit:
                cnots.cx(i, dest_anchor_bit)

        return PrimitiveOp(cnots)
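
# A small numeric sketch (plain numpy, illustrative only) of the parity insight
# in the docstring above: for a diagonal Pauli, a computational basis state is
# an eigenvector with eigenvalue 1 - 2 * (parity of the bits on the significant
# qubits).
import numpy as np

sig_bits = np.array([True, True, False])  # e.g. Z acting on the first two qubits
for bits in ([0, 0, 1], [1, 0, 0], [1, 1, 0]):
    parity = np.sum(np.array(bits)[sig_bits]) % 2
    print(bits, '->', 1 - 2 * parity)     # +1, -1, +1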
Example #48
0
def ubercal_skysub(tims,
                   targetwcs,
                   survey,
                   brickname,
                   bands,
                   mp,
                   subsky_radii=None,
                   plots=False,
                   plots2=False,
                   ps=None,
                   verbose=False):
    """With the ubercal option, we (1) read the full-field mosaics ('bandtims') for
    a given bandpass and put them all on the same 'system' using the overlapping
    pixels; (2) apply the derived corrections to the in-field 'tims'; (3) build
    the coadds (per bandpass) from the 'tims'; and (4) subtract the median sky
    from the mosaic (after aggressively masking objects and reference sources).

    """
    from tractor.sky import ConstantSky
    from legacypipe.reference import get_reference_sources, get_reference_map
    from legacypipe.coadds import make_coadds
    from legacypipe.survey import get_rgb, imsave_jpeg
    from astropy.stats import sigma_clipped_stats

    if plots or plots2:
        import os
        import matplotlib.pyplot as plt

    if plots:
        import matplotlib.patches as patches

        refs, _ = get_reference_sources(survey,
                                        targetwcs,
                                        targetwcs.pixel_scale(), ['r'],
                                        tycho_stars=False,
                                        gaia_stars=False,
                                        large_galaxies=True,
                                        star_clusters=False)

        pixscale = targetwcs.pixel_scale()
        width = targetwcs.get_width() * pixscale / 3600  # [degrees]
        bb, bbcc = targetwcs.radec_bounds(), targetwcs.radec_center()  # [degrees]
        pad = 0.5 * width  # [degrees]

        delta = np.max((np.diff(bb[0:2]), np.diff(bb[2:4]))) / 2 + pad / 2
        xlim = bbcc[0] - delta, bbcc[0] + delta
        ylim = bbcc[1] - delta, bbcc[1] + delta

        plt.clf()
        _, allax = plt.subplots(1,
                                3,
                                figsize=(12, 5),
                                sharey=True,
                                sharex=True)
        for ax, band in zip(allax, ('g', 'r', 'z')):
            ax.set_xlabel('RA (deg)')
            ax.text(0.9,
                    0.05,
                    band,
                    ha='center',
                    va='bottom',
                    transform=ax.transAxes,
                    fontsize=18)

            if band == 'g':
                ax.set_ylabel('Dec (deg)')
            ax.get_xaxis().get_major_formatter().set_useOffset(False)

            # individual CCDs
            these = np.where([tim.band == band for tim in tims])[0]
            col = plt.cm.Set1(np.linspace(0, 1, len(tims)))
            for ii, indx in enumerate(these):
                tim = tims[indx]
                #wcs = tim.subwcs
                wcs = tim.imobj.get_wcs()
                cc = wcs.radec_bounds()
                ax.add_patch(
                    patches.Rectangle((cc[0], cc[2]),
                                      cc[1] - cc[0],
                                      cc[3] - cc[2],
                                      fill=False,
                                      lw=2,
                                      edgecolor=col[these[ii]],
                                      label='ccd{:02d}'.format(these[ii])))
            ax.legend(ncol=2, frameon=False, loc='upper left', fontsize=10)

            # output mosaic footprint
            cc = targetwcs.radec_bounds()
            ax.add_patch(
                patches.Rectangle((cc[0], cc[2]),
                                  cc[1] - cc[0],
                                  cc[3] - cc[2],
                                  fill=False,
                                  lw=2,
                                  edgecolor='k'))

            if subsky_radii:
                racen, deccen = targetwcs.crval
                for rad in subsky_radii:
                    ax.add_patch(
                        patches.Circle((racen, deccen),
                                       rad / 3600,
                                       fill=False,
                                       edgecolor='black',
                                       lw=2))
            else:
                for gal in refs:
                    ax.add_patch(
                        patches.Circle((gal.ra, gal.dec),
                                       gal.radius,
                                       fill=False,
                                       edgecolor='black',
                                       lw=2))

            ax.set_ylim(ylim)
            ax.set_xlim(xlim)
            ax.invert_xaxis()
            ax.set_aspect('equal')

        plt.subplots_adjust(bottom=0.12,
                            wspace=0.05,
                            left=0.12,
                            right=0.97,
                            top=0.95)
        plt.savefig(
            os.path.join(survey.output_dir, 'metrics', 'cus',
                         '{}-ccdpos.jpg'.format(ps.basefn)))

    if plots:
        plt.figure(figsize=(8, 6))
        mods = []
        for tim in tims:
            imcopy = tim.getImage().copy()
            tim.sky.addTo(imcopy, -1)
            mods.append(imcopy)
        C = make_coadds(tims,
                        bands,
                        targetwcs,
                        mods=mods,
                        callback=None,
                        mp=mp)
        imsave_jpeg(os.path.join(survey.output_dir, 'metrics', 'cus',
                                 '{}-pipelinesky.jpg'.format(ps.basefn)),
                    get_rgb(C.comods, bands),
                    origin='lower')

    refs, _ = get_reference_sources(survey,
                                    targetwcs,
                                    targetwcs.pixel_scale(), ['r'],
                                    tycho_stars=True,
                                    gaia_stars=True,
                                    large_galaxies=True,
                                    star_clusters=True)
    refmask = get_reference_map(targetwcs, refs) == 0  # True=skypix
    skydict = {'radii': subsky_radii}

    allbands = np.array([tim.band for tim in tims])
    for band in sorted(set(allbands)):
        print('Working on band {}'.format(band))
        I = np.where(allbands == band)[0]

        bandtims = [
            tims[ii].imobj.get_tractor_image(gaussPsf=True,
                                             pixPsf=False,
                                             subsky=False,
                                             dq=True,
                                             apodize=False) for ii in I
        ]

        # Derive the ubercal correction and then apply it.
        x = coadds_ubercal(bandtims,
                           coaddtims=[tims[ii] for ii in I],
                           plots=plots,
                           plots2=plots2,
                           ps=ps,
                           verbose=True)
        skydict[band] = {'ccds': [tims[ii].name for ii in I], 'delta': x}

        # Apply the correction and return the tims
        for jj, (correction, ii) in enumerate(zip(x, I)):
            tims[ii].data += correction
            tims[ii].sky = ConstantSky(0.0)
            # Also correct the full-field mosaics
            bandtims[jj].data += correction
            bandtims[jj].sky = ConstantSky(0.0)

        ## Check--
        #for jj, correction in enumerate(x):
        #    fulltims[jj].data += correction
        #newcorrection = coadds_ubercal(fulltims)
        #print(newcorrection)

    H, W, pixscale = (targetwcs.get_height(), targetwcs.get_width(),
                      targetwcs.pixel_scale())

    C = make_coadds(tims,
                    bands,
                    targetwcs,
                    callback=None,
                    sbscale=False,
                    mp=mp)
    for coimg, coiv, band in zip(C.coimgs, C.cowimgs, bands):
        if subsky_radii:
            # Estimate the sky background from an annulus surrounding the object
            # (assumed to be at the center of the mosaic, targetwcs.crval).
            _, x0, y0 = targetwcs.radec2pixelxy(targetwcs.crval[0],
                                                targetwcs.crval[1])
            xcen, ycen = np.round(x0 - 1).astype('int'), np.round(y0 - 1).astype('int')
            ymask, xmask = np.ogrid[-ycen:H - ycen, -xcen:W - xcen]

            #cenmask = (xmask**2 + ymask**2) <= (subsky_radii[0] / pixscale)**2 # True=object pixels
            inmask = (xmask**2 + ymask**2) <= (subsky_radii[1] / pixscale)**2
            outmask = (xmask**2 + ymask**2) <= (subsky_radii[2] / pixscale)**2
            skymask = (outmask * 1 - inmask * 1) == 1  # True=skypix

            # Find and mask objects, then get the sky.
            skypix = _build_objmask(coimg, coiv, refmask * (coiv > 0))
            skypix = np.logical_and(skypix, skymask)
            #plt.imshow(skypix, origin='lower') ; plt.savefig('junk.png')
        else:
            skypix = refmask * (coiv > 0)
            skypix = _build_objmask(coimg, coiv, skypix)

        skymean, skymedian, skysig = sigma_clipped_stats(
            coimg, mask=np.logical_not(skypix), sigma=3.0)
        skydict[band].update({
            'mean': skymean,
            'median': skymedian,
            'sigma': skysig,
            'npix': np.sum(skypix)
        })

        I = np.where(allbands == band)[0]
        #print('Band', band, 'Coadd sky:', skymedian)

        if plots2:
            plt.clf()
            plt.hist(coimg.ravel(), bins=50, range=(-3, 3), density=True)
            plt.axvline(skymedian, color='k')
            for ii in I:
                #print('Tim', tims[ii], 'median', np.median(tims[ii].data))
                plt.hist((tims[ii].data - skymedian).ravel(),
                         bins=50,
                         range=(-3, 3),
                         histtype='step',
                         density=True)
            plt.title('Band %s: tim pix & skymedian' % band)
            ps.savefig()

            # Produce skymedian-subtracted, masked image for later RGB plot
            coimg -= skymedian
            coimg[~skypix] = 0.
            #coimg[np.logical_not(skymask * (coiv > 0))] = 0.

        for ii in I:
            tims[ii].data -= skymedian
            #print('Tim', tims[ii], 'after subtracting skymedian: median', np.median(tims[ii].data))

    if plots2:
        plt.clf()
        plt.imshow(get_rgb(C.coimgs, bands),
                   origin='lower',
                   interpolation='nearest')
        ps.savefig()

        for band in bands:
            for tim in tims:
                if tim.band != band:
                    continue
                plt.clf()
                C = make_coadds([tim],
                                bands,
                                targetwcs,
                                callback=None,
                                sbscale=False,
                                mp=mp)
                plt.imshow(get_rgb(C.coimgs, bands).sum(axis=2),
                           cmap='gray',
                           interpolation='nearest',
                           origin='lower')
                plt.title('Band %s: tim %s' % (band, tim.name))
                ps.savefig()

    if plots:
        C = make_coadds(tims, bands, targetwcs, callback=None, mp=mp)
        imsave_jpeg(os.path.join(survey.output_dir, 'metrics', 'cus',
                                 '{}-customsky.jpg'.format(ps.basefn)),
                    get_rgb(C.coimgs, bands),
                    origin='lower')

    if plots2:
        plt.clf()
        for coimg, band in zip(C.coimgs, bands):
            plt.hist(coimg.ravel(),
                     bins=50,
                     range=(-0.5, 0.5),
                     histtype='step',
                     label=band)
        plt.legend()
        plt.title('After adjustment: coadds (sb scaled)')
        ps.savefig()

    return tims, skydict
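
# A brief, self-contained sketch (toy geometry only) of the annulus sky mask
# built from subsky_radii in ubercal_skysub above: sky pixels are those inside
# the outer radius but outside the inner radius around the mosaic centre.
import numpy as np

H = W = 11
ycen = xcen = 5
ymask, xmask = np.ogrid[-ycen:H - ycen, -xcen:W - xcen]
inmask = (xmask**2 + ymask**2) <= 2**2
outmask = (xmask**2 + ymask**2) <= 4**2
skymask = (outmask * 1 - inmask * 1) == 1  # True only inside the annulus
print(skymask.sum())                       # number of annulus sky pixels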
    def sample_all(self,
                   root_path,
                   gt_boxes,
                   gt_names,
                   num_point_features,
                   random_crop=False,
                   gt_group_ids=None,
                   calib=None):
        sampled_num_dict = {}
        sample_num_per_class = []
        for class_name, max_sample_num in zip(self._sample_classes,
                                              self._sample_max_nums):
            sampled_num = int(max_sample_num -
                              np.sum([n == class_name for n in gt_names]))
            sampled_num = np.round(self._rate * sampled_num).astype(np.int64)
            sampled_num_dict[class_name] = sampled_num
            sample_num_per_class.append(sampled_num)

        sampled_groups = self._sample_classes
        if self._use_group_sampling:
            assert gt_group_ids is not None
            sampled_groups = []
            sample_num_per_class = []
            for group_name, class_names in self._group_name_to_names:
                sampled_nums_group = [sampled_num_dict[n] for n in class_names]
                sampled_num = np.max(sampled_nums_group)
                sample_num_per_class.append(sampled_num)
                sampled_groups.append(group_name)
            total_group_ids = gt_group_ids
        sampled = []
        sampled_gt_boxes = []
        avoid_coll_boxes = gt_boxes

        for class_name, sampled_num in zip(sampled_groups,
                                           sample_num_per_class):
            if sampled_num > 0:
                if self._use_group_sampling:
                    sampled_cls = self.sample_group(class_name, sampled_num,
                                                    avoid_coll_boxes,
                                                    total_group_ids)
                else:
                    sampled_cls = self.sample_class_v2(class_name, sampled_num,
                                                       avoid_coll_boxes)

                sampled += sampled_cls
                if len(sampled_cls) > 0:
                    if len(sampled_cls) == 1:
                        sampled_gt_box = sampled_cls[0]["box3d_lidar"][
                            np.newaxis, ...]
                    else:
                        sampled_gt_box = np.stack(
                            [s["box3d_lidar"] for s in sampled_cls], axis=0)

                    sampled_gt_boxes += [sampled_gt_box]
                    avoid_coll_boxes = np.concatenate(
                        [avoid_coll_boxes, sampled_gt_box], axis=0)
                    if self._use_group_sampling:
                        if len(sampled_cls) == 1:
                            sampled_group_ids = np.array(
                                sampled_cls[0]["group_id"])[np.newaxis, ...]
                        else:
                            sampled_group_ids = np.stack(
                                [s["group_id"] for s in sampled_cls], axis=0)
                        total_group_ids = np.concatenate(
                            [total_group_ids, sampled_group_ids], axis=0)

        if len(sampled) > 0:
            sampled_gt_boxes = np.concatenate(sampled_gt_boxes, axis=0)
            num_sampled = len(sampled)
            s_points_list = []
            for info in sampled:
                s_points = np.fromfile(str(
                    pathlib.Path(root_path) / info["path"]),
                                       dtype=np.float32)
                s_points = s_points.reshape([-1, num_point_features])
                # if not add_rgb_to_points:
                #     s_points = s_points[:, :4]
                if "rot_transform" in info:
                    rot = info["rot_transform"]
                    s_points[:, :3] = box_np_ops.rotation_points_single_angle(
                        s_points[:, :3], rot, axis=2)
                s_points[:, :3] += info["box3d_lidar"][:3]
                s_points_list.append(s_points)
                # print(pathlib.Path(info["path"]).stem)
            # gt_bboxes = np.stack([s["bbox"] for s in sampled], axis=0)
            # if np.random.choice([False, True], replace=False, p=[0.3, 0.7]):
            # do random crop.
            if random_crop:
                s_points_list_new = []
                assert calib is not None
                rect = calib["rect"]
                Trv2c = calib["Trv2c"]
                P2 = calib["P2"]
                gt_bboxes = box_np_ops.box3d_to_bbox(sampled_gt_boxes, rect,
                                                     Trv2c, P2)
                crop_frustums = prep.random_crop_frustum(
                    gt_bboxes, rect, Trv2c, P2)
                for i in range(crop_frustums.shape[0]):
                    s_points = s_points_list[i]
                    mask = prep.mask_points_in_corners(
                        s_points, crop_frustums[i:i + 1]).reshape(-1)
                    num_remove = np.sum(mask)
                    if num_remove > 0 and (s_points.shape[0] -
                                           num_remove) > 15:
                        s_points = s_points[np.logical_not(mask)]
                    s_points_list_new.append(s_points)
                s_points_list = s_points_list_new
            ret = {
                "gt_names": np.array([s["name"] for s in sampled]),
                "difficulty": np.array([s["difficulty"] for s in sampled]),
                "gt_boxes": sampled_gt_boxes,
                "points": np.concatenate(s_points_list, axis=0),
                "gt_masks": np.ones((num_sampled, ), dtype=np.bool_)
            }
            if self._use_group_sampling:
                ret["group_ids"] = np.array([s["group_id"] for s in sampled])
            else:
                ret["group_ids"] = np.arange(gt_boxes.shape[0],
                                             gt_boxes.shape[0] + len(sampled))
        else:
            ret = None
        return ret
Example #50
0
    def processAlgorithm(self, parameters, context, feedback):
        raster_layer = self.parameterAsRasterLayer(parameters, self.INPUT_DEM, context)
        target_crs = raster_layer.crs()
        rasterPath = raster_layer.source()

        source = self.parameterAsSource(parameters, self.BOUNDARY_LAYER, context)
        if source is None:
            raise QgsProcessingException(self.invalidSourceError(parameters, self.BOUNDARY_LAYER))

        step = self.parameterAsDouble(parameters, self.STEP, context)
        percentage = self.parameterAsBool(parameters, self.USE_PERCENTAGE, context)

        outputPath = self.parameterAsString(parameters, self.OUTPUT_DIRECTORY, context)

        rasterDS = gdal.Open(rasterPath, gdal.GA_ReadOnly)
        geoTransform = rasterDS.GetGeoTransform()
        rasterBand = rasterDS.GetRasterBand(1)
        noData = rasterBand.GetNoDataValue()

        cellXSize = abs(geoTransform[1])
        cellYSize = abs(geoTransform[5])
        rasterXSize = rasterDS.RasterXSize
        rasterYSize = rasterDS.RasterYSize

        rasterBBox = QgsRectangle(geoTransform[0],
                                  geoTransform[3] - cellYSize * rasterYSize,
                                  geoTransform[0] + cellXSize * rasterXSize,
                                  geoTransform[3])
        rasterGeom = QgsGeometry.fromRect(rasterBBox)

        crs = osr.SpatialReference()
        crs.ImportFromProj4(str(target_crs.toProj4()))

        memVectorDriver = ogr.GetDriverByName('Memory')
        memRasterDriver = gdal.GetDriverByName('MEM')

        features = source.getFeatures(QgsFeatureRequest().setDestinationCrs(target_crs, context.transformContext()))
        total = 100.0 / source.featureCount() if source.featureCount() else 0

        for current, f in enumerate(features):
            if not f.hasGeometry():
                continue

            if feedback.isCanceled():
                break

            geom = f.geometry()
            intersectedGeom = rasterGeom.intersection(geom)

            if intersectedGeom.isEmpty():
                feedback.pushInfo(
                    self.tr('Feature {0} does not intersect raster or '
                            'entirely located in NODATA area').format(f.id()))
                continue

            fName = os.path.join(
                outputPath, 'hystogram_%s_%s.csv' % (source.sourceName(), f.id()))

            ogrGeom = ogr.CreateGeometryFromWkt(intersectedGeom.asWkt())
            bbox = intersectedGeom.boundingBox()
            xMin = bbox.xMinimum()
            xMax = bbox.xMaximum()
            yMin = bbox.yMinimum()
            yMax = bbox.yMaximum()

            (startColumn, startRow) = raster.mapToPixel(xMin, yMax, geoTransform)
            (endColumn, endRow) = raster.mapToPixel(xMax, yMin, geoTransform)

            width = endColumn - startColumn
            height = endRow - startRow

            srcOffset = (startColumn, startRow, width, height)
            srcArray = rasterBand.ReadAsArray(*srcOffset)

            if srcOffset[2] == 0 or srcOffset[3] == 0:
                feedback.pushInfo(
                    self.tr('Feature {0} is smaller than raster '
                            'cell size').format(f.id()))
                continue

            newGeoTransform = (
                geoTransform[0] + srcOffset[0] * geoTransform[1],
                geoTransform[1],
                0.0,
                geoTransform[3] + srcOffset[1] * geoTransform[5],
                0.0,
                geoTransform[5]
            )

            memVDS = memVectorDriver.CreateDataSource('out')
            memLayer = memVDS.CreateLayer('poly', crs, ogr.wkbPolygon)

            ft = ogr.Feature(memLayer.GetLayerDefn())
            ft.SetGeometry(ogrGeom)
            memLayer.CreateFeature(ft)
            ft.Destroy()

            rasterizedDS = memRasterDriver.Create('', srcOffset[2],
                                                  srcOffset[3], 1, gdal.GDT_Byte)
            rasterizedDS.SetGeoTransform(newGeoTransform)
            gdal.RasterizeLayer(rasterizedDS, [1], memLayer, burn_values=[1])
            rasterizedArray = rasterizedDS.ReadAsArray()

            srcArray = numpy.nan_to_num(srcArray)
            masked = numpy.ma.MaskedArray(srcArray,
                                          mask=numpy.logical_or(srcArray == noData,
                                                                numpy.logical_not(rasterizedArray)))

            self.calculateHypsometry(f.id(), fName, feedback, masked,
                                     cellXSize, cellYSize, percentage, step)

            memVDS = None
            rasterizedDS = None
            feedback.setProgress(int(current * total))

        rasterDS = None

        return {self.OUTPUT_DIRECTORY: outputPath}
Example #51
0
def get_train_pairs(config, data):
    """Prepare training data.

    Args:
      batch_size: mini batch size
      images: list of img_count images. Each image is [width, height, depth, channel], [x,y,z,channel]
      trans_gt: 3D centre point of the ground truth plane wrt the volume centre as origin. [2, img_count, 3]. first dimension is the tv(0) or tc(1) plane
      rots_gt: Quaternions that rotate xy-plane to the GT plane. [2, img_count, 4]. first dimension is the tv(0) or tc(1) plane
      trans_frac: Percentage of middle volume to sample translation vector from. (0-1)
      max_euler: Maximum range of Euler angles to sample from. (+/- max_euler). [3]
      box_size: size of 2D plane. [x,y].
      input_plane: number of input planes (1 or 3)
      plane: TV(0) or TC(1)

    Returns:
      slices: 2D plane images. [batch_size, box_size[0], box_size[1], input_plane]
      actions_tran: [batch_size, 6] the GT classification probability for translation. Hard label, one-hot vector. Gives the axis along which to translate, i.e. the axis with the biggest distance to GT
      trans_diff: [batch_size, 3]. 3D centre point of the ground truth plane wrt the centre of the randomly sampled plane as origin.
      actions_rot:[batch_size, 6] the GT classification probability for rotation. Hard label, one-hot vector. Gives the axis about which to rotate, ie. rotation axis with biggest rotation angle.
      rots_diff: [batch_size, 4]. Rotation that maps the randomly sampled plane to the GT plane.

    """
    images = data.images
    trans_gt = data.trans_vecs
    rots_gt = data.quats
    batch_size = config.batch_size
    box_size = config.box_size
    input_plane = config.input_plane
    trans_frac = config.trans_frac
    max_euler = config.max_euler

    img_count = len(images)
    slices = np.zeros((batch_size, box_size[0], box_size[1], input_plane),
                      np.float32)
    trans_diff = np.zeros((batch_size, 3))
    trans = np.zeros((batch_size, 3))
    rots_diff = np.zeros((batch_size, 4))
    rots = np.zeros((batch_size, 4))
    euler = np.zeros(
        (batch_size, 6, 3)
    )  # 6 Euler angle conventions. 'sxyz', 'sxzy', 'syxz', 'syzx', 'szxy', 'szyx'
    actions_tran = np.zeros((batch_size, 6), np.float32)
    actions_rot = np.zeros((batch_size, 6), np.float32)

    # get image indices randomly for a mini-batch
    ind = np.random.randint(img_count, size=batch_size)

    # Random uniform sampling of Euler angles with restricted range
    euler_angles = geometry.sample_euler_angles_fix_range(
        batch_size, max_euler[0], max_euler[1], max_euler[2])

    for i in range(batch_size):
        image = np.squeeze(images[ind[i]])
        img_siz = np.array(image.shape)

        # GT translation and quaternions
        tran_gt = trans_gt[ind[i], :]
        rot_gt = rots_gt[ind[i], :]

        # Randomly sample translation (plane centre) and quaternions
        tran = (np.random.rand(3) * (img_siz * trans_frac) + img_siz *
                (1 - trans_frac) / 2.0) - ((img_siz - 1) / 2.0)
        trans[i, :] = tran
        rot = geometry.quaternion_from_euler(euler_angles[i, 0],
                                             euler_angles[i, 1],
                                             euler_angles[i, 2], 'rxyz')
        rots[i, :] = rot

        ##### Extract plane image #####
        # Initialise identity plane and get orthogonal planes
        if input_plane == 1:
            xyz_coords = plane.init_mesh_by_plane(box_size, 'z')
        elif input_plane == 3:
            xyz_coords = plane.init_mesh_ortho(box_size)

        # Rotate and translate plane
        mat = geometry.quaternion_matrix(rot)
        mat[:3, 3] = tran
        xyz_coords = np.matmul(mat, xyz_coords)

        # Extract image plane
        if input_plane == 1:
            slices[i, :, :, 0], _ = plane.extract_plane_from_mesh(
                image, xyz_coords, box_size, 1)
        elif input_plane == 3:
            slice_single, _ = plane.extract_plane_from_mesh_batch(
                image, xyz_coords, box_size, 1)
            slices[i] = np.transpose(slice_single, (1, 2, 0))

        ##### Compute GT labels #####
        # Translation and rotation regression outputs. Compute difference in tran and quat between sampled plane and GT plane (convert to rotation matrices first)
        mat_inv = geometry.inv_mat(mat)
        mat_gt = geometry.quaternion_matrix(rot_gt)
        mat_gt[:3, 3] = tran_gt
        mat_diff = np.matmul(mat_inv, mat_gt)
        trans_diff[i, :] = mat_diff[:3, 3]
        rots_diff[i, :] = geometry.quaternion_from_matrix(mat_diff,
                                                          isprecise=True)

        # Rotation classification output. Compute Euler angles for the six different conventions
        euler[i, 0, :] = np.array(
            geometry.euler_from_matrix(mat_diff, axes='sxyz'))
        euler[i, 1, :] = np.array(
            geometry.euler_from_matrix(mat_diff, axes='sxzy'))
        euler[i, 2, :] = np.array(
            geometry.euler_from_matrix(mat_diff, axes='syxz'))
        euler[i, 3, :] = np.array(
            geometry.euler_from_matrix(mat_diff, axes='syzx'))
        euler[i, 4, :] = np.array(
            geometry.euler_from_matrix(mat_diff, axes='szxy'))
        euler[i, 5, :] = np.array(
            geometry.euler_from_matrix(mat_diff, axes='szyx'))

    # Rotation classification output.
    max_ind_rot = np.argmax(np.abs(euler[:, :, 0]), axis=1)
    rot_x_max = np.logical_or(max_ind_rot == 0, max_ind_rot == 1)
    rot_y_max = np.logical_or(max_ind_rot == 2, max_ind_rot == 3)
    rot_z_max = np.logical_or(max_ind_rot == 4, max_ind_rot == 5)
    actions_ind_rot = np.zeros((batch_size), dtype=np.uint16)
    actions_ind_rot[rot_x_max] = 0
    actions_ind_rot[rot_y_max] = 1
    actions_ind_rot[rot_z_max] = 2
    max_euler = euler[np.arange(batch_size), max_ind_rot,
                      np.zeros(batch_size, dtype=np.uint16)]  # [batch_size]
    is_positive = (max_euler > 0)
    actions_ind_rot[is_positive] = actions_ind_rot[is_positive] * 2
    actions_ind_rot[np.logical_not(
        is_positive)] = actions_ind_rot[np.logical_not(is_positive)] * 2 + 1
    actions_rot[np.arange(batch_size), actions_ind_rot] = 1

    # Translation classification output
    max_ind_tran = np.argmax(np.abs(trans_diff), axis=1)  # [batch_size]
    max_trans_diff = trans_diff[np.arange(batch_size),
                                max_ind_tran]  # [batch_size]
    is_positive = (max_trans_diff > 0)
    actions_ind_tran = np.zeros((batch_size), dtype=np.uint16)
    actions_ind_tran[is_positive] = max_ind_tran[is_positive] * 2
    actions_ind_tran[np.logical_not(
        is_positive)] = max_ind_tran[np.logical_not(is_positive)] * 2 + 1
    actions_tran[np.arange(batch_size), actions_ind_tran] = 1

    return slices, actions_tran, trans_diff, actions_rot, rots_diff
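
The classification targets built at the end of get_train_pairs encode, for each sample, one of six actions: a positive or negative step along (or about) the x, y or z axis. Below is a standalone sketch of that sign-and-axis to one-hot encoding, using made-up translation offsets.

import numpy as np

batch_size = 4
# Made-up translation differences between sampled and GT planes, [batch_size, 3].
trans_diff = np.array([[ 0.5, -2.0,  1.0],
                       [-0.1,  0.2,  3.0],
                       [ 4.0,  1.0, -1.0],
                       [-0.3, -0.2,  0.1]])

actions_tran = np.zeros((batch_size, 6), np.float32)

# Axis with the largest absolute offset ...
max_ind_tran = np.argmax(np.abs(trans_diff), axis=1)
max_trans_diff = trans_diff[np.arange(batch_size), max_ind_tran]
# ... encoded as 2*axis for a positive offset, 2*axis + 1 for a negative one.
is_positive = max_trans_diff > 0
actions_ind_tran = np.zeros(batch_size, dtype=np.uint16)
actions_ind_tran[is_positive] = max_ind_tran[is_positive] * 2
actions_ind_tran[np.logical_not(is_positive)] = \
    max_ind_tran[np.logical_not(is_positive)] * 2 + 1
actions_tran[np.arange(batch_size), actions_ind_tran] = 1

print(actions_tran)
# Row 0: largest |offset| is -2.0 along y, so action index 2*1 + 1 = 3 is set.
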
Example #52
0
    def augment(self, sample, **kwargs):
        imgs = kwargs['imgs']

        # Find union of bounding boxes.
        self.bbox = dict()
        bbox = None
        for key in imgs:
            dim = self.spec[key][-3:]
            b = centered_box((0, 0, 0), dim)
            bbox = b if bbox is None else bbox.merge(b)
            self.bbox[key] = b

        # Create a mask.
        self.offset = bbox.min()
        self.dim = bbox.size()

        for key in imgs:
            # TODO(kisuk): Augmentation mask.
            # # Extract augmentation mask.
            # if key+'_augmask' in sample:
            #     msk = sample[key+'_mask'].astype('float32')
            # else:
            #     msk = np.zeros(sample[key].shape, 'float32')

            # Random box augmentation.
            count = 0
            density = self.max_density * np.random.rand()
            goal = bbox.volume() * density
            # DEBUG(kisuk):
            # print 'density: %.2f' % density
            while True:
                # Random location.
                m = self.min_dim  # Margin.
                z = np.random.randint(0, self.dim[0])
                y = np.random.randint(0, self.dim[1])
                x = np.random.randint(0, self.dim[2])
                loc = Vec3d(z, y, x) + self.offset
                # Random box size.
                dim = np.random.randint(self.min_dim, self.max_dim + 1, 3)
                # Anisotropy.
                dim[0] //= int(self.aspect_ratio)
                # Box.
                box = bbox.intersect(centered_box(loc, dim))
                # Local coordinate.
                box.translate(-self.offset)
                vmin = box.min()
                vmax = box.max()
                sz = box.size()

                # Random choice.
                enabled = self.mode > 0
                rule = np.random.rand(5)
                rule[np.logical_not(enabled)] = 0
                rule = rule >= rule.max()

                # Slices.
                # s0 = vmin[0]:vmax[0]
                # s1 = vmin[1]:vmax[1]
                # s2 = vmin[2]:vmax[2]
                s0 = slice(vmin[0], vmax[0])
                s1 = slice(vmin[1], vmax[1])
                s2 = slice(vmin[2], vmax[2])

                # (1) Random fill-out.
                if rule[0]:
                    assert enabled[0]
                    val = self.mode[0]  # Fill-out value.
                    if val > 1:
                        val = np.random.rand()
                    sample[key][..., s0, s1, s2] = val

                # (2) Alpha.
                if rule[1]:
                    assert enabled[1]
                    alpha = np.random.rand() * self.mode[1]
                    sample[key][..., s0, s1, s2] *= alpha

                # (3) Gaussian white noise (additive or multiplicative).
                if rule[2]:
                    assert enabled[2]
                    scale = self.mode[2]
                    if np.random.rand() < 0.5:
                        val = np.random.normal(loc=0.0, scale=scale, size=sz)
                        sample[key][..., s0, s1, s2] += val[...]
                    else:
                        val = np.random.normal(loc=1.0, scale=scale, size=sz)
                        sample[key][..., s0, s1, s2] *= val[...]

                # (4) Uniform white noise.
                if rule[3]:
                    assert enabled[3]
                    val = np.random.rand(sz[0], sz[1], sz[2])
                    # Random Gaussian blur.
                    sigma = [0, 0, 0]
                    sigma[0] = np.random.rand() * self.sigma_max
                    sigma[1] = np.random.rand() * self.sigma_max
                    sigma[2] = np.random.rand() * self.sigma_max
                    # Anisotropy.
                    sigma[0] /= self.aspect_ratio
                    val = gaussian_filter(val, sigma=sigma)
                    sample[key][..., s0, s1, s2] = val[...]

                # (5) 3D blur.
                if rule[4]:
                    assert enabled[4]
                    img = sample[key][..., s0, s1, s2]
                    # Random Gaussian blur.
                    sigma = [0] * img.ndim
                    sigma[-3] = np.random.rand() * self.sigma_max
                    sigma[-2] = np.random.rand() * self.sigma_max
                    sigma[-1] = np.random.rand() * self.sigma_max
                    # Anisotropy.
                    sigma[-3] /= self.aspect_ratio
                    img = gaussian_filter(img, sigma=sigma)
                    sample[key][..., s0, s1, s2] = img

                # # Update augmentation mask.
                # msk[...,s0,s1,s2] = 1

                # Stop condition.
                count += box.volume()
                if count > goal:
                    break

            # Clip.
            sample[key] = np.clip(sample[key], 0, 1)

            # # Augmentation mask.
            # sample[key+'_augmask'] = msk

        return sample
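
The random choice between the five box perturbations above boils down to drawing one uniform score per perturbation, zeroing the scores of disabled modes, and keeping the maximum. A small standalone sketch of that selection, with a made-up mode vector, is below.

import numpy as np

# Made-up mode vector: fill-out and 3D blur enabled, the rest disabled.
mode = np.array([1.0, 0.0, 0.0, 0.0, 1.0])
enabled = mode > 0

rule = np.random.rand(5)
rule[np.logical_not(enabled)] = 0   # disabled perturbations can never win
rule = rule >= rule.max()           # boolean vector, True only at the winning perturbation

print(rule)
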
Example #53
0
            #			add_mistake_limit = 30.0
            #			remove_mistake_limit = 30.0

            #perform region growing
            while True:

                #determine the current points and the neighboring points
                currentPoints = points[currentMask, :].copy()
                newMinDims = minDims.copy()
                newMaxDims = maxDims.copy()
                newMinDims -= 1
                newMaxDims += 1
                mask = np.logical_and(
                    np.all(point_voxels >= newMinDims, axis=1),
                    np.all(point_voxels <= newMaxDims, axis=1))
                mask = np.logical_and(mask, np.logical_not(currentMask))
                mask = np.logical_and(mask, np.logical_not(visited))

                #determine which points to accept
                expandPoints = points[mask, :].copy()
                expandClass = obj_id[mask] == target_id
                mask_idx = np.nonzero(mask)[0]
                if stuck:
                    expandID = mask_idx[expandClass]
                else:
                    #					mistake_sample = np.random.random(len(mask_idx)) < add_mistake_prob
                    mistake_sample = np.random.random(len(mask_idx)) < min(
                        add_mistake_prob, add_mistake_limit /
                        (len(mask_idx) + 1))
                    expand_with_mistake = np.logical_xor(
                        expandClass, mistake_sample)
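
The truncated region-growing snippet above decides which frontier points to accept by taking the true class labels and flipping a random subset of them (simulated annotation mistakes) with logical_xor. A self-contained sketch of that decision, with made-up labels and a fixed mistake probability, is below.

import numpy as np

np.random.seed(0)

# Made-up frontier: candidate point indices and whether each truly belongs to the target object.
mask_idx = np.arange(10)
expandClass = np.array([1, 1, 0, 0, 1, 0, 1, 1, 0, 0], dtype=bool)

add_mistake_prob = 0.2
mistake_sample = np.random.random(len(mask_idx)) < add_mistake_prob

# XOR flips the accept/reject decision exactly where a mistake is sampled.
expand_with_mistake = np.logical_xor(expandClass, mistake_sample)
expandID = mask_idx[expand_with_mistake]

print(expandID)
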
Example #54
0
File: train.py Project: tsbertalan/gudrun
datafile_path = '/home/tsbertalan/Dropbox/data/gudrun/tp_apartment_labels.npz'
data_npz = np.load(datafile_path, allow_pickle=True)
if 'X' in data_npz:
    X = data_npz['X']
    Y = data_npz['Y']
else:
    assert 'images' in data_npz, list(data_npz.keys())
    X = data_npz['images']
    Y = data_npz['label_images']
    X = np.array([x for (x,y) in zip(X, Y) if len(y) > 0])
    from functools import reduce
    Y = np.array([reduce(np.logical_and, [a.astype('bool') for a in y]) for y in Y if len(y) > 0])

if len(Y.shape) == 3:
    Y = np.stack([
        Y, np.logical_not(Y)
    ], axis=-1)

X = X.astype('float32')
Y = Y.astype('bool')

describe('X', X)
describe('Y', Y)

## Define a couple plotting functions.

#  Some default colors.
_colors = np.array([
    [0.88798955, 0.17623193, 0.42475507],
    [0.37974051, 0.69121509, 0.61483237],
    [0.02141717, 0.48236297, 0.0180039 ],
Example #55
0
def prepare_data(uko,upar,minimal,cline_s,minimaly,rho):
	arc = coasti.arc_length(cline_s)
	
	# New coordinates
	along 	= np.zeros((minimal.shape[0],1000))
	across	= np.zeros((minimal.shape[0],1000))	
	
	# Variables that carry the new coordinates
	raw 	= Args()
	full	= Args()
	raw.upar	= np.zeros((minimal.shape[0],1000))
	raw.rho		= np.zeros((minimal.shape[0],1000))
	full.upar	= np.zeros((minimal.shape[0],1000))
	full.rho	= np.zeros((minimal.shape[0],1000))
		
	for y in range(len(arc)):
		along[y,:] = arc[y]
	
	for x in range(1000):
		across[:,x] = -1+x*0.01
	
	mini = np.around(minimal,2)
	
	for y in range(len(arc)):
		for x in range(minimal.shape[1]):
			if uko.mask[y,x] == False and mini[y,x] < 10 and mini[y,x] > -2 and mini[y,x] != 0:
				a = across[y,:]-mini[y,x]
				tmp = np.where(abs(a)<0.005)
				tmp = tmp[0]
				if len(tmp) != 0:
					tmp = tmp[0]
				#print tmp
					tmp2 = np.int16(minimaly[y,x])
					raw.upar[tmp2,tmp] = upar[y,x]
					raw.rho[tmp2,tmp]  = rho[y,x]
				#else:
					#print("no hit")
	data_raw_upar = raw.upar.copy()
	data_raw_rho = raw.rho.copy()
	for y in range(len(arc)):
		c1 = np.zeros((raw.upar.shape[1]))
		c1[:] = raw.upar[y,:]
		c2 = np.zeros((raw.rho.shape[1]))
		c2[:] = raw.rho[y,:]
		np.reshape(c1, len(c1))
		bad_indexes = (c1==0)
		c1[bad_indexes] = np.nan
		c2[bad_indexes] = np.nan
		good_indexes = np.logical_not(bad_indexes)
		good_data1 = c1[good_indexes].copy()
		good_data2 = c2[good_indexes].copy()
		if good_data1.size:
			interpolated1 = np.interp(bad_indexes.nonzero()[0], good_indexes.nonzero()[0], good_data1)
			c1[bad_indexes] = interpolated1
			interpolated2 = np.interp(bad_indexes.nonzero()[0], good_indexes.nonzero()[0], good_data2)
			c2[bad_indexes] = interpolated2
			full.upar[y,:] = c1.copy()
			full.rho[y,:] = c2.copy()
		
		tmp = np.nonzero(data_raw_upar[y,:])
		tmp = tmp[0]
		if tmp.size:
			tmp1 = min(tmp)
			full.upar[y,0:tmp1-1] = 0.
			tmp2 = max(tmp)
			full.upar[y,tmp2+1:full.upar.shape[1]] = 0.
		
		tmp = np.nonzero(data_raw_rho[y,:])
		tmp = tmp[0]
		if tmp.size:
			tmp1 = min(tmp)
			full.rho[y,0:tmp1-1] = np.nan
			tmp2 = max(tmp)
			full.rho[y,tmp2+1:full.rho.shape[1]] = np.nan
	
	tmp = data_raw_upar[10,:]
	index, value = min(enumerate(tmp), key=operator.itemgetter(1))
	ref = across[10,index]

	
	acr = across.copy()
	for y in range(across.shape[0]):
		tmp = data_raw_upar[y,:]
		index, value = min(enumerate(tmp), key=operator.itemgetter(1))
		diff = across[y,index] - ref
		acr[y,:] = across[y,:] -diff
	#acr=data_raw.rho - full.rho
	#acr=0
	return [along,across,acr,raw,full]
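
The gap-filling step in prepare_data treats zeros as missing samples, interpolates across them with np.interp, and later blanks everything outside the original data extent. A minimal sketch of that interpolation over a single made-up row is below.

import numpy as np

# Made-up row with zeros marking missing samples.
c1 = np.array([0., 0., 1.0, 0., 0., 2.0, 0., 3.0, 0., 0.])

bad_indexes = (c1 == 0)
c1[bad_indexes] = np.nan
good_indexes = np.logical_not(bad_indexes)
good_data = c1[good_indexes]

if good_data.size:
    # Linear interpolation of the missing positions from the valid ones;
    # np.interp holds the end values constant outside the valid range.
    c1[bad_indexes] = np.interp(bad_indexes.nonzero()[0],
                                good_indexes.nonzero()[0],
                                good_data)

print(c1)
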
Example #56
0
def get_user_aligned_reachable_top_n_items(H, ns, immutable_items=[],
                           ratings=[], mutable_items=[], 
                           reg=0, bias=None, constraints=None, changet=False):
    '''
    quick evaluation of reachability by checking whether
        i = argmax_j q_j^T v with v = argmin |q_i - v|
    FORMERLY: v = argmax q_i^T v / |v|
    this is a sufficient but not necessary condition for reachability.

    H: matrix factorization item model
    n: top-n
    immutable_items: indices for which user ratings cannot be changed
    ratings: corresponding ratings
    mutable_items: indices for which users can change ratings
    method: item-based (no user to consider) or not
    reg: l2 regularization in model
    bias: (item-bias, user-bias, b0): additional item/user/overall bias in model
    '''
    n_latent_features, n_items = H.shape

    # TODO: should assert disjoint sets
    full_support = np.concatenate([immutable_items,mutable_items]).astype(int)
    Q = H.T
    B = Q[full_support].T.dot(Q[full_support])+reg*np.eye(n_latent_features)
    Binv = scipy.linalg.inv(B) # TODO: faster?
    A = Binv.dot(Q[mutable_items].T) 

    r_vec = np.zeros(n_latent_features)
    if len(immutable_items) > 0:
        r_vec += Q[immutable_items].T.dot(ratings)
    if bias is not None:
        item_bias, user_bias, b0 = bias
        r_vec -= Q[full_support].T.dot(item_bias[full_support]+user_bias+b0)
    v0 = Binv.dot(r_vec)

    if constraints is None: 
        AAt = A.dot(scipy.linalg.pinv(A))
        M = Q.dot(AAt).dot(Q.T) + Q.dot(v0 - AAt.dot(v0)).reshape(1,-1)
        if bias is not None: 
            item_bias, _, _ = bias
            M += item_bias.reshape(1,-1)

    elif constraints is not None:
        lb, ub = constraints
        if changet:
            test_vs = []
            for i in range(n_items):
                # doesn't seem to make a difference once we add the lower bound of 1
                Als = np.hstack([A, -Q[i][:,np.newaxis]])
                bls = -v0

                bounds=([lb]*len(mutable_items)+[1], [ub]*len(mutable_items)+[np.inf])
                res = scipy.optimize.lsq_linear(Als, bls, bounds=bounds)
                vi = A.dot(res['x'][:len(mutable_items)])-v0
                test_vs.append(vi)
            V = np.array(test_vs).T
        else:
            # TODO: would vectorizing over items be faster in this case?
            # or implement matrix constrained least squares
            test_vs = []
            for i in range(n_items):
                Als = A
                bls = Q[i]-v0

                bounds=(lb,ub)
                res = scipy.optimize.lsq_linear(Als, bls, bounds=bounds)
                vi = A.dot(res['x'])-v0
                test_vs.append(vi)
            V = np.array(test_vs).T
        M = Q.dot(V)
        if bias is not None: 
                item_bias, _, _ = bias
                M += item_bias.reshape(1,-1)

    else:
        # NOTE: this branch is unreachable as written; the if/elif above
        # already cover both the None and not-None cases for `constraints`.
        M = np.zeros([n_items,n_items]); Q = H.T
        B = Q[full_support].T.dot(Q[full_support])+reg*np.eye(n_latent_features)
        Binv = scipy.linalg.inv(B) # TODO: faster?
        A = Binv.dot(Q[mutable_items].T)

        # QProjA = (Q A) (A.T A)^-1 A.T
        # (A.T A)^-1 A.T = A^dagger = Q^dagger B
        Qdecomp, Rdecomp = np.linalg.qr(A)
        ProjA = Qdecomp.dot(A.T)

        QProjA = Q.dot(ProjA)
        QProjAnorm = np.linalg.norm(QProjA, axis=1)

        r_vec = np.zeros(n_latent_features)
        if len(immutable_items) > 0:
            r_vec += Q[immutable_items].T.dot(ratings)
        if bias is not None:
            item_bias, user_bias, b0 = bias
            r_vec -= Q[full_support].T.dot(item_bias[full_support]+user_bias+b0)
        else:
            item_bias = np.zeros(n_items)
        v0 = Binv.dot(r_vec)

        v0perp = ProjA.dot(v0)-v0
        v0perp_norm = np.linalg.norm(v0perp)
        Qv0perp = Q.dot(v0perp)

        C = Qv0perp + item_bias

        condition1 = C > 0
        condition2 = np.all(v0 == 0) * (item_bias <= 0)
        condition3 = np.logical_not(np.logical_or(condition1, condition2))
        # print(sum(condition1), sum(condition2), sum(condition3))

        if sum(condition1) > 0:
            M[condition1] = C.reshape(1,-1)
        if sum(condition2) > 0:
            M[condition2] = QProjA[condition2].dot(Q.T)

        if sum(condition3) > 0:
            M[condition3] = v0perp_norm * QProjA[condition3].dot(Q.T) + np.abs(C[condition3,np.newaxis]).dot(C[:,np.newaxis].T) / v0perp_norm
            M[condition3] /= np.sqrt(np.power(QProjAnorm[condition3],2)*v0perp_norm**2 +
                                 np.power(C[condition3],2) )[:,np.newaxis]

    M[:,full_support] = -np.inf

    argsort = M.argsort(axis=1)
    ret = []
    for n in ns:
        reachable_items = np.unique(argsort[:,-n:])
        reachable_items = [i for i in reachable_items if i not in full_support]
        ret.append(reachable_items)
    return ret
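
A hedged usage sketch (not from the original project) of the helper above on a tiny random factorization model; the shapes, regularization value and item index choices are made up, it exercises only the unconstrained branch, and it assumes the function and its numpy/scipy imports are in scope.

import numpy as np

np.random.seed(0)

n_latent_features, n_items = 5, 30
H = np.random.randn(n_latent_features, n_items)   # item factor matrix

immutable_items = [0, 1, 2]          # items the user has already rated
ratings = np.array([4.0, 2.0, 5.0])  # corresponding ratings
mutable_items = [3, 4, 5, 6]         # items whose ratings the user may change

reachable = get_user_aligned_reachable_top_n_items(
    H, ns=[1, 5, 10],
    immutable_items=immutable_items,
    ratings=ratings,
    mutable_items=mutable_items,
    reg=0.1)

for n, items in zip([1, 5, 10], reachable):
    print('top-%d reachable items: %d' % (n, len(items)))
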
Example #57
0
    def _sparse_fit(self, X, strategy, missing_values, axis):
        """Fit the transformer on sparse data."""
        # Imputation is done "by column", so if we want to do it
        # by row we only need to convert the matrix to csr format.
        if axis == 1:
            X = X.tocsr()
        else:
            X = X.tocsc()

        # Count the zeros
        if missing_values == 0:
            n_zeros_axis = np.zeros(X.shape[not axis], dtype=int)
        else:
            n_zeros_axis = X.shape[axis] - np.diff(X.indptr)

        # Mean
        if strategy == "mean":
            if missing_values != 0:
                n_non_missing = n_zeros_axis

                # Mask the missing elements
                mask_missing_values = _get_mask(X.data, missing_values)
                mask_valids = np.logical_not(mask_missing_values)

                # Sum only the valid elements
                new_data = X.data.copy()
                new_data[mask_missing_values] = 0
                X = sparse.csc_matrix((new_data, X.indices, X.indptr),
                                      copy=False)
                sums = X.sum(axis=0)

                # Count the elements != 0
                mask_non_zeros = sparse.csc_matrix(
                    (mask_valids.astype(np.float64),
                     X.indices,
                     X.indptr), copy=False)
                s = mask_non_zeros.sum(axis=0)
                n_non_missing = np.add(n_non_missing, s)

            else:
                sums = X.sum(axis=axis)
                n_non_missing = np.diff(X.indptr)

            # Ignore the error, columns with a np.nan statistics_
            # are not an error at this point. These columns will
            # be removed in transform
            with np.errstate(all="ignore"):
                return np.ravel(sums) / np.ravel(n_non_missing)

        # Median + Most frequent
        else:
            # Remove the missing values, for each column
            columns_all = np.hsplit(X.data, X.indptr[1:-1])
            mask_missing_values = _get_mask(X.data, missing_values)
            mask_valids = np.hsplit(np.logical_not(mask_missing_values),
                                    X.indptr[1:-1])

            # astype necessary for bug in numpy.hsplit before v1.9
            columns = [col[astype(mask, bool, copy=False)]
                       for col, mask in zip(columns_all, mask_valids)]

            # Median
            if strategy == "median":
                median = np.empty(len(columns))
                for i, column in enumerate(columns):
                    median[i] = _get_median(column, n_zeros_axis[i])

                return median

            # Most frequent
            elif strategy == "most_frequent":
                most_frequent = np.empty(len(columns))

                for i, column in enumerate(columns):
                    most_frequent[i] = _most_frequent(column,
                                                      0,
                                                      n_zeros_axis[i])

                return most_frequent
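
The "mean" branch above sums the non-missing stored entries per column and divides by the count of valid elements. Below is a self-contained numpy/scipy sketch of that computation on a small CSC matrix; it uses NaN as the missing marker, a trivial stand-in for sklearn's private _get_mask helper, and ignores the separate handling of explicitly stored zeros.

import numpy as np
from scipy import sparse

# Small matrix with NaNs marking missing values.
X_dense = np.array([[1.0, np.nan, 3.0],
                    [4.0, 5.0, np.nan],
                    [np.nan, 7.0, 9.0]])
X = sparse.csc_matrix(X_dense)

# Stand-in for sklearn's private _get_mask helper.
mask_missing_values = np.isnan(X.data)
mask_valids = np.logical_not(mask_missing_values)

# Zero out missing entries so they do not contribute to the column sums.
new_data = X.data.copy()
new_data[mask_missing_values] = 0
X_zeroed = sparse.csc_matrix((new_data, X.indices, X.indptr), shape=X.shape)
sums = np.ravel(X_zeroed.sum(axis=0))

# Count valid (non-missing) stored entries per column.
mask_non_zeros = sparse.csc_matrix(
    (mask_valids.astype(np.float64), X.indices, X.indptr), shape=X.shape)
n_non_missing = np.ravel(mask_non_zeros.sum(axis=0))

print(sums / n_non_missing)   # per-column means over observed values -> [2.5, 6.0, 6.0]
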
Example #58
0
File: surf_fill.py Project: bpinsard/misc
def make_5tt(parc_file, lh_white, rh_white, lh_pial, rh_pial, subdiv=4):
    parc = nb.load(parc_file)
    voxsize = np.asarray(parc.header.get_zooms()[:3])
    parc_data = parc.get_data()
    lh_wm = read_surf(lh_white, parc)
    rh_wm = read_surf(rh_white, parc)
    lh_gm = read_surf(lh_pial, parc)
    rh_gm = read_surf(rh_pial, parc)

    def fill_hemis(lh_surf, rh_surf):
        vertices = np.vstack([lh_surf[0], rh_surf[0]])
        tris = np.vstack([lh_surf[1], rh_surf[1] + lh_surf[0].shape[0]])
        pve_voxsize = voxsize / float(subdiv)
        mat = parc.affine.dot(np.diag([1 / float(subdiv)] * 3 + [1]))
        shape = np.asarray(parc.shape) * subdiv
        fill = surf_fill_vtk(vertices, tris, mat, shape)
        pve = reduce(
            lambda x, y: x + fill[y[0]::subdiv, y[1]::subdiv, y[2]::subdiv],
            np.mgrid[:subdiv, :subdiv, :subdiv].reshape(3, -1).T,
            0).astype(np.float32)
        pve /= float(subdiv**3)
        return pve

    wm_pve = fill_hemis(lh_wm, rh_wm)
    gm_pve = fill_hemis(lh_gm, rh_gm)

    def group_rois(rois_ids):
        m = np.zeros(parc.shape, dtype=np.bool)
        for i in rois_ids:
            np.logical_or(parc_data == i, m, m)
        return m

    gm_rois = group_rois([8, 47, 17, 18, 53, 54]).astype(np.float32)
    gm_smooth = scipy.ndimage.gaussian_filter(gm_rois, sigma=voxsize)

    subcort_rois = group_rois([10, 11, 12, 13, 26, 49, 50, 51, 52,
                               58]).astype(np.float32)
    subcort_smooth = scipy.ndimage.gaussian_filter(subcort_rois, sigma=voxsize)

    wm_rois = group_rois(
        [7, 16, 28, 46, 60, 85, 192, 88, 250, 251, 252, 253, 254,
         255]).astype(np.float32)

    wm_smooth = scipy.ndimage.gaussian_filter(wm_rois, sigma=voxsize)

    # remove csf at the end of brainstem for streamlines to medulla
    # suppose hcp orientation storage
    bs_mask = parc_data == 16
    bs_vdc_dil = scipy.ndimage.morphology.binary_dilation(group_rois(
        [16, 60, 28]),
                                                          iterations=2)
    # mask of boundary between brainstem and cerebellar gray matter
    bs_vdc_excl = np.logical_and(
        bs_vdc_dil,
        np.logical_not(group_rois([16, 7, 46, 60, 28, 10, 49, 2, 41, 0])))

    lbs = np.where((bs_mask).any(-1).any(0))[0][-1] - 3

    parc_data_mask = parc_data > 0
    outer_csf = np.logical_and(
        np.logical_not(parc_data_mask),
        scipy.ndimage.morphology.binary_dilation(parc_data_mask))

    ## create a fake GM ROI at the end of the brainstem for cerebro-spinal tracking
    nb.save(nb.Nifti1Image(outer_csf.astype(np.int32), parc.affine),
            'outer_csf.nii')

    csf_rois = group_rois([4, 5, 14, 15, 24, 30, 31, 43, 44, 62, 63, 72])
    nb.save(nb.Nifti1Image(csf_rois.astype(np.int32), parc.affine),
            'csf_rois.nii')

    csf_smooth = scipy.ndimage.gaussian_filter(np.logical_or(
        csf_rois, outer_csf).astype(np.float32),
                                               sigma=voxsize)
    nb.save(nb.Nifti1Image(csf_smooth, parc.affine), 'csf_smooth.nii')

    bs_roi = csf_smooth.copy()
    bs_roi[..., :lbs, :] = 0
    csf_smooth[..., lbs:, :] = 0
    wm_smooth[..., lbs:, :] = 0

    # add csf around brainstem and ventral DC to remove direct connection to gray matter
    csf_smooth[bs_vdc_excl] += gm_smooth[bs_vdc_excl]
    gm_smooth[bs_vdc_excl] = 0

    mask88 = parc_data == 88
    print csf_smooth[mask88].sum(), subcort_smooth[mask88].sum()

    #    csf_smooth -= wm_smooth
    #    csf_smooth[csf_smooth<0]=0

    nb.save(nb.Nifti1Image(wm_pve, parc.affine), 'wm_pve.nii')
    nb.save(nb.Nifti1Image(wm_smooth, parc.affine), 'wm_smooth.nii')
    nb.save(nb.Nifti1Image(subcort_smooth, parc.affine), 'subcort_smooth.nii')

    wm = wm_pve + wm_smooth - csf_smooth - subcort_smooth
    wm[wm > 1] = 1
    wm[wm < 0] = 0

    print 267, np.count_nonzero(wm[mask88])

    gm = gm_pve - wm_pve - wm - subcort_smooth + gm_smooth + bs_roi
    gm[gm < 0] = 0

    tt5 = np.concatenate([
        gm[..., np.newaxis], subcort_smooth[..., np.newaxis],
        wm[..., np.newaxis], csf_smooth[..., np.newaxis],
        np.zeros(parc.shape + (1, ), dtype=np.float32)
    ], 3)

    tt5 /= tt5.sum(-1)[..., np.newaxis]
    tt5[np.isnan(tt5)] = 0

    return nb.Nifti1Image(tt5.astype(np.float32), parc.affine)
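
The nested group_rois helper above builds a boolean mask that is True wherever the parcellation carries any of the requested label IDs, accumulating with the in-place output form of np.logical_or. A tiny standalone sketch on a made-up 3x3 label image:

import numpy as np

# Made-up parcellation with a few label IDs.
parc_data = np.array([[10, 0, 16],
                      [49, 7, 0],
                      [0, 46, 10]])

def group_rois(rois_ids):
    # True wherever the voxel label is any of the requested IDs.
    m = np.zeros(parc_data.shape, dtype=bool)
    for i in rois_ids:
        np.logical_or(parc_data == i, m, out=m)   # accumulate in place
    return m

print(group_rois([10, 49]).astype(int))
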
Example #59
0
def raster_to_struct_grid(datain, imeas='covar', rtol=1e-10):
    """Create an auto-variogram or auto-covariance map from 1D or 2D rasters.
    This computes auto-variogram or auto-covariance maps from
    1D or 2D rasters. This function computes variograms/covariances in the
    frequency domain via the Fast Fourier Transform (``np.fftn``).

    Note:
        For viewing the results, please use the ``plot_struct_grid`` method
        from the ``plots`` module.

    Note:
        Missing values, flagged as ``np.nan``, are allowed.

    Args:
        datain (np.ndarray): input array with raster in GeoEas format
        imeas (str): key indicating which structural measure to compute:
            ``'var'`` for semi-variogram or ``'covar'`` for covariogram.
        rtol (float): the tolerance. Default is 1e-10

    Return:
        tuple(np.ndarray, np.ndarray):
            output array with variogram or covariogram map, depending
            on variogram choice, with size: in 1D: ( 2*nxOutHalf+1 ) or in 2D:
            ( 2*nxOutHalf+1 x 2*nxOutHalf+1 ).

            output array with number of pairs available in each lag,
            of same size as outStruct

    References:
        Originally implemented in MATLAB by:
            Phaedon Kyriakidis,
            Department of Geography,
            University of California Santa Barbara,
            May 2005

        Reimplemented into Python by:
            Jonah Bartrand,
            Department of Geophysics,
            Colorado School of Mines,
            October 2018

        Algorithm based on:
            Marcotte, D. (1996): Fast Variogram Computation with FFT,
            Computers & Geosciences, 22(10), 1175-1186.
    """
    # Check imeas
    itypes = ['covar', 'var']
    if isinstance(imeas, int) and imeas < 2 and imeas > -1:
        imeas = itypes[imeas]
    if imeas not in itypes:
        raise RuntimeError(
            "imeas argument must be one of 'covar' for covariogram or 'var' for semi-variance. Not {}"
            .format(imeas))

    data_dims = datain.shape
    nDim = len(data_dims)

    ## Get appropriate dimensions
    # pad to 2*d - 1 samples per dimension so that every lag is represented;
    # rounding up to a multiple of 8 (commented alternative) would trade memory for FFT speed
    out_dims = [2 * d - 1 for d in data_dims
                ]  #[int(np.ceil((2*d-1)/8)*8) for d in data_dims]

    ## Form an indicator matrix:
    # 0's for all data values, 1's for missing values
    missing_data_ind = np.isnan(datain)
    data_loc_ind = np.logical_not(missing_data_ind)
    # In data matrix, replace missing values by 0;
    datain[missing_data_ind] = 0  # missing replaced by 0

    ## FFT of datain
    fD = np.fft.fftn(datain, s=out_dims)

    ## FFT of datain*datain
    fDD = np.fft.fftn(datain * datain, s=out_dims)

    ## FFT of the indicator matrix
    fI = np.fft.fftn(data_loc_ind, s=out_dims)

    ## FFT of datain*indicator
    fID = np.fft.fftn(datain * data_loc_ind, s=out_dims)

    ## Compute number of pairs at all lags
    outNpairs = np.real(np.fft.ifftn(np.abs(fI)**2)).astype(int)
    # Note: the MATLAB original cast this result to single precision:
    #   outNpairs = single(outNpairs);

    cov = np.real(
        np.fft.ifftn(np.abs(fD)**2) / np.fft.ifftn(np.abs(fI)**2) -
        np.fft.ifftn(np.conj(fD) * fI) * np.fft.ifftn(np.conj(fI) * fD) /
        (np.fft.ifftn(np.abs(fI)**2))**2)

    if imeas == 'var':
        outStruct = np.max(cov) - cov
    else:
        outStruct = cov

    ## Reduce matrix to required size and shift,
    # so that the 0 lag appears at the center of each matrix

    unpad_ind = [[int(d / 2), int(3 * d / 2)] for d in data_dims]
    unpad_list = [np.arange(*l) for l in unpad_ind]
    unpad_coord = np.meshgrid(*unpad_list, indexing='ij')

    outStruct = np.fft.fftshift(outStruct)[tuple(unpad_coord)]
    outNpairs = np.fft.fftshift(outNpairs)[tuple(unpad_coord)]

    indzeros = outNpairs < (np.max(outNpairs) * rtol)
    outStruct[indzeros] = np.nan

    return outStruct, outNpairs
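
A hedged usage sketch of raster_to_struct_grid on a small random raster with a few NaN holes; it only inspects shapes and the zero-lag pair count, and assumes the function above and numpy (as np) are in scope.

import numpy as np

np.random.seed(0)

# Small random raster with a few missing cells.
datain = np.random.randn(16, 16)
datain[3, 4] = np.nan
datain[10, 2] = np.nan

n_valid = np.count_nonzero(np.logical_not(np.isnan(datain)))

# The function replaces NaNs in-place, so pass a copy if the input must be preserved.
out_struct, out_npairs = raster_to_struct_grid(datain.copy(), imeas='covar')

print(out_struct.shape, out_npairs.shape)  # same shape as the input raster
# The largest pair count sits at zero lag and should match the number of
# valid cells, up to one count of FFT round-off from the integer cast.
print(out_npairs.max(), n_valid)
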
Example #60
0
    def transform(self, X):
        """Impute all missing values in X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            The input data to complete.
        """
        if self.axis == 0:
            check_is_fitted(self, 'statistics_')

        # Since two different arrays can be provided in fit(X) and
        # transform(X), the imputation data need to be recomputed
        # when the imputation is done per sample
        if self.axis == 1:
            X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES,
                            force_all_finite=False, copy=self.copy)

            if sparse.issparse(X):
                statistics = self._sparse_fit(X,
                                              self.strategy,
                                              self.missing_values,
                                              self.axis)

            else:
                statistics = self._dense_fit(X,
                                             self.strategy,
                                             self.missing_values,
                                             self.axis)
        else:
            X = check_array(X, accept_sparse='csc', dtype=FLOAT_DTYPES,
                            force_all_finite=False, copy=self.copy)
            statistics = self.statistics_

        # Delete the invalid rows/columns
        invalid_mask = np.isnan(statistics)
        valid_mask = np.logical_not(invalid_mask)
        valid_statistics = statistics[valid_mask]
        valid_statistics_indexes = np.where(valid_mask)[0]
        missing = np.arange(X.shape[not self.axis])[invalid_mask]

        if self.axis == 0 and invalid_mask.any():
            if self.verbose:
                warnings.warn("Deleting features without "
                              "observed values: %s" % missing)
            X = X[:, valid_statistics_indexes]
        elif self.axis == 1 and invalid_mask.any():
            raise ValueError("Some rows only contain "
                             "missing values: %s" % missing)

        # Do actual imputation
        if sparse.issparse(X) and self.missing_values != 0:
            mask = _get_mask(X.data, self.missing_values)
            indexes = np.repeat(np.arange(len(X.indptr) - 1, dtype=np.int),
                                np.diff(X.indptr))[mask]

            X.data[mask] = astype(valid_statistics[indexes], X.dtype,
                                  copy=False)
        else:
            if sparse.issparse(X):
                X = X.toarray()

            mask = _get_mask(X, self.missing_values)
            n_missing = np.sum(mask, axis=self.axis)
            values = np.repeat(valid_statistics, n_missing)

            if self.axis == 0:
                coordinates = np.where(mask.transpose())[::-1]
            else:
                coordinates = mask

            X[coordinates] = values

        return X
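
The dense branch at the end fills all missing cells at once: it counts the missing entries per column, repeats each column statistic that many times, and writes the values back through the transposed-mask coordinates so the ordering matches. A small standalone sketch of that trick, with made-up data and column means standing in for the fitted statistics_:

import numpy as np

X = np.array([[1.0, np.nan, 3.0],
              [np.nan, np.nan, 6.0],
              [7.0, 8.0, np.nan]])

# Column means over the observed values (stand-in for the fitted statistics_).
valid_statistics = np.array([4.0, 8.0, 4.5])

mask = np.isnan(X)                    # stand-in for _get_mask(X, missing_values)
n_missing = np.sum(mask, axis=0)      # missing count per column: [1, 2, 1]
values = np.repeat(valid_statistics, n_missing)   # [4.0, 8.0, 8.0, 4.5]

# Transposing the mask before np.where yields column-major coordinates,
# matching the column-wise order of `values`.
coordinates = np.where(mask.transpose())[::-1]
X[coordinates] = values

print(X)
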