Code example #1
 def crossentropy(predictions: np.ndarray, targets: np.ndarray) -> float:
     assert predictions.__len__() == targets.__len__()
     J = 0.0
     m = targets.__len__()
     for target, prediction in zip(targets, predictions):
         J -= np.sum(target * np.log(prediction) +
                     (1 - target) * np.log(1 - prediction))
     return J / m
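
A minimal usage sketch (not from the original project), assuming `crossentropy` is callable as a plain function and that `predictions` and `targets` are arrays of the same length holding one-hot rows and probabilities strictly inside (0, 1); it only illustrates the per-sample averaging.

import numpy as np

# hypothetical inputs: 3 samples, 2 classes
targets = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0]])
predictions = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
loss = crossentropy(predictions, targets)  # mean binary cross-entropy over the 3 samples
print(loss)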
Code example #2
File: util.py Project: radinhamidi/CNN-Denoiser
def crossValidate(data: np.ndarray, split1, split2):
    data = np.asarray(data)
    m = data.__len__()
    idx = np.random.permutation(m)
    data = data[idx]
    return data[:int(split1 * m), :], data[int(split1 * m):int(
        (split1 + split2) * m), :], data[int((split1 + split2) * m):, :], idx
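
A minimal usage sketch, assuming `data` holds one sample per row; `split1` and `split2` are treated as the training and validation fractions, the remainder forms the third split, and the permutation indices are returned so the shuffle can be reproduced.

import numpy as np

data = np.random.rand(100, 8)  # hypothetical dataset: 100 samples, 8 features
train, val, test, idx = crossValidate(data, 0.7, 0.15)
print(train.shape, val.shape, test.shape)  # (70, 8) (15, 8) (15, 8)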
Code example #3
def compare_result(a: ndarray, x1: ndarray, x2: ndarray):
    print("x\tEquation1\tEquation1\teps")
    for i in range(x1.__len__()):
        print(f"{i + 1}\t{x1[i]}\t{x2[i]}\t{x1[i] - x2[i]}")
    print()
    cond, delta = cond_delta(a, x1, x2)
    print(f"Cond\t{cond}")
    print(f"Delta\t{delta}")
Code example #4
def get_composition_just_tang(this_tang: ndarray,
                              include_padding=False,
                              max_inversion_distance: float = 0.0):
    tokens: List[tuple] = []
    padding = []
    start_index = 0
    currently_clustering = False
    big_endian = True
    last_bit_position = 0

    # Consider each element in the TANG. The TANG is an ndarray with index being bit position from the
    # original CAN data. The cell value is the observed transition frequency for that bit position.
    for i, bit_position in enumerate(nditer(this_tang)):
        # Is this a padding bit?
        if bit_position <= 0.000001:
            padding.append(i)
            # Are we clustering padding bits? If so, proceed to the normal clustering logic. Else, do the following.
            if not include_padding:
                if currently_clustering:
                    # This is padding, we're already clustering, and we're not clustering padding; save the token.
                    tokens.append((start_index, i - 1))
                    currently_clustering = False
                    start_index = i + 1
                    last_bit_position = bit_position
                continue

        # Are we still enlarging the current token?
        if currently_clustering:
            if bit_position >= last_bit_position and big_endian:
                pass
            elif bit_position <= last_bit_position and not big_endian:
                pass
            # Are we allowing inversions (max_inversion_distance > 0)? If so, check if this inversion is acceptable.
            elif abs(bit_position -
                     last_bit_position) <= max_inversion_distance:
                pass
            # Is this the second bit position we need to establish the endian of the signal?
            elif start_index == i - 1:
                if bit_position >= last_bit_position:
                    big_endian = True
                else:
                    big_endian = False
            # This is an unacceptable transition frequency inversion, save the current token and start a new one
            else:
                tokens.append((start_index, i - 1))
                start_index = i
        # We aren't currently clustering and we intend to cluster this bit position
        else:
            currently_clustering = True
            start_index = i

        last_bit_position = bit_position

    # We reached the last bit position while clustering. Add this final token.
    if currently_clustering:
        tokens.append((start_index, this_tang.__len__() - 1))

    return tokens, padding
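
A minimal driver sketch for the clustering above, assuming the snippet's implied imports (numpy's `nditer`, `typing.List`) are in place; the array below is hypothetical. Each cell holds the transition frequency of one bit position, zeros count as padding, and contiguous runs with a consistent frequency trend come back as (start, end) token ranges.

import numpy as np

# hypothetical TANG: bits 0-2 rise monotonically, bits 3-4 are padding, bits 5-7 fall monotonically
tang = np.array([0.1, 0.2, 0.3, 0.0, 0.0, 0.9, 0.5, 0.2])
tokens, padding = get_composition_just_tang(tang)
print(tokens)   # [(0, 2), (5, 7)]
print(padding)  # [3, 4]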
Code example #5
 def __init__(self, node_input: np.ndarray, af: Activation,
              bias: bool):
     self.__node_input = node_input
     self.__is_biased = bias
     self.__weight = np.empty(node_input.__len__())
     self.__weight.fill(0)
     self.__bias = 0
     self.af = af
     self.__output = 0
     self.local_gradient = 0
Code example #6
File: util.py Project: radinhamidi/CNN-Denoiser
def kfold(data: np.ndarray, k):
    m = data.__len__()
    idx = np.random.permutation(m)
    kfoldData = []
    for i in range(k):
        training = [x for j, x in enumerate(idx) if j % k != i]
        validation = [x for j, x in enumerate(idx) if j % k == i]
        train = data[training]
        test = data[validation]
        kfoldData.append([train, test])
    return np.asarray(kfoldData), idx
Code example #7
 def error_energy_per_pattern(self, target: np.ndarray):
     self.forward_pass()
     n = self._nodes.__len__()
     summation = 0
     if target.__len__() == n:
         for i in range(0, n):
             e = target[i] - self._nodes[i].output
             summation = summation + e * e
         return 0.5 * summation
     else:
         return -1
Code example #8
 def backward_pass(self, target: np.ndarray):
     n = self._nodes.__len__()
     if target.__len__() == n:
         for i in range(0, n):
             a = self._nodes[i].output
             act = self._nodes[i].af
             if act == Activation.SIGMOID:
                 self._nodes[i].local_gradient = (target[i] -
                                                  a) * a * (1 - a)
             elif act == Activation.TANH:
                 self._nodes[i].local_gradient = (target[i] -
                                                  a) * (1 + a) * (1 - a)
Code example #9
 def accuracy(predictions: np.ndarray, targets: np.ndarray) -> float:
     # target is (n,1) with values in 1, ..., m,
     # predictions is (n,m) with values between 0 and 1
     epsilon = 1e-6
     targetValues = binary_vector_to_class(targets)
     predictionValues = binary_vector_to_class(predictions)
     predictionAccuracy = 0.0
     for prediction, target in zip(predictionValues, targetValues):
         # print(prediction,target)
         if abs(prediction - target) < epsilon:
             predictionAccuracy += 1.0
     return predictionAccuracy / float(targets.__len__())
Code example #10
def _draw_inequal_dataset(
    target: np.ndarray, class_sample_nums: Dict[int, int], excluded_index: List[int]
) -> np.ndarray:
    available_index = list(set(list(range(target.__len__()))) - set(excluded_index))
    return_list: List[int] = []
    for _target, sample_num in class_sample_nums.items():
        _target_index = np.where(target == _target)[0].tolist()
        _available_index = list(set(available_index) & set(_target_index))
        return_list.extend(
            np.random.permutation(_available_index)[:sample_num].tolist()
        )
    assert set(excluded_index) & set(return_list) == set()
    return np.array(return_list)
Code example #11
 def __init__(self, sphere_id: int, tiles: list, num_of_tile: int,
              resources: np.ndarray):
     self.sphere_id: int = int(sphere_id)
     self.num_of_tile = num_of_tile
     self.tiles = tiles  # size of fov [x, y]
     self.transmission_mask = np.ones(self.num_of_tile, dtype=bool)
     if resources.size == 0:
         self.resource = np.arange(0, self.tiles[0] * self.tiles[1])
     else:
         if type(resources).__module__ != np.__name__:
             raise TypeError("Resource input should be numpy array")
         if self.num_of_tile != resources.__len__():
             raise ValueError("Size of resources not equal")
         if len(resources.shape) != 1:
             raise ValueError("Resource dimension should be 1")
         self.resource = resources
     self.its_resource = self.resource
Code example #12
def sixty_fps(rate, data:np.ndarray):
     def average(data, index, extract_count):
          sum = 0
          for i in range(extract_count):
               sum += data[index - i]
          return sum / extract_count

     extract_count = int(rate / 1000)
     extracted_list = list()
     index = extract_count
     while index < data.__len__():
          bit = data[index]
          # bit = average(data, index, extract_count)
          extracted_list.append(bit)
          index += extract_count
     ret = np.array(extracted_list)
     return ret
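
A minimal usage sketch with a hypothetical signal: for a 48000 Hz input, `extract_count` is int(48000 / 1000) = 48, so every 48th sample is kept (about 1000 values per second of input).

import numpy as np

rate = 48000                     # hypothetical sample rate in Hz
data = np.random.rand(rate * 2)  # two seconds of hypothetical samples
downsampled = sixty_fps(rate, data)
print(downsampled.shape)         # (1999,): one sample kept out of every 48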
Code example #13
def find_sea_monster(stitched_image: np.ndarray):
    """Checks for sea-monsters in the given `stitched_image`"""
    correct_orientation = False  # flags whether the image is oriented correctly,
    # i.e. whether the given `stitched_image` contains any sea-monsters
    #
    # offsets of pixels that must also be "#", given as (row_offset, col_offset) relative to the leftmost
    # pixel of the sea-monster pattern -> represents the "#" pattern of a sea-monster
    offsets = [(0, 0), (1, 1), (1, 4), (0, 5), (0, 6), (1, 7), (1, 10),
               (0, 11), (0, 12), (1, 13), (1, 16), (0, 17), (0, 18), (0, 19),
               (-1, 18)]
    # loop through all rows (the first and last rows are ignored, because sea-monsters are identified starting from
    # the leftmost pixel in the second row of the sea-monster pattern)
    for row_index in range(1, stitched_image.__len__() - 1):
        # loop through all columns (the last 19 columns are ignored because a sea-monster is 20 pixels long and thus
        # the starting pixel has to be >= 20 pixels away from right border)
        for col_index in range(0, stitched_image[row_index].__len__() - 19):
            # flag if current pixel at [row_index][col_index] can be a starting pixel of a sea-monster
            possible = True
            # go through all offsets to check (from the starting pixel)
            for row_off, col_off in offsets:
                # if there is no "#" at the offset positions
                if stitched_image[row_index + row_off][col_index +
                                                       col_off] != "#":
                    # there is no sea monster starting at the starting pixel
                    possible = False
                    # break out (because other offsets don't need to be checked if one has already failed)
                    break
            # if starting pixel is the beginning of a sea-monster
            if possible:
                # mark all the "#" in it with "O" (to later count the residual "#")
                for row_off, col_off in offsets:
                    stitched_image[row_index + row_off][col_index +
                                                        col_off] = "O"
                    # set the flag for 'correct_orientation' to True to signal that the given image is
                    # rotated correctly and does contain one or more sea-monsters
                    correct_orientation = True
    # return if given image was oriented correctly
    return correct_orientation
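
A minimal sketch with a hypothetical 3x20 character image that contains exactly one sea-monster; it only shows that the function flags the orientation and overwrites the monster's "#" pixels with "O" in place.

import numpy as np

image = np.array([list(row) for row in [
    "..................#.",
    "#....##....##....###",
    ".#..#..#..#..#..#...",
]])
print(find_sea_monster(image))  # True - the single monster is found and marked with "O"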
Code example #14
 def weight(self, value: np.ndarray):
     if value.__len__() == self.__weight.__len__():
         self.__weight = value
Code example #15
File: mser.py Project: xhlove/SubTimeLine
def filter_box(bboxes: np.ndarray, img: np.ndarray, half_width: float):
    if bboxes.__len__() == 0:
        return bboxes, (0, 0, 0, 0), 0
    # sort the boxes from left to right by their x position
    bboxes = sorted(bboxes, key=lambda box: box[0])
    # print(bboxes)
    # draw_box(img, bboxes, title="MSER")
    # if two boxes intersect, merge them; note the boxes are already sorted by x
    _bboxes = [bboxes[0]]
    box_x, box_y, box_w, box_h = bboxes[0]
    for x, y, w, h in bboxes[1:]:
        if x >= box_x and x <= box_x + box_w:
            if y + h >= box_y and y + h <= box_y + box_h:
                box = [
                    box_x,
                    min(box_y, y),
                    max(x + w, box_x + box_w) - box_x,
                    box_y + box_h - min(box_y, y)
                ]
                box_x, box_y, box_w, box_h = box
                _bboxes[-1] = box
            elif y >= box_y and y <= box_y + box_h:
                box = [
                    box_x, box_y,
                    max(x + w, box_x + box_w) - box_x,
                    max(y + h, box_y + box_h) - box_y
                ]
                box_x, box_y, box_w, box_h = box
                _bboxes[-1] = box
            else:
                # this box overlaps the target box in x but not in y, so discard it
                pass
        else:
            box_x, box_y, box_w, box_h = x, y, w, h
            _bboxes.append([x, y, w, h])
    # print(_bboxes)
    # draw_box(img, _bboxes, title="MSER_CONCAT")
    if len(_bboxes) > 1:
        # with only one box this step is unnecessary
        bboxes = filter_extreme_box(_bboxes, half_width)
        # draw_box(img, bboxes, title="MSER_RM_EXTREME")
    else:
        bboxes = _bboxes
    xs, ys, ws, hs, _xs, _wh = [], [], [], [], [], []
    _ = [(xs.append(x), ys.append(y), ws.append(x + w), hs.append(y + h),
          _xs.append([x, x + w]), _wh.append(w / h)) for x, y, w, h in bboxes]
    # symmetry check; some subtitles are not symmetric, so this is not enabled for now
    data_distances = [(((x + w) / 2) - half_width) / half_width
                      for x, w in zip(xs, ws)]
    if data_distances.__len__() > 2:
        # if all boxes sit on one side, treat the frame as having no subtitles
        _data_distances = np.array(data_distances, dtype="float64")
        zero_reduce = _data_distances[_data_distances < 0].shape[0]
        zero_plus = _data_distances[_data_distances > 0].shape[0]
        if zero_plus == 0 or zero_reduce == 0:
            return (), (0, 0, 0, 0), 0
    # no need to include boxes that are very close
    # data_distances = [abs(_) for _ in data_distances if abs(_) > 0.1]
    # if data_distances.__len__() > 1:
    #     max_distance = np.median(data_distances) * 1.5
    #     # print(f"data_distances -> {data_distances} max_distance -> {max_distance}")
    #     remove_indices = [index for index, distance in enumerate(data_distances) if distance > max_distance or _wh[index] > 4]
    #     bboxes = [box for index, box in enumerate(bboxes) if index not in remove_indices]
    if bboxes.__len__() == 0:
        return bboxes, (0, 0, 0, 0), 0
    if bboxes.__len__() == 1:
        max_space = 0
    if bboxes.__len__() > 1:
        # this only makes sense when there is more than one box
        xs, ys, ws, hs, _xs = [], [], [], [], []
        _ = [(xs.append(x), ys.append(y), ws.append(x + w), hs.append(y + h),
              _xs.append([x, x + w])) for x, y, w, h in bboxes]
        _xs = sorted(_xs, key=lambda x: x[0])
        max_space = max(
            [_xs[i][0] - _xs[i - 1][1] for i in range(1, _xs.__len__())])
    return bboxes, (xs, ys, ws, hs), max_space
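
A minimal sketch with two hypothetical, overlapping MSER boxes given as (x, y, w, h); they merge into a single box, so the `filter_extreme_box` helper (not shown here) is never reached and the call stays self-contained. `half_width` is half the frame width, and `img` is only used by the commented-out drawing calls.

import numpy as np

img = np.zeros((240, 320), dtype=np.uint8)               # hypothetical frame
bboxes = np.array([[10, 50, 30, 20], [35, 52, 30, 18]])  # two overlapping candidate boxes
boxes, (xs, ys, ws, hs), max_space = filter_box(bboxes, img, half_width=160)
print(boxes)      # one merged box: x=10, y=50, w=55, h=20
print(max_space)  # 0 - a single box has no horizontal gaps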
Code example #16
def binary_confusion_matrix(
        prob: np.ndarray,
        gt_label_bool: np.ndarray,
        num_bins: int = 1024,
        bin_strategy='uniform',  # : Literal['uniform', 'percentiles'] = 'uniform',
        normalize: bool = False,
        dtype=np.float64):

    area = gt_label_bool.__len__()

    gt_area_true = np.count_nonzero(gt_label_bool)
    gt_area_false = area - gt_area_true

    prob_at_true = prob[gt_label_bool]
    prob_at_false = prob[~gt_label_bool]

    if bin_strategy == 'uniform':
        # bins spread uniformly over 0 .. 1
        bins = num_bins
        histogram_range = [0, 1]

    elif bin_strategy == 'percentiles':
        # dynamic bins representing the range of occurring values
        # bin edges follow the distribution of positive and negative pixels

        bins = [
            [0, 1],  # make sure 0 and 1 are included
        ]

        if prob_at_true.size:
            bins += [
                np.quantile(
                    prob_at_true,
                    np.linspace(0, 1, min(num_bins // 2, prob_at_true.size))),
            ]
        if prob_at_false.size:
            bins += [
                np.quantile(
                    prob_at_false,
                    np.linspace(0, 1, min(num_bins // 2, prob_at_false.size))),
            ]

        bins = np.concatenate(bins)

        # sort and remove duplicates; duplicates cause an exception in np.histogram
        bins = np.unique(bins)

        histogram_range = None

    # the area of positive pixels is divided into
    #	- true positives - above threshold
    #	- false negatives - below threshold
    tp_rel, _ = np.histogram(prob_at_true, bins=bins, range=histogram_range)
    # the curve goes from higher thresholds to lower thresholds
    tp_rel = tp_rel[::-1]
    # cumsum to get number of tp at given threshold
    tp = np.cumsum(tp_rel)
    # GT-positives which are not TP are instead FN
    fn = gt_area_true - tp

    # the area of negative pixels is divided into
    #	- false positives - above threshold
    #	- true negatives - below threshold
    fp_rel, bin_edges = np.histogram(prob_at_false,
                                     bins=bins,
                                     range=histogram_range)
    # the curve goes from higher thresholds to lower thresholds
    bin_edges = bin_edges[::-1]
    fp_rel = fp_rel[::-1]
    # cumsum to get number of fp at given threshold
    fp = np.cumsum(fp_rel)
    # GT-negatives which are not FP are instead TN
    tn = gt_area_false - fp

    cmat_sum = np.array([
        [tp, fp],
        [fn, tn],
    ]).transpose(2, 0, 1).astype(dtype)

    # cmat_rel = np.array([
    # 	[tp_rel, fp_rel],
    # 	[-tp_rel, -fp_rel],
    # ]).transpose(2, 0, 1).astype(dtype)

    if normalize:
        cmat_sum *= (1. / area)
        # cmat_rel *= (1./area)

    return EasyDict(
        bin_edges=bin_edges,
        cmat_sum=cmat_sum,
        # cmat_rel = cmat_rel,
        tp_rel=tp_rel,
        fp_rel=fp_rel,
        num_pos=gt_area_true,
        num_neg=gt_area_false,
    )
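
A minimal usage sketch with synthetic scores, assuming the snippet's own imports are available (`EasyDict` from the easydict package, numpy as np). With the default 'uniform' strategy, `cmat_sum[i]` is the 2x2 confusion matrix [[tp, fp], [fn, tn]] at the i-th (descending) threshold.

import numpy as np

rng = np.random.default_rng(0)
prob = rng.random(10_000)                              # hypothetical per-pixel scores in [0, 1]
gt = (prob + 0.1 * rng.standard_normal(10_000)) > 0.5  # hypothetical boolean ground-truth mask
result = binary_confusion_matrix(prob, gt, num_bins=256)
print(result.cmat_sum.shape)                           # (256, 2, 2): one confusion matrix per bin
print(result.num_pos, result.num_neg)                  # counts of positive and negative pixels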
Code example #17
 def squared_error(predictions: np.ndarray, targets: np.ndarray) -> float:
     assert predictions.__len__() == targets.__len__()
     m = targets.__len__()
     diff = (predictions - targets)
     return sum(diff[:] * diff[:]) / 2.0 / m
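
A minimal usage sketch with one-dimensional arrays, assuming `squared_error` is callable as a plain function: the result is the sum of squared differences divided by 2m, i.e. half the mean squared error.

import numpy as np

targets = np.array([1.0, 0.0, 1.0, 1.0])
predictions = np.array([0.9, 0.2, 0.8, 0.6])
print(squared_error(predictions, targets))  # (0.01 + 0.04 + 0.04 + 0.16) / (2 * 4) ≈ 0.03125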