Example #1
    def run_step(self, vehicle: Vehicle, new_data: np.array):
        """
        Assumes that the caller sets self.curr_depth_img before invoking this method.
        On the first run, the input depth image is stored as self._test_depth_img.
        On the second run, the prediction matrix is calculated.
        On subsequent runs, the prediction matrix is used to find the ground plane.


        Args:
            vehicle: current vehicle state
            new_data: current frame for this detector

        Returns:
            None

        """
        super(SemanticSegmentationDetector, self).run_step(vehicle, new_data)
        if self._test_depth_img is None:
            self._test_depth_img = png_to_depth(new_data)
            return
        elif self._predict_matrix is None:
            # try to calibrate on the second frame received
            xs = []
            data = []
            depth_array = png_to_depth(new_data)
            # depth_image = calibration image, grab from somewhere

            for i in range(self._sky_line_level + 10, depth_array.shape[0]):
                j = np.argmax(depth_array[i, :])

                if depth_array[i][j] > self._min_caliberation_boundary:
                    xs.append(i)
                    data.append(depth_array[i][j])
            a, b, c, p, q = self.fit(np.array(xs, dtype=np.float64),
                                     np.array(data, dtype=np.float64))
            test_image = self._test_depth_img
            pred_func = self.construct_f(a, b, c, p, q)
            rows = np.meshgrid(np.arange(test_image.shape[1]),
                               np.arange(test_image.shape[0]))[1]
            self._predict_matrix = pred_func(rows)
            return
        else:
            # png_to_depth turns the frame into a 2D np array of shape (Width x Height)
            depth_array = png_to_depth(new_data.copy())
            semantic_seg = np.zeros(shape=np.shape(new_data))

            # find sky and ground
            sky = np.where(depth_array == 1)
            ground = np.where(
                np.abs(depth_array - self._predict_matrix) >
                self._max_detectable_distance_threshold)

            semantic_seg[ground] = [255, 255, 255]
            semantic_seg[sky] = [255, 0, 0]  # BGR???
            self.semantic_segmentation = semantic_seg
Example #2
def bitwise_xor(frame: np.array, mask: np.array):
    """
    Generates bitwise xor for a frame and mask.
    :param frame:
    :param mask:
    :return:
    """
    frame = frame.copy()
    return cv2.bitwise_xor(frame, frame, mask=mask)
Example #3
def square_invert(A: np.array) -> np.array:
    """Вычисление обратной матрицы методом квадратного корня."""

    A = A.copy()
    E = np.identity(len(A))
    inv_A = np.zeros(A.shape)
    for i in range(len(A)):
        inv_A[i] = square_root_method(A, E[i], True)
    return inv_A
Example #4
def find_sudoku(img: np.array, debug=False):
    # Convert image to grayscale and add blur
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_blurred = cv2.GaussianBlur(img_gray, (7, 7), 3)

    # Apply inverted binary adaptive thresholding
    img_thresh = cv2.adaptiveThreshold(img_blurred, 255,
                                       cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY_INV, 11, 2)

    if debug:
        cv2.imshow("Sudoku with Threshold Filter", img_thresh)
        cv2.waitKey(0)

    # Find contours in thresholded image
    contours, _ = cv2.findContours(img_thresh.copy(), cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea,
                      reverse=True)  # largest contour is first element

    # Find outer contour
    sudoku_contour = None
    for c in contours:
        # Approximation of contour
        perimeter = cv2.arcLength(c, True)
        approximation = cv2.approxPolyDP(
            c, 0.02 * perimeter,
            True)  # use perimeter of contour for approximation accuracy

        # Assume the first contour with 4 points to be the outline of the grid
        if len(approximation) == 4:
            sudoku_contour = approximation
            break

    # No outline found
    if sudoku_contour is None:
        raise Exception("Could not find Sudoku grid outline.")

    # Show debug output
    if debug:
        output = img.copy()
        cv2.drawContours(output, [sudoku_contour], -1, (0, 255, 0), 2)
        cv2.imshow("Sudoku Outline", output)
        cv2.waitKey(0)

    # Apply four point perspective transform to obtain a top-down perspective
    img_sudoku = perspective.four_point_transform(img,
                                                  sudoku_contour.reshape(4, 2))
    img_gray = perspective.four_point_transform(img_gray,
                                                sudoku_contour.reshape(4, 2))

    if debug:
        cv2.imshow("Sudoku Transform", img_sudoku)
        cv2.waitKey(0)

    # Return a tuple of the Sudoku in color, in grayscale, and its outline contour
    return (img_sudoku, img_gray, sudoku_contour)
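A minimal call sketch for find_sudoku; the file name "sudoku.jpg" is only a placeholder, and the defining module is assumed to import cv2 and imutils.perspective as the snippet requires.

import cv2

img = cv2.imread("sudoku.jpg")  # placeholder path to a photo of a Sudoku grid
puzzle_color, puzzle_gray, outline = find_sudoku(img, debug=False)
print(puzzle_color.shape, outline.shape)  # warped top-down grid and its 4-point contour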
Example #5
 def predict(self, X: np.array) -> np.array:
     """
     predict function
     :param X: input data of shape (num_data_points, dim)
     :return: np.array of shape (num_data_points) with class labels
     """
     for layer in self.layers:
         X = layer.forward(X.copy())
     return np.argmax(X, axis=1)
Example #6
def bitwise_and(frame: np.array, mask: np.array):
    """
    Generates bitwise and for a frame and mask.
    :param frame:
    :param mask:
    :return: Frame with either black or white
    """
    frame = frame.copy()
    return cv2.bitwise_and(frame, frame, mask=mask)
Example #7
def get_boundaries(arr: np.array, reduction: int) -> np.array:
    """Retrieves the boundaries (midpoints between consecutive unique values), keeping every `reduction`-th one"""

    unique = np.unique(arr.copy())
    x = unique[:-1]
    y = unique[1:]
    boundary = (x + y) / 2

    return boundary[::reduction]
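A quick usage sketch: the boundaries are the midpoints between consecutive unique values, thinned to every `reduction`-th entry.

import numpy as np

arr = np.array([1, 3, 3, 7, 9])
print(get_boundaries(arr, reduction=1))  # [2. 5. 8.] - midpoints between unique values
print(get_boundaries(arr, reduction=2))  # [2. 8.]    - every second boundary kept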
Example #8
def un_one_hot(one_hot: np.array, temperature: float = 1) -> np.array:
    weights = one_hot.copy().flatten()
    for i in range(0, weights.shape[0]):
        weights[i] = m.exp(m.log(weights[i]) / temperature)
    s = np.sum(weights)
    for i in range(0, weights.shape[0]):
        weights[i] = weights[i] / s
    result = np.random.choice(weights.shape[0], p=weights)
    return result
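A usage sketch, assuming `m` is the math module imported in the defining file. Note that a strict one-hot vector would hit m.log(0), so the input here is a soft probability vector; the function samples an index after temperature-scaling the weights.

import numpy as np

np.random.seed(0)
probs = np.array([0.1, 0.2, 0.7])
print(un_one_hot(probs, temperature=0.5))  # low temperature sharpens the distribution towards index 2
print(un_one_hot(probs, temperature=5.0))  # high temperature flattens it towards uniform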
Example #9
    def array(cls,
              x_arr: np.array,
              allow_diagonal: bool = True,
              background: int = 0) -> List[np.array]:
        """
        :param x_arr: np.array(int), array to split
        :param allow_diagonal: bool, whether or not regard diagonal connected
        :param background: int, must be one of 0-9
        :return: List[np.array(np.int)]
        """
        res_list = []
        r, c = x_arr.shape
        con_map = np.zeros((r, c), dtype=int)
        ind = 0
        for i in range(r):
            for j in range(c):
                if x_arr[i, j] != background and con_map[i, j] == 0:
                    # start search
                    ind += 1
                    queue = deque()
                    queue.append((i, j))
                    con_map[i, j] = ind
                    while len(queue) > 0:
                        p = queue.popleft()
                        pi, pj = p
                        for q in neighbors(p, r, c, allow_diagonal):
                            qi, qj = q
                            if (x_arr[pi, pj] == x_arr[qi, qj] != background
                                    and con_map[qi, qj] == 0):
                                con_map[qi, qj] = ind
                                queue.append((qi, qj))

        # trivial case
        if ind == 0:
            return [x_arr.copy()]

        for s in range(ind):
            x_arr_s = x_arr.copy()
            x_arr_s[con_map != s + 1] = background
            res_list.append(x_arr_s)

        return list(
            sorted(res_list, key=lambda res: -(res != background).sum()))
Example #10
def build_image(img_base: np.array) -> np.array:
    '''
    Builds image array from base image and objects.
    '''

    img = img_base.copy()
    for x in fighters:
        x.step(img)

    return img
Example #11
 def forward(self, X: np.array) -> np.array:
     """
     compute softmax on given input
     :param X: np.array of size (num_data_points, num_classes)
     :return: np.array of softmax values for every datapoint
     """
     X = np.exp(X - np.max(X, axis=1).reshape(-1, 1))
     X /= np.sum(X, axis=1).reshape(-1, 1)
     self.last_output = X.copy()
     return X
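A standalone sketch of the max-subtraction trick used above: shifting each row by its maximum leaves the softmax unchanged but keeps np.exp from overflowing.

import numpy as np

X = np.array([[1000.0, 1001.0, 1002.0]])           # np.exp(X) alone would overflow
shifted = np.exp(X - np.max(X, axis=1).reshape(-1, 1))
probs = shifted / np.sum(shifted, axis=1).reshape(-1, 1)
print(probs.round(3))                              # [[0.09  0.245 0.665]], each row sums to 1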
Example #12
 def _initial_jacobian(self, F: np.array, X: np.array,
                       runtime_var: "function") -> np.array:
     """Return an approximation of initial Jacobian using finite differences."""
     J = np.zeros((3, 3))
     for j in range(3):
         Y = X.copy()
         Y[j] += self._h
         DF = self._function_to_root(Y, runtime_var)
         J[:, j] = DF - F
     return J / self._h
Example #13
def mirroData(data: np.array):
    mirror = data.copy()
    for row in mirror:
        if random() > mirror_proba:
            for i in range(max_light):
                if int(row[pre_base + i]) == 0:
                    continue
                row[4 * i] = br[0] - row[4 * i]
                row[4 * i + 2] *= -1
    return mirror
Example #14
def get_smoothed_prices(prices: np.array, window: int) -> np.array:
    """the window should be an odd number"""
    if window == 1 or window == 0:
        return prices
    else:
        prices = prices.copy()
        temp = np.cumsum(prices)
        temp = temp[window:] - temp[:-window]
        prices[window // 2 + 1:-(window // 2)] = temp / window
        return prices
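A small usage sketch with a float array and an odd window, as the docstring requires; interior points are replaced by 3-point means while the edges keep their raw values.

import numpy as np

prices = np.array([1.0, 2.0, 9.0, 2.0, 1.0, 2.0, 9.0])
print(get_smoothed_prices(prices, window=3))
# -> [1.  2.  4.33  4.  1.67  4.  9.] (rounded); the caller's array is left untouched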
Example #15
    def forward_pass(self, input_array: np.array):
        # reinitialize
        self.x_array = []

        x = input_array.copy()
        self.x_array.append(x)
        for i in range(len(self.weight_matrix)):
            x = self.activation_layer[i](np.matmul(self.weight_matrix[i], x) + self.bias_matrix[i])
            self.x_array.append(x)
        return x
Example #16
 def get_seg_output(self, image: np.array):
     image = self.transform(image.copy())
     print(image.shape)
     with torch.no_grad():
         pred = self.model([image])

     # keep every predicted mask whose score clears the confidence threshold
     # (append "and pred[0]['labels'][i] == 1" to restrict to label 1, as in the original commented-out variant)
     outputs = [(pred[0]['masks'][i][0], pred[0]['labels'][i])
                for i in range(len(pred[0]['boxes']))
                if pred[0]['scores'][i] > self.conf_thresh]

     return outputs
Example #17
def assign_points_range(grid: np.array,
                        points: List[Tuple[int, Tuple[int, int]]],
                        max: int) -> np.array:
    grid = grid.copy()
    for i in range(grid.shape[0]):
        for j in range(grid.shape[1]):
            dist = sum([city_distance((i, j), p) for t, p in points])
            if dist < 10000:
                grid[i, j] = 1
    return grid
Example #18
 def _bounded_step(self, position: np.array,
                   velocity: np.array) -> np.array:
     assert self._states[tuple(
         position)], "The given position is not a valid state."
     new_position = np.maximum(
         0, np.minimum(position + velocity, self.dimensions - 1))
     if not self._states[tuple(new_position)]:
         # Impassible state; remain in place
         return position.copy()
     return new_position
Example #19
def _iteration(B: np.array, x: np.array, c: np.array):
    """
    Completes one iteration of the method.
    """
    y = x.copy()
    
    for i in range(len(B)):
        y[i] = c[i] + (B[i] * y).sum()

    return y
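A self-contained sketch of how this iteration might be driven (values made up for illustration): repeated application converges to the fixed point x = B·x + c when B is contractive.

import numpy as np

B = np.array([[0.0, 0.3],
              [0.2, 0.0]])
c = np.array([1.0, 2.0])
x = np.zeros(2)
for _ in range(50):
    x = _iteration(B, x, c)
print(x)  # approaches the solution of (I - B) x = c, roughly [1.70, 2.34]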
Example #20
def bitwise_and(frame: np.array, mask: np.array) -> np.array:
    """
    Uses: Better display of masks, edge detection.

    :param frame: A frame.
    :param mask: A mask.
    :return: The part of the frame cut out by the mask. The 0's in the mask draw 0's over the frame.
    """
    frame = frame.copy()
    return cv2.bitwise_and(frame, frame, mask=mask)
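A short usage sketch: a filled circle in the mask keeps that region of the frame and blacks out everything else.

import cv2
import numpy as np

frame = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
mask = np.zeros((100, 100), dtype=np.uint8)
cv2.circle(mask, (50, 50), 30, 255, -1)    # white disc marks the region to keep
cut_out = bitwise_and(frame, mask)
print(cut_out[0, 0], cut_out[50, 50])      # corner pixel is zeroed, centre pixel keeps its original value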
Example #21
def bi_directional_shape_broadcasting(input_shape_1: np.array,
                                      input_shape_2: np.array):
    """
    Bi-directional broadcasting of two shapes following numpy semantic
    :param input_shape_1: first shape to broadcast
    :param input_shape_2: second shape to broadcast
    :return: broadcasted shape or None if broadcasting cannot be performed
    """
    shape_1 = input_shape_1.copy()
    shape_2 = input_shape_2.copy()
    shape_1, shape_2 = make_equal_rank(shape_1, shape_2)

    for left, right in zip(shape_1, shape_2):
        if left != right and left != 1 and right != 1:
            log.debug('The shape "{}" cannot be broadcasted to "{}"'.format(
                input_shape_1, input_shape_2))
            return None

    return np.maximum(shape_1, shape_2)
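A usage sketch under the assumption that make_equal_rank left-pads the shorter shape with ones, following the usual NumPy convention:

import numpy as np

print(bi_directional_shape_broadcasting(np.array([2, 1, 4]), np.array([3, 4])))  # -> [2 3 4]
print(bi_directional_shape_broadcasting(np.array([2, 3]), np.array([4, 3])))     # -> None (2 vs 4 cannot broadcast)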
Example #22
    def add_step(self, image: np.array, action, reward):
        target_shape = (1, *image.shape)
        image = image.reshape(target_shape)
        if self.images is None:
            self.images = image.copy()
        else:
            self.images = np.concatenate((self.images, image))

        self.actions.append(action)
        self.rewards.append(reward)
Example #23
 def step(self, game_state: np.array, last_step: int):
     opts = get_possible_steps(game_state)
     min_val, min_step = 6, opts[0]
     for i in opts:
         board_local = game_state.copy()
         highest_index = np.where(board_local[:, i] == 0)[0][-1]
         if highest_index < min_val:
             min_val = highest_index
             min_step = i
     return min_step
Example #24
        def step(cls, func, t: float, u: np.array, dt):
            def f(ti, ui):
                return np.array([function(ti, ui) for function in func])

            # compute the first 3 point (start-up predictor solver)

            if cls.first_startup:
                cls.um3 = u.copy()
                unext = rungekutta.RK4.step(func, t, cls.um3, dt)
                t += dt
                cls.first_startup = False
            elif cls.second_startup:
                cls.um2 = u.copy()
                unext = rungekutta.RK4.step(func, t, cls.um2, dt)
                t += dt
                cls.second_startup = False
            elif cls.third_startup:
                cls.um1 = u.copy()
                unext = rungekutta.RK4.step(func, t, cls.um1, dt)
                t += dt
                cls.third_startup = False
            else:
                up = u + dt/24.* (55. * f(t,u) - 59. * f(t-dt,cls.um1) + 37.*f(t-(2*dt),cls.um2) \
                                 - 9. * f(t-(3*dt),cls.um3))
                fpred = f(t + dt, up)  #
                error = 1.
                fold = fpred
                iter = 0
                while True:
                    uold = u + dt / 24. * (9. * fold + 19. * f(t, u) - 5 * f(
                        t - dt, cls.um1) + 1 * f(t - (2 * dt), cls.um2))
                    fold = f(t + dt, uold)
                    unew = u + dt / 24. * (9. * fold + 19. * f(t, u) - 5 * f(
                        t - dt, cls.um1) + 1 * f(t - (2 * dt), cls.um2))
                    fnew = f(t + dt, unew)
                    error = np.max(np.abs(unew - uold))
                    if error <= cls.toll:
                        break
                cls.um3 = cls.um2
                cls.um2 = cls.um1
                cls.um1 = u
                unext = unew
            return unext
Example #25
def squeeze_random_regions(x: np.array, fraction: int, n_regions: int, coef: float = .8, normalize: bool = True):
    """Multiply `n_regions` random parts of `x` with the size (len(x) // fraction) by `coef`"""
    new_array = x.copy()

    for _ in range(n_regions):
        start = np.random.randint(len(x) // 2)
        end = start + len(x) // fraction
        new_array = squeeze_function(new_array, coef=coef, from_=start, to=end, normalize=normalize)

    return new_array
Example #26
def find_start(target: int, d: np.array):
    for noun in range(100):
        for verb in range(100):
            opcode = d.copy()
            opcode[1] = noun
            opcode[2] = verb
            res = run_opcode(opcode)
            if res == 19690720:
                return noun, verb
    return None
Example #27
def add_axis(x: np.array, indices, ext_indices):
    """Adds numpy (dummy) dimensions for vectorized handling of proababilty tables"""
    assert set(indices) <= set(
        ext_indices
    ), "Seperator variables are not a subset of clique variables"
    axes = [ext_indices.index(a) for a in ext_indices if a not in indices]
    r = x.copy()
    for axis in axes:
        r = np.expand_dims(r, axis)
    return r
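A small sketch (variable indices chosen for illustration): a table over variable 0 gets dummy axes for the remaining clique variables 1 and 2, so it can broadcast against the full clique table.

import numpy as np

phi = np.array([0.3, 0.7])                        # potential over variable 0
expanded = add_axis(phi, indices=[0], ext_indices=[0, 1, 2])
print(expanded.shape)                             # (2, 1, 1) - ready to broadcast over a (2, m, n) table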
Example #28
def solve(A: np.array, b: np.array, interactive) -> np.array:
    """
    Solves A*x = b that is returns x-vector.
    """

    if interactive:
        print('Single division scheme (Gaussian elimination):\n')
        input()

    # securing args:
    A = A.copy()
    b = b.copy()

    # forward elimination:
    if interactive:
        print('Forward elimination:\n')
        input()

    for i in range(len(A)):
        assert A[i, i] != 0
        _eliminate(A, b, i)
        if interactive:
            print('''Step {0} completed: the A-matrix and b-vector are now:
            \n{1}\n{2}\n'''.format(i + 1, A, b))
            input()

    # back substitution:
    if interactive:
        print('Back substitution:\n')
        input()

    x = b

    for k in range(len(A) - 1, -1, -1):
        for m in range(len(A) - 1, k, -1):
            x[k] -= A[k, m] * x[m]
        x[k] /= A[k, k]

        if interactive:
            print('x[{0}] calculated: {1}'.format(k + 1, x[k]))
            input()

    return x
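A usage sketch, assuming _eliminate implements the forward-elimination step the snippet expects; the caller's matrix and vector are left untouched because the function works on copies.

import numpy as np

A = np.array([[4.0, 1.0],
              [1.0, 3.0]])
b = np.array([1.0, 2.0])
x = solve(A, b, interactive=False)
print(np.allclose(A @ x, b))  # True when the elimination step is implemented correctly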
Example #29
 def solve(self, grid: np.array) -> np.array:
     grid = grid.copy()
     solver = RuleBasedSolver(grid)
     solver.solve_until_stuck()
     step = BeamSearchStep(grid, solver.possibilities, [], (grid != 0).sum())
     results = self._walk(step)
     try:
         return results[0].grid
     except IndexError:
         return None
Example #30
def fill_somewhere(x_arr: np.array, del_color: int) -> np.array:

    assert min(x_arr.shape) >= 15
    assert x_arr.min() >= 0

    if (x_arr == del_color).sum() == 0:
        return x_arr

    x_arr_copy = x_arr.copy()
    x_arr_copy[x_arr == del_color] = -1

    x_sum = (x_arr == del_color).sum(axis=1)
    y_sum = (x_arr == del_color).sum(axis=0)

    x0 = min([i for i in range(x_arr.shape[0]) if x_sum[i]])
    x1 = max([i for i in range(x_arr.shape[0]) if x_sum[i]]) + 1
    y0 = min([i for i in range(x_arr.shape[1]) if y_sum[i]])
    y1 = max([i for i in range(x_arr.shape[1]) if y_sum[i]]) + 1

    x0_ = max(x0 - 1, 0)
    x1_ = min(x1 + 1, x_arr.shape[0])
    y0_ = max(y0 - 1, 0)
    y1_ = min(y1 + 1, x_arr.shape[1])

    search_arr = x_arr_copy[x0_:x1_, y0_:y1_].copy()

    for t in range(4):
        for i in range(x_arr_copy.shape[0] - search_arr.shape[0] + 1):
            for j in range(x_arr_copy.shape[1] - search_arr.shape[1] + 1):
                # perfect on >= 0
                match_arr = (
                    x_arr_copy[i:i + search_arr.shape[0], j:j +
                               search_arr.shape[1]] == search_arr).astype(int)
                match_arr[x_arr_copy[i:i + search_arr.shape[0],
                                     j:j + search_arr.shape[1]] < 0] = 1
                match_arr[search_arr < 0] = 1
                if match_arr.min() == 0:
                    continue
                # no match on negative
                if x_arr_copy[i:i + search_arr.shape[0],
                              j:j + search_arr.shape[1]].min() < 0:
                    continue

                new_v = x_arr_copy[i:i + search_arr.shape[0],
                                   j:j + search_arr.shape[1]].copy()
                if t % 2 == 0:
                    new_v = new_v.reshape((x1_ - x0_, y1_ - y0_))
                else:
                    new_v = new_v.reshape((y1_ - y0_, x1_ - x0_))
                x_arr_copy[x0_:x1_, y0_:y1_] = np.rot90(new_v, 4 - t)
                return x_arr_copy

        search_arr = np.rot90(search_arr)

    raise AssertionError
Example #31
def causal_rect_window_filter(data: np.array, window_length: int) -> np.array:
    window_length = np.round(window_length)
    if window_length > 0:
        front_pad = window_length - 1
        end_pad = 0
        norm_window = np.repeat(1.0, window_length) / window_length
        data_long = np.pad(data, ((front_pad, end_pad),), mode='edge')
        # avoid fftconvolve as it distributes one input NaN to entire output
        data_filtered = sps.convolve(data_long, norm_window, 'valid')
    else:
        data_filtered = data.copy()
    return data_filtered
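A quick sketch, assuming the defining module imports scipy.signal as sps: the causal (trailing) 3-point mean uses only the current and past samples, with the front edge padded by the first value.

import numpy as np

data = np.array([0.0, 0.0, 3.0, 3.0, 3.0, 0.0])
print(causal_rect_window_filter(data, window_length=3))  # -> [0. 0. 1. 2. 3. 2.]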
Example #32
    def __init__(self, list_lagged_parameters: list,
                 response_array: np.array,
                 time_array: pd.DatetimeIndex,
                 use_constant: bool=True,
                 data_variance: np.array=None):
        self.list_lagged_parameters = list_lagged_parameters.copy()
        self.response_array = response_array.copy()
        self.time_array = time_array.values.copy()
        self.use_constant = use_constant
        self.use_weights = data_variance is not None

        self.num_params = len(self.list_lagged_parameters)
        self.list_labels = []
        self.max_shift = 0

        if self.use_constant:
            self.num_params += 1
            self.list_labels.append('Constant')

        # Find max shift, necessary to clip all data to the same size
        for param in self.list_lagged_parameters:
            self.list_labels.append(param.label)
            if param.shift_size > self.max_shift:
                self.max_shift = param.shift_size

        # Clip initial points in the un-shifted response and time data
        self.response_array = self.response_array[self.max_shift:]
        self.time_array = self.time_array[self.max_shift:]
        self.clipped_data_length = len(self.response_array)
        if self.use_weights:
            self.weights = 1.0 / data_variance[self.max_shift:]

        self.list_lagged_arrays = []
        if self.use_constant:
            self.constant_array = np.ones(self.clipped_data_length)
            self.list_lagged_arrays.append(self.constant_array)

        # Extract clipped views of the lagged parameters
        for param in self.list_lagged_parameters:
            shift = self.max_shift - param.shift_size
            self.list_lagged_arrays.append(param.data[shift:])

        # Range of data to use in fit
        self.start_index = 0
        self.end_index = self.clipped_data_length - 1

        self.has_results = False
        self.fit_matrix = None
        self.model = None
        self.results = None
        self.auto_correlation_matrix = None
        self.time_axis = None
        self.response_cut = None
Example #33
def causal_hann_window_filter(data: np.array, window_length: int) -> np.array:
    window_length = np.round(window_length)
    if window_length > 0:
        front_pad = window_length
        end_pad = 1
        norm_window = sps.hann(window_length + 2)
        norm_window /= np.sum(norm_window)
        data_long = np.pad(data, ((front_pad, end_pad),), mode='edge')
        # avoid fftconvolve as it distributes one input NaN to entire output
        data_filtered = sps.convolve(data_long, norm_window, 'valid')
    else:
        data_filtered = data.copy()
    return data_filtered
Example #34
def rect_window_filter(data: np.array, window_length: int) -> np.array:
    window_length = np.round(window_length)
    if window_length:
        if window_length % 2 == 0:  # even length window
            front_pad = window_length // 2
            end_pad = window_length // 2
            norm_window = np.repeat(1.0, window_length + 1)
            norm_window[0] = 0.5
            norm_window[-1] = 0.5
            norm_window /= window_length
        else:  # odd length window
            front_pad = (window_length - 1) // 2
            end_pad = front_pad
            norm_window = np.repeat(1.0, window_length) / window_length
        data_long = np.pad(data, ((front_pad, end_pad),), mode='edge')
        data_filtered = sps.convolve(data_long, norm_window, 'valid')
    else:
        data_filtered = data.copy()
    return data_filtered
Example #35
    def __init__(self, data: np.array,
                 label: str,
                 transition_length: int,
                 shift_size: int=0,
                 filter_type: str='hann'):
        """
        Process data to be lagged with a smooth transition determined by the filter type and shift.
        :param data: Data to be lagged.
        :param label: Label for data.
        :param transition_length: Number of points over which the lag transition occurs.
        :param shift_size: Number of points to shift the data.
        :param filter_type: Type of filter to use to generate the transition.
        :raise ValueError: Error if the filter_type is not valid.
        """
        self.data = data.copy()
        self.label = label
        self.transition_length = transition_length
        self.window_size = self.transition_length + 1
        self.shift_size = np.round(shift_size)

        dict_filter_type_func = {'none': None,
                                 'rect': causal_rect_window_filter,
                                 'hann': causal_hann_window_filter
                                 }

        try:
            filter_func = dict_filter_type_func[filter_type]
        except KeyError:
            type_str = ', '.join(dict_filter_type_func.keys())
            raise ValueError(filter_type + ' is not a valid filter type. Valid types: ' + type_str)

        self.use_shift = self.shift_size > 0
        if filter_func is not None:
            self.data = filter_func(self.data, self.window_size)

        if self.use_shift:
            self.data = self.data[:-self.shift_size]