Example #1
def IoU(target_labels: np.array, predicted_labels: np.array):
    target_labels = target_labels.flatten()
    predicted_labels = predicted_labels.flatten()
    intersection = np.logical_and(target_labels, predicted_labels)
    union        = np.logical_or(target_labels, predicted_labels)
    iou_score    = (np.sum(intersection)+1e-6) / (np.sum(union)+1e-6)
    return iou_score
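A quick sanity check, assuming `import numpy as np` and the definition above: two binary masks that share one of the three pixels in their union should score close to 1/3.

mask_a = np.array([[1, 1], [0, 0]])
mask_b = np.array([[0, 1], [1, 0]])
print(IoU(mask_a, mask_b))  # ~0.3333: 1 shared pixel / 3 pixels in the union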
Example #2
def shortwave(sw_rad: np.array, daylength: np.array, day_of_year: np.array,
              tiny_rad_fract: np.array, params: dict) -> np.array:
    """
    Disaggregate shortwave radiation down to a subdaily timeseries.

    Parameters
    ----------
    sw_rad:
        Daily incoming shortwave radiation
    daylength:
        Daylength for each day of the year
    day_of_year:
        Timeseries of day-of-year indices (days since Jan 1)
    tiny_rad_fract:
        Fraction of the daily potential radiation
        during a radiation time step defined by SW_RAD_DT
    params:
        Dictionary of parameters from the MetSim object

    Returns
    -------
    disaggrad:
        A sub-daily timeseries of shortwave radiation.
    """
    ts = int(params['time_step'])
    ts_hourly = float(ts) / cnst.MIN_PER_HOUR
    tmp_rad = (sw_rad * daylength) / (cnst.SEC_PER_HOUR * ts_hourly)
    n_days = len(tmp_rad)
    ts_per_day = int(cnst.HOURS_PER_DAY * cnst.MIN_PER_HOUR / ts)
    disaggrad = np.zeros(int(n_days * ts_per_day))
    rad_fract_per_day = int(cnst.SEC_PER_DAY / cnst.SW_RAD_DT)
    tmp_rad = np.repeat(tmp_rad, rad_fract_per_day)
    if params['utc_offset']:
        utc_offset = int(
            (params['lon'] / cnst.DEG_PER_REV) * rad_fract_per_day)
        tiny_rad_fract = np.roll(tiny_rad_fract.flatten(), -utc_offset)
        tmp_rad = np.roll(tmp_rad.flatten(), -utc_offset)
        tiny_rad_fract = tiny_rad_fract.flatten()
    else:
        utc_offset = 0
        tiny_rad_fract = tiny_rad_fract.flatten()
    chunk_size = int(ts * (cnst.SEC_PER_MIN / cnst.SW_RAD_DT))
    ts_id = np.repeat(np.arange(ts_per_day), chunk_size)
    for day in range(n_days):
        # Mask to select out from tiny_rad_fract
        radslice = slice((day_of_year[day] - 1) * rad_fract_per_day,
                         (day_of_year[day]) * rad_fract_per_day)
        rad = tiny_rad_fract[radslice]

        # Mask to select out time chunk to place disaggregated values into
        dslice = slice(int(day * ts_per_day), int((day + 1) * ts_per_day))

        # Mask to weight daily solar radiation with
        weight_slice = slice(int(day * rad_fract_per_day),
                             int((day + 1) * rad_fract_per_day))

        rad_chunk = np.bincount(ts_id, weights=rad * tmp_rad[weight_slice])
        disaggrad[dslice] = rad_chunk

    return disaggrad
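For orientation, a shape-level sketch. The `cnst` class below is a stand-in for the constants module this function imports (the values are assumptions, not the real MetSim module), and the inputs are dummy data; with a 60-minute time step, each day disaggregates into 24 values.

import numpy as np

class cnst:  # stand-in for the constants module (assumed values)
    MIN_PER_HOUR, SEC_PER_HOUR, HOURS_PER_DAY = 60, 3600, 24
    SEC_PER_MIN, SEC_PER_DAY = 60, 86400
    SW_RAD_DT = 30      # radiation time step, seconds
    DEG_PER_REV = 360

params = {'time_step': 60, 'utc_offset': False, 'lon': 0.0}
sw = np.array([200.0, 210.0])         # daily shortwave, W/m^2
dayl = np.array([43200.0, 43300.0])   # daylength, seconds
doy = np.array([1, 2])
tiny = np.tile(np.full(2880, 1 / 2880), 366)  # uniform fraction profile per day
print(shortwave(sw, dayl, doy, tiny, params).shape)  # (48,): 2 days x 24 steps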
Example #3
def get_number_of_tp(y_true: np.array, y_pred: np.array) -> float:
    """
    Count the positive examples that were predicted as positive (true positives)
    :return: number of true positives
    """
    positive_indices = np.where(y_true.flatten() == 1)[0]
    return y_pred.flatten()[positive_indices].sum()
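A worked call, assuming `import numpy as np`: of the three positives in y_true, two are predicted positive.

y_true = np.array([1, 0, 1, 1])
y_pred = np.array([1, 0, 0, 1])
print(get_number_of_tp(y_true, y_pred))  # 2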
Example #4
def calculate_confusion_matrix_from_arrays(ground_truth: np.array,
                                           prediction: np.array,
                                           num_classes: int) -> np.array:
    """
    Calculate confusion matrix for a given set of classes.
    if GT value is outside of the [0, num_classes) it is excluded.

    Args:
        ground_truth:
        prediction:
        num_classes:
    Returns:
    """
    # a long 2xn array with each column being a pixel pair
    replace_indices = np.vstack((ground_truth.flatten(), prediction.flatten()))

    valid_index = replace_indices[0, :] < num_classes
    replace_indices = replace_indices[:, valid_index].T

    # add up confusion matrix
    confusion_matrix, _ = np.histogramdd(
        replace_indices,
        bins=(num_classes, num_classes),
        range=[(0, num_classes), (0, num_classes)],
    )
    return confusion_matrix.astype(np.uint64)
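A small worked example, assuming `import numpy as np`: rows index the ground truth, columns the prediction, and the out-of-range label 9 is dropped.

gt = np.array([0, 1, 2, 2, 9])
pred = np.array([0, 2, 2, 2, 0])
print(calculate_confusion_matrix_from_arrays(gt, pred, num_classes=3))
# [[1 0 0]
#  [0 0 1]
#  [0 0 2]]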
Example #5
def calculate_recall(y_true: np.array, y_pred: np.array) -> float:
    """
    Calculate the recall (accuracy of the "1" labels): take the indices of the
    positive values in y_true and check the predictions in y_pred at those indices
    :return: recall of the positive label
    """
    positive_indices = np.where(y_true.flatten() == 1)[0]
    return y_pred.flatten()[positive_indices].sum() / len(positive_indices)
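A worked call, assuming `import numpy as np`: two of the three positives are predicted correctly.

y_true = np.array([1, 0, 1, 1, 0])
y_pred = np.array([1, 0, 0, 1, 1])
print(calculate_recall(y_true, y_pred))  # 0.666...: 2 of 3 positives recovered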
Example #6
def image_distance(img1: np.array, img2: np.array) -> float:
    """Calculate the distance between two images for comparism"""
    assert img1.shape == img2.shape
    img1 = img1.flatten()
    img2 = img2.flatten()
    # return scipy.spatial.distance.euclidean(img1, img2)
    # return scipy.spatial.distance.cosine(img1, img2)
    return scipy.spatial.distance.cityblock(img1, img2)
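A minimal check, assuming `import numpy as np` and `import scipy.spatial`: for a 2x2 all-zeros image against an all-ones image, the city-block (Manhattan) distance is the number of pixels.

a = np.zeros((2, 2))
b = np.ones((2, 2))
print(image_distance(a, b))  # 4.0: sum of absolute pixel differences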
Example #7
 def _flatten_and_concatenate_activations(self,
                                          activations_4_3: numpy.array,
                                          style_array_5_3: numpy.array):
     flat_4_3 = activations_4_3.flatten()
     flat_5_3 = style_array_5_3.flatten()
     # Set precision of target array from float64 to float32 to save some memory (this array is stored for a long time)
     concatenated = numpy.concatenate((flat_4_3, flat_5_3))
     result_32 = concatenated.astype(dtype=numpy.float32,
                                     casting='same_kind')
     return result_32
Example #8
def inner_prod_fs(a: np.array, b: np.array):
    # `complex` here is a project-local helper module, not the builtin type.
    # Each half-spectrum coefficient is counted twice; the index-0 plane
    # (presumably stored only once in the full spectrum) is subtracted back out.
    if complex.is_complex(a) and complex.is_complex(b):
        return 2 * (a.flatten() @ b.flatten()) - a[:, :, :, 0, :].flatten(
        ) @ b[:, :, :, 0, :].flatten()
    elif complex.is_real(a) and complex.is_real(b):
        return 2 * (a.flatten() @ b.flatten()
                    ) - a[:, :, :, 0].flatten() @ b[:, :, :, 0].flatten()
    else:
        raise NotImplementedError(
            'Not implemented for mixed real and complex.')
Example #9
File: ts.py Project: bigdig/Thesis
    def update(self, y_t: np.array, y_pred: np.array):
        y_t = y_t.flatten()
        y_pred = y_pred.flatten()
        n_obs = self.model_info.endog.shape[0]
        m = self.model

        # Residual Calculation
        eps_t = y_t - y_pred
        m.resid = np.insert(m.resid, n_obs, eps_t, axis=0)
        # Add data point
        self.model_info.endog = np.insert(self.model_info.endog, n_obs, y_t, axis=0)
Example #10
    def _get_grad(self, x: np.array, y: np.array) -> np.array:
        loc = np.arange(y.shape[0])

        # Get un-flattened gradient
        grad = x[loc, y.flatten()]
        grad = -1/grad

        # Error signal
        d = np.zeros_like(x)
        d[loc, y.flatten()] = grad
        return d
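What the gradient looks like on paper, assuming `x` holds per-class probabilities and `y` integer class labels; this sketch inlines the method body with `import numpy as np`, so no class instance is needed.

x = np.array([[0.2, 0.8],
              [0.9, 0.1]])   # predicted class probabilities
y = np.array([1, 0])         # true class indices
loc = np.arange(y.shape[0])
d = np.zeros_like(x)
d[loc, y.flatten()] = -1 / x[loc, y.flatten()]   # d(-log p)/dp = -1/p
print(d)  # only the true-class entries are non-zero: -1/0.8 and -1/0.9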
Example #11
def save_tdf(filename: str, tdf: np.array, dimx: int, dimy: int, dimz: int,
             voxel_size: float, matrix: np.array) -> None:
    with open(filename, 'wb') as f:
        f.write(struct.pack('I', dimx))
        f.write(struct.pack('I', dimy))
        f.write(struct.pack('I', dimz))
        f.write(struct.pack('f', voxel_size))
        f.write(struct.pack("={}f".format(16), *matrix.flatten("F")))

        num_elements = dimx * dimy * dimz
        f.write(struct.pack("={}f".format(num_elements), *tdf.flatten("F")))
Example #12
 def update(
     self,
     name            :str,
     extrinsics      :numpy.array,
     intrinsics      :numpy.array,
     correspondences :numpy.array
 ):
     self.data["Viewpoints"].append({
         "name"              : name,
         "extrinsics"        : extrinsics.flatten().tolist(),
         "intrinsics"        : intrinsics.flatten().tolist()
     })
Example #13
    def __call__(self, image: np.array) -> Image:

        # get image histogram
        image_histogram, bins = np.histogram(image.flatten(),
                                             self.number_bins,
                                             density=True)
        cdf = image_histogram.cumsum()  # cumulative distribution function
        cdf = 255 * cdf / cdf[-1]  # normalize

        # use linear interpolation of cdf to find new pixel values
        image_equalized = np.interp(image.flatten(), bins[:-1], cdf)
        # cast back to uint8: PIL cannot build an image from a float64 array
        return Image.fromarray(image_equalized.reshape(image.shape).astype(np.uint8))
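The same recipe as a standalone function, a sketch assuming `import numpy as np` (the helper name is made up):

def equalize_histogram(image, number_bins=256):
    hist, bins = np.histogram(image.flatten(), number_bins, density=True)
    cdf = hist.cumsum()
    cdf = 255 * cdf / cdf[-1]                        # normalize to [0, 255]
    equalized = np.interp(image.flatten(), bins[:-1], cdf)
    return equalized.reshape(image.shape).astype(np.uint8)

img = np.random.randint(0, 256, (8, 8), dtype=np.uint8)
print(equalize_histogram(img).min(), equalize_histogram(img).max())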
Example #15
def check_winner(state: np.array, last_move: int, num_rows: int, num_cols: int):
    # Borrowed from https://github.com/geoffreyyip/numpy-tictactoe
    # TODO: make this more generic so we can play more varied game types

    if num_rows != num_cols:
        raise ValueError('TODO: handle arbitrary board sizes')

    for i in range(num_rows):
        # Check row i and column i for a win
        rows_win = (state[i, :] == last_move).all()
        cols_win = (state[:, i] == last_move).all()

        if rows_win or cols_win:
            return last_move

    diag1_win = (np.diag(state) == last_move).all()
    diag2_win = (np.diag(np.fliplr(state)) == last_move).all()

    if diag1_win or diag2_win:
        # Checks both diagonals for match
        return last_move

    # Check for draw
    if not (state.flatten() == EMPTY).any():
        # We have a draw
        return DRAW
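A quick check, assuming `import numpy as np`, players encoded as 1 and 2, and the module-level EMPTY and DRAW constants from the source:

board = np.array([[1, 1, 1],
                  [2, 2, 0],
                  [0, 0, 0]])
print(check_winner(board, last_move=1, num_rows=3, num_cols=3))  # 1: top row wins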
Example #16
def calculate_histogram(img_array: np.array) -> (np.array, np.array, np.array):
    """
    g1(l) = ∑_{k=0}^{l} pA(k)  ⇒  g1(l) − g1(l−1) = pA(l) = hA(l)/(N·M)   (l = 1, ..., 255)

    geA(l) = round(255·g1(l))

    calculate_histogram generates the histogram for an image,
    the equalized histogram,
    and a new quantized image based on the equalized histogram.
    """

    flat = img_array.flatten()

    hist = histogram(flat)
    cs = cumsum(hist)

    nj = (cs - cs.min()) * 255

    N = cs.max() - cs.min()

    cs = nj / N

    cs_casted = cs.astype(np.uint8)

    equalized = cs_casted[flat]
    img_new = np.reshape(equalized, img_array.shape)

    return hist, histogram(equalized), img_new
Example #17
    def _check_Xy(self, X, y: np.array = None) -> [np.array, np.array]:
        """Check X and y to be valid"""

        if len(X.shape) != 2:
            raise ValueError('X should be 2D (n_samples x n_features)')

        if y is not None:
            n_samples, n_features = X.shape
            if len(y.flatten()) != n_samples:
                raise ValueError('number of samples in y is not equal to X')

            # classes_ holds the labels; n_classes_ holds the per-class counts
            self.classes_, self.n_classes_ = np.unique(y, return_counts=True)
            if len(self.classes_) > 2:
                raise NotImplementedError('Just binary class supported'
                                          ', multi class not supported yet')

            # Indices that sort the per-class counts (ascending)
            sorted_indexes = np.argsort(self.n_classes_)

            # Label of each class
            self.minC, self.majC = self.classes_[sorted_indexes]

            # Number of each class
            self._nMin, self._nMaj = self.n_classes_[sorted_indexes]

            # get indexes of minority and majority classes
            self._minIndexes = np.where(y != self.majC)[0]
            self._majIndexes = np.where(y == self.majC)[0]

            # separate X and Y of majority class from whole data
            self._majX, self._majY = X[self._majIndexes], y[self._majIndexes]

        return X, y
Example #18
def train_test_split_class(x: np.array,
                           y: np.array,
                           split: float,
                           split_valance=None,
                           special_case=False):
    if split_valance is None:
        split_valance = [0.5, 0.5]
    if sum(split_valance) != 1:
        raise ValueError("The valance should sum to 1")

    a_value, b_value = np.unique(y)

    indices_a = np.argwhere(y.flatten() == a_value).T.flatten()
    indices_b = np.argwhere(y.flatten() == b_value).T.flatten()
    # np.random.shuffle(indices_a)
    # np.random.shuffle(indices_b)

    split_a, split_b = int(
        (1 - split * split_valance[0] * 2) * indices_a.shape[0]), int(
            (1 - split * split_valance[1] * 2) * indices_b.shape[0])

    train_indices_a, val_indices_a = indices_a[:split_a], indices_a[split_a:]
    train_indices_b, val_indices_b = indices_b[:split_b], indices_b[split_b:]

    x_train, x_val = np.concatenate(
        (x[:, train_indices_a], x[:, train_indices_b]),
        axis=1), np.concatenate((x[:, val_indices_a], x[:, val_indices_b]),
                                axis=1)

    y_train, y_val = np.concatenate(
        (y[:, train_indices_a], y[:, train_indices_b]),
        axis=1), np.concatenate((y[:, val_indices_a], y[:, val_indices_b]),
                                axis=1)

    return x_train, x_val, y_train, y_val
Example #19
def _array_to_stat_list(array: np.array, stat: str) -> list:
    list_of_stats = []
    # add the results to the lists of values and times
    if array.ndim == 1 or array.ndim == 2:
        if stat == 'mean':
            list_of_stats.append(np.nanmean(array))
        elif stat == 'median':
            list_of_stats.append(np.nanmedian(array))
        elif stat == 'max':
            list_of_stats.append(np.nanmax(array))
        elif stat == 'min':
            list_of_stats.append(np.nanmin(array))
        elif stat == 'sum':
            list_of_stats.append(np.nansum(array))
        elif stat == 'std':
            list_of_stats.append(np.nanstd(array))
        elif '%' in stat:
            list_of_stats.append(
                np.nanpercentile(array, int(stat.replace('%', ''))))
        elif stat == 'values':
            list_of_stats.append(array.flatten().tolist())
        else:
            raise ValueError(unknown_stat(stat))
    elif array.ndim == 3:
        for a in array:
            list_of_stats += _array_to_stat_list(a, stat)
    else:
        raise ValueError(
            'Too many dimensions in the array; only 1D, 2D, and 3D arrays are supported'
        )
    return list_of_stats
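Worked calls, assuming `import numpy as np` (the `unknown_stat` helper referenced above is project-local and only formats the error message):

arr = np.array([[1.0, 2.0], [3.0, np.nan]])
print(_array_to_stat_list(arr, 'mean')[0])  # 2.0  (nan-aware mean of 1, 2, 3)
print(_array_to_stat_list(arr, '50%')[0])   # 2.0  (nan-aware 50th percentile)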
Example #20
def get_number_of_positives(y: np.array) -> int:
    """
    Calculate the number of positives in an array
    :param y: array of binary labels
    :return: count of entries equal to 1
    """
    return len(np.where(y.flatten() == 1)[0])
Example #21
 def __init__(self, A: np.array=np.zeros((3,4)), b: np.array=np.zeros(3), d: np.array=np.zeros(4)):
     self.c = A.flatten()
     self.shape = A.shape
     self.A_eq = np.hstack([np.identity(self.shape[1])] * self.shape[0])
     self.A_ub = block_diag(*[np.ones(self.shape[1])] * self.shape[0])
     self.b = b
     self.d = d
Example #22
 def totxtfile(self, save_path, value_np: np.array, bit_16_represent=False):
     if self.cfg.setup.log_txt:
         if bit_16_represent:
             np.save(save_path, _cast_bfloat16_then_float32(value_np))
         else:
             np.savetxt(save_path, value_np.flatten(), fmt='%f', header=str(value_np.shape))
         print("----> %s" % save_path)
Example #23
 def forward(self, input:np.array, *args) -> np.array:
     '''
     forward step
     Parameters:
         - input: input of shape (batch_size, x_dim, y_dim, z_dim) [numpy.array]
         - *args: whether Layer is in training (=True) mode or in prediction mode (=False) [Boolean]
     Returns:
         - drop_forward: output of shape (batch_size, x_dim, y_dim, z_dim) [numpy.array]
         - ac_forward: output after activation function (batch_size, x_dim, y_dim, z_dim) [numpy.array]
     '''
     self.training, *_ = args
     if self.training:
         ## number of elements in the input
         length = input.size
         ## as many zeros as factor * length
         zeros = np.zeros(int(length * self.factor))
         ## fill the rest with ones
         ones = np.ones(length - len(zeros))
         ## concat both, shuffle and reshape to input's shape
         scale = np.append(ones, zeros)
         np.random.shuffle(scale)
         self.scale = scale.reshape(input.shape)
         drop_forward = input * self.scale
     else:
         drop_forward = input.copy()
     ac_forward = drop_forward.copy()
     return drop_forward, ac_forward
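The masking trick in isolation, a sketch assuming `import numpy as np` and that `factor` is the fraction of units to drop:

n, factor = 12, 0.25
n_zeros = int(n * factor)
mask = np.append(np.ones(n - n_zeros), np.zeros(n_zeros))
np.random.shuffle(mask)
print(mask.reshape(3, 4))  # 0/1 mask that silences ~25% of the activations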
Example #24
    def create_filter(self, initial_detection: np.array):
        num_points = initial_detection.shape[0]
        dim_z = 2 * num_points
        dim_x = 2 * 2 * num_points  # Doubled to accommodate velocities

        filter = KalmanFilter(dim_x=dim_x, dim_z=dim_z)

        # State transition matrix (models physics): numpy.array()
        filter.F = np.eye(dim_x)
        dt = 1  # At each step we update pos with v * dt

        filter.F[:dim_z, dim_z:] = dt * np.eye(dim_z)

        # Measurement function: numpy.array(dim_z, dim_x)
        filter.H = np.eye(
            dim_z,
            dim_x,
        )

        # Measurement uncertainty (sensor noise): numpy.array(dim_z, dim_z)
        filter.R *= self.R

        # Process uncertainty: numpy.array(dim_x, dim_x)
        # Don't decrease it too much or trackers pay too little attention to detections
        filter.Q[dim_z:, dim_z:] *= self.Q

        # Initial state: numpy.array(dim_x, 1)
        filter.x[:dim_z] = np.expand_dims(initial_detection.flatten(), 0).T

        # Estimation uncertainty: numpy.array(dim_x, dim_x)
        filter.P[dim_z:, dim_z:] *= self.P

        return filter
Example #25
def encode_haze(network:MarabouNetwork, image:np.array, epsilon:float, output_index:int) -> MarabouNetwork:
    '''Encodes a haze transformation as a Marabou input query

    Args:
        network (MarabouNetwork): the MarabouNetwork object
        image (np.array): The input image
        epsilon (float): Amount of transform
        output_index (int): Target output node (for the expected class)

    Returns:
        MarabouNetwork: The MarabouNetwork object with the encoded input query
    '''
    n_inputs = network.inputVars[0].flatten().shape[0]
    n_outputs = network.outputVars[0].flatten().shape[0]
    flattened_image = image.flatten()
    eps = network.getNewVariable()
    network.setLowerBound( eps, 0 )
    network.setUpperBound( eps, epsilon )
    network.inputVars = np.array([eps])
    for i in range(n_inputs):
        val = flattened_image[i]
        # x_i + (val - 1)*eps = val, i.e. x_i = val + eps*(1 - val):
        # each input pixel blends toward white (1.0) as eps grows
        network.addEquality([i, eps], [1, val - 1], val)
    for i in range(n_outputs):
        if i != output_index:
            network.addInequality([network.outputVars[0][i], network.outputVars[0][output_index]], [1, - 1], 0)
    return network
Example #26
 def run(self,
         likelihood: nirt.likelihood.Likelihood,
         theta_active: np.array,
         active_ind: np.array) -> \
         Tuple[np.array, nirt.likelihood.Likelihood]:
     logger = logging.getLogger("Solver.solve_at_resolution")
     # Improve theta estimates by Metropolis sweeps.
     # A vector of size len(theta_active) * C containing all person parameters.
     t = theta_active.flatten()
     energy = likelihood.log_likelihood_term(t, active_ind)
     theta_estimator = nirt.mcmc.McmcThetaEstimator(likelihood,
                                                    self._temperature)
     ll = sum(energy)
     logger.info("log-likelihood {:.2f}".format(ll))
     for sweep in range(self._num_iterations):
         ll_old = ll
         t, energy = theta_estimator.estimate(t,
                                              active=active_ind,
                                              energy=energy)
         ll = sum(energy)
         logger.info(
             "MCMC sweep {:2d} log-likelihood {:.4f} increase {:.2f} accepted {:.2f}%"
             .format(sweep, sum(energy), ll - ll_old,
                     100 * theta_estimator.acceptance_fraction))
     return t.reshape(theta_active.shape)  #, likelihood
Example #27
    def __init__(self, x: np.array, y: np.array, n_classes=2, out_features=3):
        y = y.flatten()
        n_features = x.shape[1]
        mean_vectors = []
        for i in range(n_classes):
            mean_vectors.append(np.mean(x[y == i], axis=0))

        s_w = np.zeros((n_features, n_features))
        for i, mv in zip(range(n_classes), mean_vectors):
            for sample in x[y == i]:
                sample, mv = sample.reshape(-1, 1), mv.reshape(n_features, 1)
                s_w += (sample - mv).dot((sample - mv).T)
        global_mean = np.mean(x, axis=0)

        s_b = np.zeros((n_features, n_features))
        for i, mean_vec in enumerate(mean_vectors):
            n = x[y == i].shape[0]
            mean_vec = mean_vec.reshape(-1, 1)
            global_mean = global_mean.reshape(-1, 1)
            s_b += n * (mean_vec - global_mean).dot((mean_vec - global_mean).T)

        eig_vals, eig_vecs = np.linalg.eig(np.linalg.inv(s_w).dot(s_b))
        eig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:, i])
                     for i in range(len(eig_vals))]
        eig_pairs = sorted(eig_pairs, key=lambda k: k[0], reverse=True)
        self.w = np.hstack(
            [eig_pairs[i][1].reshape(-1, 1) for i in range(0, out_features)])
Example #28
    def setup_filter(self, initial_detection: np.array):
        initial_detection = validate_points(initial_detection)

        dim_x = 2 * 2 * self.num_points  # Doubled to accommodate velocities
        dim_z = 2 * self.num_points
        self.dim_z = dim_z
        self.filter = KalmanFilter(dim_x=dim_x, dim_z=dim_z)

        # State transition matrix (models physics): numpy.array()
        self.filter.F = np.eye(dim_x)
        dt = 1  # At each step we update pos with v * dt
        for p in range(dim_z):
            self.filter.F[p, p + dim_z] = dt

        # Measurement function: numpy.array(dim_z, dim_x)
        self.filter.H = np.eye(
            dim_z,
            dim_x,
        )

        # Measurement uncertainty (sensor noise): numpy.array(dim_z, dim_z)
        # TODO: maybe we should open this one to the users, as it lets them
        #       chose between giving more/less importance to the detections
        self.filter.R *= 4.0

        # Process uncertainty: numpy.array(dim_x, dim_x)
        # Don't decrease it too much or trackers pay too little attention to detections
        # self.filter.Q[:dim_z, :dim_z] /= 50
        self.filter.Q[dim_z:, dim_z:] /= 10

        # Initial state: numpy.array(dim_x, 1)
        self.filter.x[:dim_z] = np.expand_dims(initial_detection.flatten(), 0).T

        # Estimation uncertainty: numpy.array(dim_x, dim_x)
        self.filter.P[dim_z:, dim_z:] *= 10.0
Example #29
    def forwards_kinematics(self, current_pose: np.array) -> np.array:
        current_pose = current_pose.flatten()

        current_pose[2] += current_pose[1]  # relative to absolute

        theta = current_pose[0]

        joint_1 = self._joint_1_length * np.array([[math.cos(
            current_pose[1])], [math.sin(current_pose[1])]])

        joint_2 = self._joint_2_length * np.array([[math.cos(
            current_pose[2])], [math.sin(current_pose[2])]])

        end_effector = joint_1 + joint_2

        r = np.linalg.norm(end_effector)
        end_effector_unit = (end_effector / r).flatten()

        # need to subtract half-pi because rho is angle down from z, not angle up from xy
        rho = math.pi / 2 - math.atan2(end_effector_unit[1],
                                       end_effector_unit[0])

        x = r * math.sin(rho) * math.cos(theta)
        y = r * math.sin(rho) * math.sin(theta)
        z = r * math.cos(rho)

        return np.array([[x], [y], [z]])
Example #30
	def encode_structures(self, cells: np.array) -> np.array:
		# int8 so the -1 for enemy structures does not wrap around
		structures = np.zeros(shape=(self.board_height * self.board_width), dtype=np.int8)
		for idx, cell in enumerate(cells.flatten()):
			if cell.structure is not None:
				owner_flag = 1 if cell.structure.owner == self.owner else -1
				structures[idx] = owner_flag
		return structures.reshape(self.board_height, self.board_width)
Example #31
 def reachable_states(self, board: np.array) -> Iterable[State]:
     flattened = board.flatten()
     for i in range(len(flattened)):
         if flattened[i] == 0:
             state = flattened.copy()
             state[i] = self.token
             yield tuple(state)
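The enumeration in isolation, assuming `import numpy as np` and a player whose `token` is 1:

board = np.array([[1, 0], [0, 2]])
flattened = board.flatten()
for i in np.where(flattened == 0)[0]:
    state = flattened.copy()
    state[i] = 1
    print(tuple(state.tolist()))  # (1, 1, 0, 2), then (1, 0, 1, 2)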
Example #32
    def insert(self, date_str: str, weekly_soil_mois: np.array) -> None:
        """
        :param date_str: current data's datetime
        :param weekly_soil_mois: data
        :return: None
        """
        flattened_data = weekly_soil_mois.flatten()

        with Connection() as conn:
            cur = conn.cursor()
            for gid, val in enumerate(flattened_data.tolist()):
                val = float('NaN') if val in [-999, -9999] else val
                try:
                    cur.execute(
                        self.INSERT_SOIL_MOISTURE,
                        (gid, datetime.datetime.strptime(date_str,
                                                         "%Y%m%d"), val))
                    self.inserted_count += cur.rowcount
                    conn.commit()
                except Exception:
                    logger.error("error: " + traceback.format_exc())

            logger.info(
                f'{date_str} finished, total inserted {self.inserted_count}')
            cur.close()