Example #1
    def _check_sst_quality(self, dataset, product_type):
        mask_specs = product_type.get_mask_consistency_check_specs()
        if len(mask_specs) == 0:
            return

        sst_variable_names = product_type.get_sst_variable_names()
        if len(sst_variable_names) == 0:
            return

        quality_variable_name = mask_specs[0][2]
        quality_data = dataset.variables[quality_variable_name][:]

        valid_retrieval_quality = ma.masked_less(quality_data, 2)
        self.report["sst_valid_retrieval"] = float(valid_retrieval_quality.count())

        failed_retrieval_quality = ma.masked_not_equal(quality_data, 1)
        sst_variable = dataset.variables[sst_variable_names[0]]
        fill_value = sst_variable.getncattr('_FillValue')
        sst_quality_one_data = ma.array(sst_variable[:], mask=failed_retrieval_quality.mask)

        invalid_retrieval = ma.masked_equal(sst_quality_one_data, fill_value)
        self.report["sst_invalid_retrieval"] = float(invalid_retrieval.count())

        failed_retrieval = ma.masked_not_equal(sst_quality_one_data, fill_value)
        self.report["sst_failed_retrieval"] = float(failed_retrieval.count())

        not_ocean = ma.masked_not_equal(quality_data, 0)
        self.report["not_ocean"] = float(not_ocean.count())
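A minimal sketch of the counting idiom above (not from the original source; assumes numpy imported as np and numpy.ma as ma): count() tallies only the unmasked elements, so each mask doubles as a counter.

import numpy as np
import numpy.ma as ma

quality_data = np.array([0, 1, 2, 5, 1])
print(ma.masked_less(quality_data, 2).count())       # 2 -> elements with quality >= 2
print(ma.masked_not_equal(quality_data, 0).count())  # 1 -> elements equal to 0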
Example #2
def logisticRegression(x, t, K, alpha=0.01, epsilon=0.001, steps=500):
    """
    Return  w:      vector with dimension M(K)
    """
    #initialize w, phi, _t
    phi = basisNone(x)
    M = phi.shape[1]
    for _ in range(K - 1):
        phi = scipy.linalg.block_diag(phi, basisNone(x))
    w = np.random.random(phi.shape[1])

    _t = np.empty(0)
    for k in range(K):
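        # one-hot encode class k: 1 where t == k + 1, 0 elsewhere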
        _t = np.append(_t, ma.masked_not_equal(ma.masked_not_equal(t, k + 1).filled(0), 0).filled(1))

    error = 0
    de = 0
    #gradient descent on error function
    for step in range(steps):
        previous = error
        _y = y(w.reshape((K, M)), basisNone(x))
        w = w_new(w, _y, _t, phi, K, alpha)
        error = np.dot(-_t, np.array(list(map(math.log, abs(_y)))))
        de = error - previous
        if abs(de) < epsilon:
            print("Finished after step %d with delta-error = %f error = %f" % (step, de, error))
            break
    return w
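The nested masked_not_equal expression above builds a one-hot target vector; a minimal sketch of its effect (assumes numpy as np and numpy.ma as ma, with 1-based class labels):

import numpy as np
import numpy.ma as ma

t = np.array([1, 2, 3, 2])
k = 1  # class index; labels are 1-based, so this selects label 2
one_hot = ma.masked_not_equal(ma.masked_not_equal(t, k + 1).filled(0), 0).filled(1)
print(one_hot)  # [0 1 0 1]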
Example #3
def logisticRegression(x, t, K, alpha=0.01, epsilon=0.001, steps=500):
    """
    Return  w:      vector with dimension M(K)
    """
    #initialize w, phi, _t
    phi = basisNone(x)
    M = phi.shape[1]
    for _ in range(K - 1):
        phi = scipy.linalg.block_diag(phi, basisNone(x))
    w = np.random.random(phi.shape[1])

    _t = np.empty(0)
    for k in range(K):
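        # one-hot encode class k: 1 where t == k + 1, 0 elsewhere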
        _t = np.append(
            _t,
            ma.masked_not_equal(ma.masked_not_equal(t, k + 1).filled(0),
                                0).filled(1))

    error = 0
    de = 0
    #gradient descent on error function
    for step in range(steps):
        previous = error
        _y = y(w.reshape((K, M)), basisNone(x))
        w = w_new(w, _y, _t, phi, K, alpha)
        error = np.dot(-_t, np.array(list(map(math.log, abs(_y)))))
        de = error - previous
        if abs(de) < epsilon:
            print("Finished after step %d with delta-error = %f error = %f" %
                  (step, de, error))
            break
    return w
Example #4
    def _check_sst_quality(self, dataset, product_type):
        mask_specs = product_type.get_mask_consistency_check_specs()
        if len(mask_specs) == 0:
            return

        sst_variable_names = product_type.get_sst_variable_names()
        if len(sst_variable_names) == 0:
            return

        quality_variable_name = mask_specs[0][2]
        quality_data = dataset.variables[quality_variable_name][:]

        valid_retrieval_quality = ma.masked_less(quality_data, 2)
        self.report["sst_valid_retrieval"] = float(
            valid_retrieval_quality.count())

        failed_retrieval_quality = ma.masked_not_equal(quality_data, 1)
        sst_variable = dataset.variables[sst_variable_names[0]]
        fill_value = sst_variable.getncattr('_FillValue')
        sst_quality_one_data = ma.array(sst_variable[:],
                                        mask=failed_retrieval_quality.mask)

        invalid_retrieval = ma.masked_equal(sst_quality_one_data, fill_value)
        self.report["sst_invalid_retrieval"] = float(invalid_retrieval.count())

        failed_retrieval = ma.masked_not_equal(sst_quality_one_data,
                                               fill_value)
        self.report["sst_failed_retrieval"] = float(failed_retrieval.count())

        not_ocean = ma.masked_not_equal(quality_data, 0)
        self.report["not_ocean"] = float(not_ocean.count())
Example #5
    def __call__(self, meta_data, img, depth, points):
        meta_data_occ = copy.deepcopy(meta_data)
        label = meta_data_occ['mask']
        for k in range(5):
            subpath = random.choice(self.syn)
            front = np.array(
                Image.open('{0}/{1}-color.png'.format(self.dataset_root,
                                                      subpath)).convert("RGB"))
            f_label = np.array(
                Image.open('{0}/{1}-label.png'.format(self.dataset_root,
                                                      subpath)))
            front_label = np.unique(f_label).tolist()[1:]
            if len(front_label) < self.front_num:
                continue
            front_label = random.sample(front_label, self.front_num)
            for f_i in front_label:
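                # mk is True where f_label != f_i, i.e. outside the chosen front object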
                mk = ma.getmaskarray(ma.masked_not_equal(f_label, f_i))
                if f_i == front_label[0]:
                    mask_front = mk
                else:
                    mask_front = mask_front * mk
            t_label = label * mask_front
            if len(t_label.nonzero()[0]) > 1000:
                label = t_label
                break

        mask_front = np.expand_dims(mask_front, 2)
        img_occ = img * mask_front + front * ~mask_front
        meta_data_occ['mask'] = label
        return meta_data_occ, img_occ, depth, points
Example #6
def parse_segments(seg, msk_modes):
    """Parse the label segments.

    Each channel corresponds to a different region of the tumor; decouple and stack these.

    mode_to_key_value = {"necrotic": 1, "edema": 2, "GD": 4}

    Args:
        seg: The segmentation labels
        msk_modes: Label mode to parse for the model

    Returns:
        The processed mask labels

    """
    msks_parsed = []
    for slice in range(seg.shape[-1]):
        # which mask values indicate which label mode
        mode_to_key_value = {"necrotic": 1, "edema": 2, "GD": 4}
        curr = seg[:, :, slice]
        this_msk_parts = []
        for mode in msk_modes:
            this_msk_parts.append(
                ma.masked_not_equal(
                    curr, mode_to_key_value[mode]).filled(fill_value=0))
        msks_parsed.append(np.dstack(this_msk_parts))

    # Replace all tumorous areas with 1 (previously marked as 1, 2 or 4)
    mask = np.asarray(msks_parsed)
    mask[mask > 0] = 1

    return mask
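A minimal sketch of the masking pattern used above (assumes numpy as np and numpy.ma as ma): masked_not_equal(...).filled(0) isolates one label value and zeroes everything else.

import numpy as np
import numpy.ma as ma

curr = np.array([[0, 1, 2],
                 [4, 1, 0]])
edema = ma.masked_not_equal(curr, 2).filled(fill_value=0)  # keep only label 2
print(edema)  # [[0 0 2]
              #  [0 0 0]]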
Example #7
def update_aseg_fs(subject):
    """
    Merge Freesurfer brain segmentation with the lesion segmentation 
    performed by samseg in Freesurfer's space.

    Parameters
    ----------
    subject : TYPE str
        Subject id.

    Returns
    -------
    None.

    """
    # Load lesion mask
    lesion_image = nib.load(SUBJECTS_DIR + "/sub-{0}_MPRAGE.nii/mri/lesions_fs.mgz".format(subject))
    lesion_mx = lesion_image.get_fdata()
    
    lesion_mask = ma.masked_not_equal(lesion_mx, 1)
    
    # Load Freesurfer segmentation mask (aseg)
    aseg_image = nib.load(SUBJECTS_DIR + "/sub-{0}_MPRAGE.nii/mri/aseg.mgz".format(subject))
    aseg_mx = aseg_image.get_fdata()
    
    # Set all voxels of aseg marked as lesion in the lesion mask to 99 (Freesurfer lesion id)
    aseg_mask = ma.masked_array(aseg_mx, np.logical_not(lesion_mask.mask), fill_value=99).filled()
    
    # Save resulting matrix to nifti file
    nifti_out = nib.Nifti1Image(aseg_mask,affine=aseg_image.affine)
    nib.save(nifti_out, SUBJECTS_DIR +"/sub-{0}_MPRAGE.nii/mri/aseg_lesions.nii.gz".format(subject))   
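The masking step above can be checked on a toy matrix (a minimal sketch, not from the source; assumes numpy as np and numpy.ma as ma, with 99 standing in for the Freesurfer lesion id):

import numpy as np
import numpy.ma as ma

lesion_mx = np.array([[1, 0],
                      [0, 1]])
aseg_mx = np.array([[10, 20],
                    [30, 40]])
lesion_mask = ma.masked_not_equal(lesion_mx, 1)  # mask is True where lesion_mx != 1
merged = ma.masked_array(aseg_mx, np.logical_not(lesion_mask.mask),
                         fill_value=99).filled()  # lesion voxels -> 99
print(merged)  # [[99 20]
               #  [30 99]]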
Example #8
def func(V, Y):
	count_each_feature_value = Counter(V) 
	feature_value = np.unique(V)
	num_value_feature = len(feature_value)
	subtotal = 0
	count = 0
	for ele in feature_value:# each value in this feature
		weight = count_each_feature_value[feature_value[count]] / float(sampleNum)
		after_mask = ma.masked_not_equal(V,ele)
		ma.set_fill_value(after_mask,0)
		after_mask = after_mask.filled()
		mat_each_value_feature = np.dot(after_mask, Y) #
		tmp = mat_each_value_feature
		tmp_sum = np.sum(mat_each_value_feature)
		mat_each_value_feature = np.dot(mat_each_value_feature,  1.0 / tmp_sum)
		current = 0
			#after_compress = ma.masked_equal(mat_each_value_feature, 0)
			#after_compress = after_compress.compressed()
			#t5 = time.clock()
			#current = np.dot(mat_each_value_feature, np.log2(after_compress))
			#t6 = time.clock()
			#mat_each_value_feature = mat_each_value_feature[-mat_each_value_feature.mask]
			#ma.set_fill_value(mat_each_value_feature,1)
			#mat_each_value_feature = mat_each_value_feature.filled()
			#current = np.dot(mat_each_value_feature, np.log2(mat_each_value_feature))
			#current = 0
		for k in mat_each_value_feature:
			if k == 0:
				continue
			current += k * math.log(k,2)
		subtotal += current * weight
		count += 1
	subtotal = -subtotal
	return subtotal
Example #9
    def openData(self, ccd, ap, save=False, mask=True):
        target = self.target.replace(' ', '_')
        filters = self.filters[::-1]
        ccd = str(ccd)
        ap = str(ap)
        if ccd not in self.apnames.keys():
            raise ValueError('{} not a valid CCD'.format(ccd))
        if ap not in self.apnames[ccd]:
            raise ValueError('{} not a valid aperture'.format(ap))

        data = self.logf.tseries(ccd, ap)
        obstime = Time(data.t,
                       format='mjd',
                       scale='utc',
                       location=self.tel_location)
        data.t = obstime.tdb.value
        exp = self.logf[ccd]['Exptim'] / 86400
        weights = np.ones(len(data.t))
        m = data.get_mask()
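        # True where the flux is nonzero (masked_not_equal masks y != 0)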
        zero_flux_mask = ma.getmask(ma.masked_not_equal(data.y, 0))
        data_mask = np.logical_and(~m, zero_flux_mask)
        if mask:
            out = np.column_stack([
                data.t[data_mask], exp[data_mask], data.y[data_mask],
                data.ye[data_mask], weights[data_mask], weights[data_mask]
            ])
        else:
            out = np.column_stack(
                [data.t, exp, data.y, data.ye, weights, weights])
        return out, data_mask
Example #10
def func(V, Y):
    count_each_feature_value = Counter(V)
    feature_value = np.unique(V)
    num_value_feature = len(feature_value)
    subtotal = 0
    count = 0
    for ele in feature_value:  # each value in this feature
        weight = count_each_feature_value[feature_value[count]] / float(
            sampleNum)
        after_mask = ma.masked_not_equal(V, ele)
        ma.set_fill_value(after_mask, 0)
        after_mask = after_mask.filled()
        mat_each_value_feature = np.dot(after_mask, Y)  #
        tmp = mat_each_value_feature
        tmp_sum = np.sum(mat_each_value_feature)
        mat_each_value_feature = np.dot(mat_each_value_feature, 1.0 / tmp_sum)
        current = 0
        #after_compress = ma.masked_equal(mat_each_value_feature, 0)
        #after_compress = after_compress.compressed()
        #t5 = time.clock()
        #current = np.dot(mat_each_value_feature, np.log2(after_compress))
        #t6 = time.clock()
        #mat_each_value_feature = mat_each_value_feature[-mat_each_value_feature.mask]
        #ma.set_fill_value(mat_each_value_feature,1)
        #mat_each_value_feature = mat_each_value_feature.filled()
        #current = np.dot(mat_each_value_feature, np.log2(mat_each_value_feature))
        #current = 0
        for k in mat_each_value_feature:
            if k == 0:
                continue
            current += k * math.log(k, 2)
        subtotal += current * weight
        count += 1
    subtotal = -subtotal
    return subtotal
Example #11
def update_aseg_norm(subject):
    """
    Merge normalized Freesurfer brain segmentation with the lesion segmentation 
    performed by samseg. 

    Parameters
    ----------
    subject : TYPE str
        Subject id.

    Returns
    -------
    None.

    """
    # Load lesion mask
    lesion_image = nib.load(MAIN_DIR+'/derivatives/segmentations/sub-{0}/ses-{1}/sub-{0}_lesions_binary.nii.gz'.format(subject,SESSION))
    lesion_mx = lesion_image.get_fdata()
    
    lesion_mask = ma.masked_not_equal(lesion_mx, 1)
    
    # Load Freesurfer segmentation mask (aseg)
    aseg_image = nib.load(MAIN_DIR + "/derivatives/segmentations/sub-{0}/ses-{1}/sub-{0}_aseg_normalized.nii.gz".format(subject,SESSION))
    aseg_mx = aseg_image.get_fdata()
    
    # Set all voxels of aseg marked as lesion in the lesion mask to 99 (Freesurfer lesion id)
    aseg_mask = ma.masked_array(aseg_mx, np.logical_not(lesion_mask.mask), fill_value=99).filled()
    
    # Save resulting matrix to nifti file
    nifti_out = nib.Nifti1Image(aseg_mask,affine=aseg_image.affine)
    nib.save(nifti_out, MAIN_DIR + "/derivatives/segmentations/sub-{0}/ses-{1}/sub-{0}_aseg_lesions.nii.gz".format(subject,SESSION))   
Example #12
 def windows(self,
             w1=np.array([0, 0]),
             w2=np.array([0, 0]),
             r_w=0,
             w3=np.array([0, 0])):
     self.window_1 = w1  #array of start and ending time for window of use #1
     self.window_2 = w2  #array of start and ending time for window of use #2
     self.window_3 = w3  #array of start and ending time for window of use #3
     self.random_var_w = r_w  #percentage of variability in the start and ending times of the windows
     self.daily_use = np.zeros(1440)  #create an empty daily use profile
     self.daily_use[w1[0]:(w1[1])] = np.full(
         np.diff(w1), 0.001
     )  #fills the daily use profile with infinitesimal values that are just used to identify the functioning windows
     self.daily_use[w2[0]:(w2[1])] = np.full(
         np.diff(w2), 0.001)  #same as above for window2
     self.daily_use[w3[0]:(w3[1])] = np.full(
         np.diff(w3), 0.001)  #same as above for window3
     self.daily_use_masked = np.zeros_like(
         ma.masked_not_equal(self.daily_use, 0.001)
     )  #apply a python mask to the daily_use array to make only functioning windows 'visible'
     self.random_var_1 = int(
         r_w * np.diff(w1)
     )  #calculate the random variability of window1, i.e. the maximum range of time they can be enlarged or shortened
     self.random_var_2 = int(r_w * np.diff(w2))  #same as above
     self.random_var_3 = int(r_w * np.diff(w3))  #same as above
     self.user.App_list.append(
         self
     )  #automatically appends the appliance to the user's appliance list
Example #13
 def belowhorizon(z):
     """Return masked z values that are below the horizon.
     Below the horizon means either that z is negative or
     that z has a nonzero imaginary part.
     """
     imagz_ma = ma.getmaskarray(ma.masked_not_equal(z.imag, 0.))
     negz_ma = ma.getmaskarray(ma.masked_less(z, .0))
     belowhrz = ma.mask_or(imagz_ma, negz_ma)
     return belowhrz
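To see the combined mask on a toy input (a sketch, not from the source; assumes numpy as np and numpy.ma as ma, and tests the sign on z.real since recent numpy releases reject ordering comparisons on complex arrays):

import numpy as np
import numpy.ma as ma

z = np.array([1.0 + 0.0j, -0.5 + 0.0j, 2.0 + 0.3j])
imagz_ma = ma.getmaskarray(ma.masked_not_equal(z.imag, 0.))  # True where imag != 0
negz_ma = ma.getmaskarray(ma.masked_less(z.real, 0.))        # True where real part < 0
print(ma.mask_or(imagz_ma, negz_ma))  # [False  True  True]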
Example #14
def parse_segments(seg):
    # Each channel corresponds to a different region of the tumor, decouple and stack these

    msks_parsed = []
    for slice in range(seg.shape[-1]):
        curr = seg[:, :, slice]
        GD = ma.masked_not_equal(curr, 4).filled(fill_value=0)
        edema = ma.masked_not_equal(curr, 2).filled(fill_value=0)
        necrotic = ma.masked_not_equal(curr, 1).filled(fill_value=0)
        none = ma.masked_not_equal(curr, 0).filled(fill_value=0)

        msks_parsed.append(np.dstack((none, necrotic, edema, GD)))

    # Replace all tumorous areas with 1 (previously marked as 1, 2 or 4)
    mask = np.asarray(msks_parsed)
    mask[mask > 0] = 1

    return mask
Example #15
 def zone( self, position ):
     """
     Return a masked array representing the zone for the input position
     @param position - i,j coordinates from which to derive zone
     @return - NumPy masked array representing the geometry of the zone for pixel at input position
     """
     array = self.gField.ReadAsArray()
     val = array[position[0], position[1]]
     maskArray = ma.masked_not_equal(array, val)  #All values not equal to zone value of input are masked
     return maskArray
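On a small grid, the call above behaves like this (a sketch; assumes numpy as np and numpy.ma as ma):

import numpy as np
import numpy.ma as ma

array = np.array([[1, 1, 2],
                  [2, 3, 1]])
zone = ma.masked_not_equal(array, 1)  # everything != 1 is masked
print(zone)          # [[1 1 --]
                     #  [-- -- 1]]
print(zone.count())  # 3 pixels belong to the zone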
Example #16
def get_indexes(v, val):
    """
    Returns the indexes of the v array which have the value 'val':
    Cases:
        if v = column of a matrix:
            returns the rows which have the value 'val'
        if v = row of a matrix:
            returns the columns which have the value 'val'
    """
    max_mask = ma.getmask(ma.masked_not_equal(v, val))
    return list(ma.array(np.arange(len(v)), mask=max_mask).compressed())
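A quick check of the helper above (a sketch; assumes numpy as np and numpy.ma as ma):

import numpy as np
import numpy.ma as ma

v = np.array([3, 1, 3, 2, 3])
max_mask = ma.getmask(ma.masked_not_equal(v, 3))  # True where v != 3
print(ma.array(np.arange(len(v)), mask=max_mask).compressed())  # [0 2 4]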
Example #17
    def __getitem__(self, index):
        self.counter += 1
        if not self.use_real_img:
            return self.gen_synthetic()
        if index > self.num_real_images - 1:
            return self.gen_synthetic()
        # for DA, enforces that each batch has at least one source sample
        if self.one_syn_per_batch and self.counter % self.batch_size == 0:
            return self.gen_synthetic()
        else:
            prefix = self.train_paths[index]

            # get raw image
            raw = cv2.imread(prefix + "-color.png")
            img = cv2.resize(raw, (self.input_height, self.input_width))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            # load class info
            meta = loadmat(prefix + '-meta.mat')
            class_ids = meta['cls_indexes']

            # get segmentation gt, note 0 is for background
            label_img = cv2.imread(prefix + "-label.png")[:, :, 0]
            label_img = cv2.resize(label_img, (self.target_h, self.target_w),
                                   interpolation=cv2.INTER_NEAREST)

            # generate kp gt map of (nH, nW, nV)
            kp_gt_map_x = np.zeros((self.target_h, self.target_w, self.n_kp))
            kp_gt_map_y = np.zeros((self.target_h, self.target_w, self.n_kp))
            in_pkl = prefix + '-bb8_2d.pkl'
            with open(in_pkl, 'rb') as f:
                bb8_2d = pickle.load(f)

            for i, cid in enumerate(class_ids):
                class_mask = np.where(label_img == cid[0])
                kp_gt_map_x[class_mask] = bb8_2d[:, :, 0][i]
                kp_gt_map_y[class_mask] = bb8_2d[:, :, 1][i]

            mask_front = ma.getmaskarray(ma.masked_not_equal(label_img,
                                                             0)).astype(int)

            # debug: save 100 real images
            #cv2.imwrite("./real_images/img-" + str(random.randint(0, 100)) + ".jpg",cv2.cvtColor(img, cv2.COLOR_RGB2BGR))

            #TODO: get mask weighted by class
            return (torch.from_numpy(img.transpose(2, 0,
                                                   1)).float().div(255.0),
                    torch.from_numpy(label_img).long(),
                    torch.from_numpy(kp_gt_map_x).float(),
                    torch.from_numpy(kp_gt_map_y).float(),
                    torch.from_numpy(mask_front).float(),
                    torch.ones(self.target_w, self.target_h).long())
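The mask_front line above yields a binary foreground map (a sketch; assumes numpy as np and numpy.ma as ma):

import numpy as np
import numpy.ma as ma

label_img = np.array([[0, 3],
                      [7, 0]])
mask_front = ma.getmaskarray(ma.masked_not_equal(label_img, 0)).astype(int)
print(mask_front)  # [[0 1]
                   #  [1 0]]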
Example #18
 def zone(self, position):
     """
     Return a masked array representing the zone for the input position
     @param position - i,j coordinates from which to derive zone
     @return - NumPy masked array representing the geometry of the zone for pixel at input position
     """
     array = self.gField.ReadAsArray()
     val = array[position[0], position[1]]
     maskArray = ma.masked_not_equal(
         array,
         val)  #All values not equal to zone value of input are masked
     return maskArray
Example #19
    def _check_mask_consistency(self, dataset, product_type):
        """

        :type dataset: Dataset
        :type product_type: ProductType
        """

        consistency_check_specs = product_type.get_mask_consistency_check_specs(
        )
        if len(consistency_check_specs) == 0:
            return

        quality_variable_name = consistency_check_specs[0][2]
        quality_data = dataset.variables[quality_variable_name][:]
        quality_masks = {}
        for level in range(0, 6):
            level_mask = ma.masked_not_equal(quality_data, level).mask
            quality_masks.update({level: level_mask})

        for spec in consistency_check_specs:
            reference_variable_name = spec[0]
            objective_variable_name = spec[1]
            quality_variable_name = spec[2]

            if quality_variable_name in dataset.variables:
                quality_levels = spec[3]
                for l in quality_levels:
                    level_mask = quality_masks[l]
                    a = SstProductVerifier.__get_data_of_quality(
                        dataset, reference_variable_name, level_mask).mask
                    b = SstProductVerifier.__get_data_of_quality(
                        dataset, objective_variable_name, level_mask).mask
                    # false negatives: element is not masked in a, but masked in b
                    check_name = objective_variable_name + '.' + 'mask_false_negative_check_' + str(
                        l)
                    self.__check_false_negatives(a, b, check_name)
                    # false positives: element is masked in a, but not masked in b
                    check_name = objective_variable_name + '.' + 'mask_false_positive_check_' + str(
                        l)
                    self.__check_false_positives(a, b, check_name)
            else:
                a = SstProductVerifier.__get_data(dataset,
                                                  reference_variable_name).mask
                b = SstProductVerifier.__get_data(dataset,
                                                  objective_variable_name).mask
                # false negatives: element is not masked in a, but masked in b
                check_name = objective_variable_name + '.mask_false_negative_check'
                self.__check_false_negatives(a, b, check_name)
                # false positives: element is masked in a, but not masked in b
                check_name = objective_variable_name + '.' + 'mask_false_positive_check'
                self.__check_false_positives(a, b, check_name)
Example #20
 def get_mask(self, dep, img, masked):
     img_dep = dep.detach().numpy()
     if masked:
         img_mask_1 = ma.masked_not_equal(img_dep, 0)
         img_mask_2 = ma.masked_less(img_dep, (70 / self.max_value))
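         # ~mask_1 is True where depth == 0, ~mask_2 where depth >= the threshold;
         # adding booleans ORs them, so img_mask flags the invalid pixels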
         img_mask = ~img_mask_1.mask + ~img_mask_2.mask
         img_mask = np.array(img_mask, dtype=np.uint8)
         return 1 - img_mask
     else:
         mask = np.all(np.swapaxes(img.detach().numpy(), 0, 2) != [0, 0, 0], axis=-1)
         if mask.size != 1:
             return np.expand_dims(np.swapaxes(mask, 0, 1), axis=0)
         else:
             return np.ones(dep.shape, dtype=np.uint8)
Example #21
    def __getitem__(self, index):
        # get a single training sample
        if not self.use_real_img:
            # use synthetic images
            return self.gen_synthetic()
        if index > len(self.train_paths) - 1:
            # generate synthetic images if index out of original data range
            return self.gen_synthetic()
        else:
            # use real images from YCB videos
            prefix = self.train_paths[index]

            # get raw image
            raw = imread(prefix + "-color.png")
            img = cv2.resize(raw, (self.input_height, self.input_width))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            # load class info
            meta = loadmat(prefix + '-meta.mat')
            class_ids = meta['cls_indexes']

            # get segmentation gt, note 0 is for background
            label_img = imread(prefix + "-label.png")
            label_img = cv2.resize(label_img, (self.target_h, self.target_w),
                                   interpolation=cv2.INTER_NEAREST)

            # generate kp gt map of (nH, nW, nV)
            kp_gt_map_x = np.zeros((self.target_h, self.target_w, self.n_kp))
            kp_gt_map_y = np.zeros((self.target_h, self.target_w, self.n_kp))
            in_pkl = prefix + '-bb8_2d.pkl'
            with open(in_pkl, 'rb') as f:
                bb8_2d = pickle.load(f)
            for i, cid in enumerate(class_ids):
                class_mask = np.where(label_img == cid[0])
                kp_gt_map_x[class_mask] = bb8_2d[:, :, 0][i]
                kp_gt_map_y[class_mask] = bb8_2d[:, :, 1][i]

            # get image mask front (used to compute loss)
            mask_front = ma.getmaskarray(ma.masked_not_equal(label_img,
                                                             0)).astype(int)

            # return training data
            # input  : normalized RGB image
            # output : segmentation mask, x ground truth map, y ground truth map & mask front
            return (torch.from_numpy(img.transpose(2, 0,
                                                   1)).float().div(255.0),
                    torch.from_numpy(label_img).long(),
                    torch.from_numpy(kp_gt_map_x).float(),
                    torch.from_numpy(kp_gt_map_y).float(),
                    torch.from_numpy(mask_front).float())
Example #22
    def getMetaData(self, index, mask=False, bbox=False, camera_matrix=False):
        returned_dict = {}
        object_label = self.list_obj[index]
        returned_dict['object_label'] = object_label
        # Transform matrix should be in 4x4 format
        rank = self.list_rank[index]

        if object_label == 2:
            for i in range(0, len(self.meta[object_label][rank])):
                if self.meta[object_label][rank][i]['obj_id'] == 2:
                    meta = self.meta[object_label][rank][i]
                    break
        else:
            meta = self.meta[object_label][rank][0]

        target_r = np.resize(np.array(meta['cam_R_m2c']), (3, 3))
        target_t = np.array(meta['cam_t_m2c'])

        transform_mat = np.identity(4)
        transform_mat[:3, :3] = target_r
        transform_mat[:3, 3] = target_t / 1000.0

        returned_dict['transform_mat'] = transform_mat

        if mask:
            path = self.list_depth[index]
            depth = self.getDepthImage(index)
            label = np.array(Image.open(self.list_label[index]))
            if (len(label.shape) > 2):
                label = label[:, :, 0]

            mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
            mask_label = ma.getmaskarray(ma.masked_equal(label, np.array(255)))

            mask = mask_label * mask_depth
            returned_dict['mask'] = mask

        if bbox:  # needs to return x,y,w,h
            bbox = get_bbox(meta['obj_bb'])
            returned_dict['bbox'] = bbox

        if camera_matrix:
            returned_dict['camera_scale'] = 1.0
            returned_dict['camera_cx'] = 325.26110
            returned_dict['camera_cy'] = 242.04899
            returned_dict['camera_fx'] = 572.41140
            returned_dict['camera_fy'] = 573.57043

        return returned_dict
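The two masks above combine multiplicatively, keeping pixels that are both labeled and carry valid depth (a sketch; assumes numpy as np and numpy.ma as ma, with 255 as the label of interest):

import numpy as np
import numpy.ma as ma

depth = np.array([[0, 5],
                  [7, 2]])
label = np.array([[255, 255],
                  [0, 255]])
mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))  # valid (nonzero) depth
mask_label = ma.getmaskarray(ma.masked_equal(label, 255))    # pixels with the label
print(mask_depth * mask_label)  # [[False  True]
                                #  [False  True]]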
Example #23
    def __get_masked_data_of_quality(variable, quality_data, level):
        """

        :type variable: Variable
        :type quality_data: Object
        :type level: int
        :rtype : ma.MaskedArray
        """
        mask = ma.masked_not_equal(quality_data, level).mask
        data = ma.array(variable[:], mask=mask)
        try:
            fill_value = variable.getncattr('_FillValue')
            return ma.array(data, mask=ma.masked_equal(data, fill_value).mask)
        except AttributeError:
            return data
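On a toy pair of arrays, the two-step masking above behaves like this (a sketch, not from the source; assumes numpy as np and numpy.ma as ma, with -999.0 standing in for _FillValue):

import numpy as np
import numpy.ma as ma

quality_data = np.array([0, 2, 2, 1])
values = np.array([10.0, -999.0, 3.5, 7.0])
mask = ma.masked_not_equal(quality_data, 2).mask  # keep only quality level 2
data = ma.array(values, mask=mask)
data = ma.array(data, mask=ma.masked_equal(data, -999.0).mask)  # drop fill values too
print(data)  # [-- -- 3.5 --]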
Example #24
    def __get_masked_data_of_quality(variable, quality_data, level):
        """

        :type variable: Variable
        :type quality_data: Object
        :type level: int
        :rtype : ma.MaskedArray
        """
        mask = ma.masked_not_equal(quality_data, level).mask
        data = ma.array(variable[:], mask=mask)
        try:
            fill_value = variable.getncattr('_FillValue')
            return ma.array(data, mask=ma.masked_equal(data, fill_value).mask)
        except AttributeError:
            return data
Example #25
    def _check_mask_consistency(self, dataset, product_type):
        """

        :type dataset: Dataset
        :type product_type: ProductType
        """

        consistency_check_specs = product_type.get_mask_consistency_check_specs()
        if len(consistency_check_specs) == 0:
            return

        quality_variable_name = consistency_check_specs[0][2]
        quality_data = dataset.variables[quality_variable_name][:]
        quality_masks = {}
        for level in range(0, 6):
            level_mask = ma.masked_not_equal(quality_data, level).mask
            quality_masks.update({level: level_mask})

        for spec in consistency_check_specs:
            reference_variable_name = spec[0]
            objective_variable_name = spec[1]
            quality_variable_name = spec[2]

            if quality_variable_name in dataset.variables:
                quality_levels = spec[3]
                for l in quality_levels:
                    level_mask = quality_masks[l]
                    a = SstProductVerifier.__get_data_of_quality(dataset, reference_variable_name, level_mask).mask
                    b = SstProductVerifier.__get_data_of_quality(dataset, objective_variable_name, level_mask).mask
                    # false negatives: element is not masked in a, but masked in b
                    check_name = objective_variable_name + '.' + 'mask_false_negative_check_' + str(l)
                    self.__check_false_negatives(a, b, check_name)
                    # false positives: element is masked in a, but not masked in b
                    check_name = objective_variable_name + '.' + 'mask_false_positive_check_' + str(l)
                    self.__check_false_positives(a, b, check_name)
            else:
                a = SstProductVerifier.__get_data(dataset, reference_variable_name).mask
                b = SstProductVerifier.__get_data(dataset, objective_variable_name).mask
                # false negatives: element is not masked in a, but masked in b
                check_name = objective_variable_name + '.mask_false_negative_check'
                self.__check_false_negatives(a, b, check_name)
                # false positives: element is masked in a, but not masked in b
                check_name = objective_variable_name + '.' + 'mask_false_positive_check'
                self.__check_false_positives(a, b, check_name)
Example #26
def filter_auxfiles(data, isets, auxf, auxpar, auxconds):

    if auxf:
        data = ma.asarray(data)

        for f in range(len(auxf)):
            catlg = cl.ClumpCatalog.from_file([auxf[f]])
            size = np.shape(catlg.clumps)[0]

            npars = len(auxpar[f])
            tmparr = np.zeros((npars, size))
            for i in range(size):
                for j in range(npars):
                    tmparr[j, i] = catlg.clumps[i].record[auxpar[f][j]]

            tmparr = ma.asarray(tmparr)
            fcond = auxconds[f]
            col = 0
            while fcond:
                cond = fcond.pop(0)
                cut = fcond.pop(0)

                if cond == ">":
                    mask_aux = ma.masked_greater(tmparr[col, :], cut)
                elif cond == "=":
                    mask_aux = ma.masked_equal(tmparr[col, :], cut)
                elif cond == "<":
                    mask_aux = ma.masked_less(tmparr[col, :], cut)
                elif cond == "!=":
                    mask_aux = ma.masked_not_equal(tmparr[col, :], cut)
                col += 1

                tmparr.mask = mask_aux.mask

        if np.shape(tmparr.mask):
            data.mask = tmparr.mask
        else:
            data = data.data

    return data
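For reference, the four comparison maskers dispatched on above act like this on a small vector (a sketch; assumes numpy as np and numpy.ma as ma):

import numpy as np
import numpy.ma as ma

x = np.array([1, 2, 3, 4])
print(ma.masked_greater(x, 2))    # [1 2 -- --]
print(ma.masked_equal(x, 2))      # [1 -- 3 4]
print(ma.masked_less(x, 2))       # [-- 2 3 4]
print(ma.masked_not_equal(x, 2))  # [-- 2 -- --]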
Example #27
def filter_clumps(varmap, ifiles, defs, conds, inclumps):
    """Filter data in clumps"""

    mapinclumps = np.empty(ifiles, dtype=maps.Map)

    print("inclumps")
    if defs:
        print("defs")

        for i in range(ifiles):
            mapinclumps[i] = varmap[i].masked_where(ma.getmask(inclumps))
            print("  >>> number of valid pixels in clumps:", i, "=>",
                  mapinclumps[i].count())

        while len(conds) > 0:

            cond = conds.pop(0)
            val = conds.pop(0)

            if cond == ">":
                mskcl = ma.masked_greater(inclumps, val)
            elif cond == "=":
                mskcl = ma.masked_not_equal(inclumps, val)
            elif cond == "<":
                mskcl = ma.masked_less(inclumps, val)
            else:
                print("  ++ wrong condition in filter clumps", cond)
                sys.exit(1)

            for i in range(ifiles):
                mapinclumps[i] = mapinclumps[i].masked_where(ma.getmask(mskcl))
                print("  >>> number of valid pixels after clump filter:", i,
                      "=>", mapinclumps[i].count())

    else:
        for i in range(ifiles):
            mapinclumps[i] = varmap[i].copy()

    return mapinclumps
Example #28
def count_matching_lesion_voxels(lesion_mx, segmentation_mx, lesion,
                                 segmentation):
    """
    Counts the number of voxels where 
        lesion_mx[index]       = lesion         and 
        segmentation_mx[index] = segmentation 
    for the same index

    Parameters
    ----------
    lesion_mx : 3D numpy.ndarray
        Matrix of labeled lesions.
    segmentation_mx : 3D numpy.ndarray
        Matrix of labeled brain segmentation.
    lesion : TYPE <int>
        Label for the lesion of interest.
    segmentation : TYPE <int>
        Label for the brain structure of interest.

    Returns
    -------
    TYPE <int>
        Count of matching voxels.

    """

    les_mx = lesion_mx.copy()

    # Set all voxels whose value differ from the lesion id to -1
    les_mx = ma.masked_not_equal(les_mx, lesion)
    les_mx.fill_value = -1
    les_mx = les_mx.filled()

    # Set all voxels whose value equal to the lesion id to 1
    les_mx[les_mx == lesion] = 1

    return np.sum(les_mx == segmentation_mx)
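A worked toy run of the relabeling above (a sketch; assumes numpy as np and numpy.ma as ma, integer labels, and lesion id 3):

import numpy as np
import numpy.ma as ma

lesion_mx = np.array([3, 3, 5, 0])
les_mx = ma.masked_not_equal(lesion_mx.copy(), 3)  # mask everything that is not lesion 3
les_mx.fill_value = -1
les_mx = les_mx.filled()  # -> [ 3  3 -1 -1]
les_mx[les_mx == 3] = 1   # -> [ 1  1 -1 -1]
segmentation_mx = np.array([1, 0, 1, 1])
print(np.sum(les_mx == segmentation_mx))  # 1 matching voxel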
Example #29
    def test_testOddFeatures(self):
        # Test of other odd features
        x = arange(20)
        x = x.reshape(4, 5)
        x.flat[5] = 12
        assert_(x[1, 0] == 12)
        z = x + 10j * x
        assert_(eq(z.real, x))
        assert_(eq(z.imag, 10 * x))
        assert_(eq((z * conjugate(z)).real, 101 * x * x))
        z.imag[...] = 0.0

        x = arange(10)
        x[3] = masked
        assert_(str(x[3]) == str(masked))
        c = x >= 8
        assert_(count(where(c, masked, masked)) == 0)
        assert_(shape(where(c, masked, masked)) == c.shape)
        z = where(c, x, masked)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is masked)
        assert_(z[7] is masked)
        assert_(z[8] is not masked)
        assert_(z[9] is not masked)
        assert_(eq(x, z))
        z = where(c, masked, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        z = masked_where(c, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        assert_(eq(x, z))
        x = array([1., 2., 3., 4., 5.])
        c = array([1, 1, 1, 0, 0])
        x[2] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        c[0] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))
        assert_(eq(masked_where(greater_equal(x, 2), x),
                   masked_greater_equal(x, 2)))
        assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2)))
        assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
        assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
        assert_(eq(masked_inside(array(list(range(5)),
                                       mask=[1, 0, 0, 0, 0]), 1, 3).mask,
                   [1, 1, 1, 1, 0]))
        assert_(eq(masked_outside(array(list(range(5)),
                                        mask=[0, 1, 0, 0, 0]), 1, 3).mask,
                   [1, 1, 0, 0, 1]))
        assert_(eq(masked_equal(array(list(range(5)),
                                      mask=[1, 0, 0, 0, 0]), 2).mask,
                   [1, 0, 1, 0, 0]))
        assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],
                                          mask=[1, 0, 0, 0, 0]), 2).mask,
                   [1, 0, 1, 0, 1]))
        assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
                   [99, 99, 3, 4, 5]))
        atest = ones((10, 10, 10), dtype=np.float32)
        btest = zeros(atest.shape, MaskType)
        ctest = masked_where(btest, atest)
        assert_(eq(atest, ctest))
        z = choose(c, (-x, x))
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        x = arange(6)
        x[5] = masked
        y = arange(6) * 10
        y[2] = masked
        c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])
        cm = c.filled(1)
        z = where(c, x, y)
        zm = where(cm, x, y)
        assert_(eq(z, zm))
        assert_(getmask(zm) is nomask)
        assert_(eq(zm, [0, 1, 2, 30, 40, 50]))
        z = where(c, masked, 1)
        assert_(eq(z, [99, 99, 99, 1, 1, 1]))
        z = where(c, 1, masked)
        assert_(eq(z, [99, 1, 1, 99, 99, 99]))
Example #30
def printCurve(take_idx, criterion):
    conf_tp_or_fn = [[] for i in range(5)]
    conf_fp = [[] for i in range(5)]
    prec = [[] for i in range(5)]
    recall = [[] for i in range(5)]

    if take_idx == 0:
        file_1_name = 'occ/under60.txt'
        file_2_name = 'occ/under60_frames.txt'

    elif take_idx == 1:
        file_1_name = 'occ/from60to80.txt'
        file_2_name = 'occ/f60t80_frames.txt'

    else:
        file_1_name = 'occ/up80.txt'
        file_2_name = 'occ/up80_frames.txt'

    xmap = np.array([[j for i in range(640)] for j in range(480)])
    ymap = np.array([[i for i in range(640)] for j in range(480)])

    with open(file_1_name, 'r') as f1:
        with open(file_2_name, 'r') as f2:
            while 1:
                input_line_test = f2.readline()
                # print(input_line_test)
                if not input_line_test:
                    break
                if input_line_test[-1:] == '\n':
                    input_line_test = input_line_test[:-1]
                _, test_scene_id, test_frame_id = input_line_test.split('/')

                input_line_test = '/'.join(
                    ['data_v1', test_scene_id, test_frame_id])
                # import pdb;pdb.set_trace()

                input_line_test_2 = f1.readline()
                test_obj_id = int(float(input_line_test_2.split()[2])) + 1
                test_idx = int(float(input_line_test_2.split()[1]))
                # import pdb;pdb.set_trace()

                img = Image.open('{0}/{1}-color.png'.format(
                    opt.dataset_root, input_line_test))
                depth = np.array(
                    Image.open('{0}/{1}-depth.png'.format(
                        opt.dataset_root, input_line_test)))
                label = np.array(
                    Image.open('{0}/{1}-label.png'.format(
                        opt.dataset_root, input_line_test)))
                meta = scio.loadmat('{0}/{1}-meta.mat'.format(
                    opt.dataset_root, input_line_test))

                cam_cx = 312.9869
                cam_cy = 241.3109
                cam_fx = 1066.778
                cam_fy = 1067.487
                mask_back = ma.getmaskarray(ma.masked_equal(label, 0))

                print('scene index: ', test_scene_id)
                print('object index: ', test_obj_id)
                mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
                mask_label = ma.getmaskarray(
                    ma.masked_equal(label, test_obj_id))
                mask = mask_label * mask_depth
                if not (len(mask.nonzero()[0]) > 50
                        and len(opt.symmetry[test_obj_id]['mirror']) > 0):
                    continue

                rmin, rmax, cmin, cmax = get_bbox(mask_label)
                img_temp = np.transpose(np.array(img)[:, :, :3],
                                        (2, 0, 1))[:, rmin:rmax, cmin:cmax]

                img_masked = img_temp
                target_r = meta['poses'][:, :, test_idx][:, 0:3]
                target_t = np.array(meta['poses'][:, :,
                                                  test_idx][:, 3:4].flatten())

                choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
                if len(choose) > opt.num_points:
                    c_mask = np.zeros(len(choose), dtype=int)
                    c_mask[:opt.num_points] = 1
                    np.random.shuffle(c_mask)
                    choose = choose[c_mask.nonzero()]
                else:
                    choose = np.pad(choose, (0, opt.num_points - len(choose)),
                                    'wrap')

                depth_masked = depth[
                    rmin:rmax,
                    cmin:cmax].flatten()[choose][:,
                                                 np.newaxis].astype(np.float32)
                xmap_masked = xmap[
                    rmin:rmax,
                    cmin:cmax].flatten()[choose][:,
                                                 np.newaxis].astype(np.float32)
                ymap_masked = ymap[
                    rmin:rmax,
                    cmin:cmax].flatten()[choose][:,
                                                 np.newaxis].astype(np.float32)
                choose = np.array([choose])

                cam_scale = meta['factor_depth'][0][0]
                pt2 = depth_masked / cam_scale
                pt0 = (ymap_masked - cam_cx) * pt2 / cam_fx
                pt1 = (xmap_masked - cam_cy) * pt2 / cam_fy
                cloud = np.concatenate((pt0, pt1, pt2), axis=1)

                target_sym = []
                for sym in opt.symmetry[test_obj_id]['mirror']:
                    target_sym.append(np.dot(sym, target_r.T))
                target_sym = np.array(target_sym)

                target_cen = np.add(opt.symmetry[test_obj_id]['center'],
                                    target_t)

                # print('ground truth norm: ', target_sym)
                # print('ground truth center: ', target_cen)
                points_ten, choose_ten, img_ten, target_sym_ten, target_cen_ten, idx_ten = \
                torch.from_numpy(cloud.astype(np.float32)).unsqueeze(0), \
                torch.LongTensor(choose.astype(np.int32)).unsqueeze(0), \
                opt.norm(torch.from_numpy(img_masked.astype(np.float32))).unsqueeze(0), \
                torch.from_numpy(target_sym.astype(np.float32)).unsqueeze(0), \
                torch.from_numpy(target_cen.astype(np.float32)).unsqueeze(0), \
                torch.LongTensor([test_obj_id-1]).unsqueeze(0)

                points_ten, choose_ten, img_ten, target_sym_ten, target_cen_ten, idx_ten = Variable(points_ten), \
                                                                    Variable(choose_ten), \
                                                                    Variable(img_ten), \
                                                                    Variable(target_sym_ten), \
                                                                    Variable(target_cen_ten), \
                                                                    Variable(idx_ten)

                pred_norm, pred_on_plane, emb = opt.estimator(
                    img_ten, points_ten, choose_ten, idx_ten)

                bs, num_p, _ = pred_on_plane.size()

                pred_norm = pred_norm / (torch.norm(pred_norm, dim=2).view(
                    bs, num_p, 1))
                pred_norm = pred_norm.detach().numpy()
                pred_on_plane = pred_on_plane.detach().numpy()
                points = points_ten.detach().numpy()

                clustering_points_idx = np.where(pred_on_plane > max(
                    0.5,
                    pred_on_plane.max() * PRED_ON_PLANE_FACTOR +
                    pred_on_plane.mean() * (1 - PRED_ON_PLANE_FACTOR)))[1]
                clustering_norm = pred_norm[0, clustering_points_idx, :]
                clustering_points = points[0, clustering_points_idx, :]
                num_points = len(clustering_points_idx)

                # print(pred_on_plane.max())
                # import pdb;pdb.set_trace()

                close_thresh = 2e-3
                broad_thresh = 3e-3

                sym_conf = np.zeros((5, target_sym.shape[0]))

                count_pred = 0

                # import pdb; pdb.set_trace()

                while True:
                    count_pred += 1
                    if num_points <= 20 or count_pred > 3:
                        break

                    best_fit_num = 0

                    count_try = 0

                    for j in range(10):

                        pick_idx = np.random.randint(0, num_points - 1)
                        pick_point = clustering_points[pick_idx]
                        # proposal_norm = np.array(Plane(Point3D(pick_points[0]),Point3D(pick_points[1]),Point3D(pick_points[2])).normal_vector).astype(np.float32)
                        proposal_norm = clustering_norm[pick_idx]
                        proposal_norm = proposal_norm[:, np.newaxis]

                        # import pdb;pdb.set_trace()
                        proposal_point = pick_point

                        clustering_diff = clustering_points - proposal_point
                        clustering_dist = np.abs(
                            np.matmul(clustering_diff, proposal_norm))

                        broad_inliers = np.where(
                            clustering_dist < broad_thresh)[0]
                        broad_inlier_num = len(broad_inliers)

                        close_inliers = np.where(
                            clustering_dist < close_thresh)[0]
                        close_inlier_num = len(close_inliers)

                        norm_dist = np.abs(clustering_norm -
                                           np.transpose(proposal_norm)).sum(1)
                        close_norm_idx = np.where(norm_dist < 0.6)[0]
                        close_norm_num = len(close_norm_idx)

                        if close_inlier_num >= best_fit_num and broad_inlier_num >= num_points / (
                                4 - count_pred
                        ) * 0.9 and close_norm_num >= num_points / (
                                4 - count_pred) * 0.9:
                            best_fit_num = close_inlier_num
                            best_fit_norm = proposal_norm
                            best_fit_cen = clustering_points[
                                close_inliers].mean(0)
                            best_fit_idx = clustering_points_idx[close_inliers]
                            best_norm_dist = norm_dist
                            best_close_norm_idx = np.where(
                                best_norm_dist < 0.6)[0]

                    if best_fit_num == 0 or num_points <= 20:
                        break

                    clustering_points_same_sym = clustering_points[
                        best_close_norm_idx]

                    clustering_diff_same_sym = clustering_points_same_sym - best_fit_cen
                    clustering_dist_same_sym = np.abs(
                        np.matmul(clustering_diff_same_sym, best_fit_norm))

                    close_inliers = np.where(
                        clustering_dist_same_sym < close_thresh)[0]
                    close_inlier_num = len(close_inliers)

                    best_fit_num = close_inlier_num

                    broad_inliers = np.where(
                        clustering_dist_same_sym < broad_thresh)[0]
                    broad_inlier_num = len(broad_inliers)

                    def f(x):
                        dist = 0
                        # import pdb;pdb.set_trace()
                        for point in clustering_points_same_sym[broad_inliers]:
                            dist += np.abs(
                                (point * x[0:3]).sum() + x[3]) / np.sqrt(
                                    np.sum(np.square(x[0:3]), axis=0))

                        return dist

                    start_point = np.zeros(4)
                    start_point[0:3] = np.copy(best_fit_norm[:, 0])
                    start_point[3] = (-best_fit_cen *
                                      best_fit_norm[:, 0]).sum()

                    min_point = fmin(f, start_point, maxiter=50)

                    # import pdb;pdb.set_trace()
                    min_point = min_point / np.sqrt(
                        np.sum(np.square(min_point[0:3]), axis=0))

                    x_val = -(min_point[3] + best_fit_cen[1] * min_point[1] +
                              best_fit_cen[2] * min_point[2]) / min_point[0]

                    y_val = -(min_point[3] + best_fit_cen[0] * min_point[0] +
                              best_fit_cen[2] * min_point[2]) / min_point[1]

                    z_val = -(min_point[3] + best_fit_cen[0] * min_point[0] +
                              best_fit_cen[1] * min_point[1]) / min_point[2]

                    if np.abs(x_val) < 1:
                        new_pred_loc = np.array(
                            [x_val, best_fit_cen[1], best_fit_cen[2]])
                    elif np.abs(z_val) < 1:
                        new_pred_loc = np.array(
                            [best_fit_cen[0], best_fit_cen[1], z_val])
                    else:
                        new_pred_loc = np.array(
                            [best_fit_cen[0], y_val, best_fit_cen[2]])

                    new_proposal_norm = min_point[0:3]
                    clustering_diff = clustering_points_same_sym - new_pred_loc
                    clustering_dist = np.abs(
                        np.matmul(clustering_diff, new_proposal_norm))

                    close_inliers = np.where(clustering_dist < close_thresh)[0]
                    new_close_inlier_num = len(close_inliers)

                    broad_inliers = np.where(clustering_dist < broad_thresh)[0]
                    new_broad_inlier_num = len(broad_inliers)
                    # import pdb;pdb.set_trace()
                    if new_close_inlier_num >= close_inlier_num:
                        best_fit_num = new_close_inlier_num
                        best_fit_norm = new_proposal_norm[:, np.newaxis]
                        best_fit_cen = new_pred_loc

                    if best_fit_num == 0:
                        break
                    else:

                        print('predicted norm:{}, predicted point:{}'.format(
                            best_fit_norm, best_fit_cen))

                        max_idx = np.argmax(
                            np.abs(np.matmul(target_sym, best_fit_norm)))
                        sym_product = np.abs(
                            np.matmul(target_sym, best_fit_norm)[max_idx][0])
                        sym_dist = np.abs((target_sym[max_idx] *
                                           (best_fit_cen - target_cen)).sum())

                        norm_dist = np.abs(clustering_norm -
                                           np.transpose(best_fit_norm)).sum(1)
                        scrub_close_norm_idx = np.where(norm_dist < 1.3)[0]

                        # import pdb;pdb.set_trace()
                        predicted_confidence = best_fit_num / len(
                            best_close_norm_idx) - np.abs(
                                clustering_norm[best_close_norm_idx] -
                                np.transpose(best_fit_norm)).mean() * 3 * 1.5
                        predicted_confidence = max(0, predicted_confidence)

                        for dist_idx in range(5):
                            if sym_product > PRODUCT_THRESHOLD and sym_dist < (
                                    dist_idx + 1) * 0.01:
                                # import pdb;pdb.set_trace()
                                sym_conf[dist_idx, max_idx] = max(
                                    sym_conf[dist_idx, max_idx],
                                    predicted_confidence)

                            else:
                                conf_fp[dist_idx].append(predicted_confidence)

                        clustering_points_idx = np.setdiff1d(
                            clustering_points_idx,
                            clustering_points_idx[scrub_close_norm_idx])

                        clustering_norm = pred_norm[0,
                                                    clustering_points_idx, :]
                        clustering_points = points[0, clustering_points_idx, :]

                        num_points = len(clustering_points_idx)
                        # import pdb;pdb.set_trace()

                # import pdb;pdb.set_trace()

                for dist_idx in range(5):
                    for i in range(target_sym.shape[0]):
                        conf_tp_or_fn[dist_idx].append(sym_conf[dist_idx, i])
                # import pdb;pdb.set_trace()

    # import pdb;pdb.set_trace()

    print(conf_tp_or_fn)
    print(conf_fp)

    # import pdb;pdb.set_trace()

    for dist_idx in range(5):
        for t in range(1, 1001):
            conf_thresh = t / 1000

            true_positives = len(
                np.where(np.array(conf_tp_or_fn[dist_idx]) >= conf_thresh)[0])
            false_negatives = len(
                np.where(np.array(conf_tp_or_fn[dist_idx]) < conf_thresh)[0])
            false_positives = len(
                np.where(np.array(conf_fp[dist_idx]) >= conf_thresh)[0])
            if false_positives + true_positives > 0 and true_positives + false_negatives > 0:
                prec[dist_idx].append(true_positives /
                                      (false_positives + true_positives))
                recall[dist_idx].append(true_positives /
                                        (true_positives + false_negatives))

    return prec, recall
Example #31
    def __getitem__(self, index):
        img = Image.open('{0}/{1}-color.png'.format(self.root,
                                                    self.list[index]))
        depth = np.array(
            Image.open('{0}/{1}-depth.png'.format(self.root,
                                                  self.list[index])))
        label = np.array(
            Image.open('{0}/{1}-label.png'.format(self.root,
                                                  self.list[index])))
        meta = scio.loadmat('{0}/{1}-meta.mat'.format(self.root,
                                                      self.list[index]))

        if self.list[index][:8] != 'data_syn' and int(
                self.list[index][5:9]) >= 60:
            cam_cx = self.cam_cx_2
            cam_cy = self.cam_cy_2
            cam_fx = self.cam_fx_2
            cam_fy = self.cam_fy_2
        else:
            cam_cx = self.cam_cx_1
            cam_cy = self.cam_cy_1
            cam_fx = self.cam_fx_1
            cam_fy = self.cam_fy_1

        mask_back = ma.getmaskarray(ma.masked_equal(label, 0))

        add_front = False
        if self.add_noise:
            for k in range(5):
                seed = random.choice(self.syn)
                front = np.array(
                    self.trancolor(
                        Image.open('{0}/{1}-color.png'.format(
                            self.root, seed)).convert("RGB")))
                front = np.transpose(front, (2, 0, 1))
                f_label = np.array(
                    Image.open('{0}/{1}-label.png'.format(self.root, seed)))
                front_label = np.unique(f_label).tolist()[1:]
                if len(front_label) < self.front_num:
                    continue
                front_label = random.sample(front_label, self.front_num)
                for f_i in front_label:
                    mk = ma.getmaskarray(ma.masked_not_equal(f_label, f_i))
                    if f_i == front_label[0]:
                        mask_front = mk
                    else:
                        mask_front = mask_front * mk
                t_label = label * mask_front
                if len(t_label.nonzero()[0]) > 1000:
                    label = t_label
                    add_front = True
                    break

        obj = meta['cls_indexes'].flatten().astype(np.int32)

        while True:
            idx = np.random.randint(0, len(obj))
            mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
            mask_label = ma.getmaskarray(ma.masked_equal(label, obj[idx]))
            mask = mask_label * mask_depth
            if len(mask.nonzero()[0]) > self.minimum_num_pt:
                break

        if self.add_noise:
            img = self.trancolor(img)

        rmin, rmax, cmin, cmax = get_bbox(mask_label)
        img = np.transpose(np.array(img)[:, :, :3], (2, 0, 1))[:, rmin:rmax, cmin:cmax]

        if self.list[index][:8] == 'data_syn':
            seed = random.choice(self.real)
            back = np.array(
                self.trancolor(
                    Image.open('{0}/{1}-color.png'.format(
                        self.root, seed)).convert("RGB")))
            back = np.transpose(back, (2, 0, 1))[:, rmin:rmax, cmin:cmax]
            img_masked = back * mask_back[rmin:rmax, cmin:cmax] + img
        else:
            img_masked = img

        if self.add_noise and add_front:
            img_masked = img_masked * mask_front[
                rmin:rmax, cmin:cmax] + front[:, rmin:rmax, cmin:cmax] * ~(
                    mask_front[rmin:rmax, cmin:cmax])

        if self.list[index][:8] == 'data_syn':
            img_masked = img_masked + np.random.normal(
                loc=0.0, scale=7.0, size=img_masked.shape)

        # p_img = np.transpose(img_masked, (1, 2, 0))
        # scipy.misc.imsave('temp/{0}_input.png'.format(index), p_img)
        # scipy.misc.imsave('temp/{0}_label.png'.format(index), mask[rmin:rmax, cmin:cmax].astype(np.int32))

        target_r = meta['poses'][:, :, idx][:, 0:3]
        target_t = np.array([meta['poses'][:, :, idx][:, 3:4].flatten()])
        add_t = np.array([
            random.uniform(-self.noise_trans, self.noise_trans)
            for i in range(3)
        ])

        choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
        if len(choose) > self.num_pt:
            c_mask = np.zeros(len(choose), dtype=int)
            c_mask[:self.num_pt] = 1
            np.random.shuffle(c_mask)
            choose = choose[c_mask.nonzero()]
        else:
            choose = np.pad(choose, (0, self.num_pt - len(choose)), 'wrap')

        depth_masked = depth[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        xmap_masked = self.xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        ymap_masked = self.ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        choose = np.array([choose])

        cam_scale = meta['factor_depth'][0][0]
        pt2 = depth_masked / cam_scale
        pt0 = (ymap_masked - cam_cx) * pt2 / cam_fx
        pt1 = (xmap_masked - cam_cy) * pt2 / cam_fy
        cloud = np.concatenate((pt0, pt1, pt2), axis=1)
        if self.add_noise:
            cloud = np.add(cloud, add_t)

        # fw = open('temp/{0}_cld.xyz'.format(index), 'w')
        # for it in cloud:
        #    fw.write('{0} {1} {2}\n'.format(it[0], it[1], it[2]))
        # fw.close()

        dellist = [j for j in range(0, len(self.cld[obj[idx]]))]
        if self.refine:
            dellist = random.sample(
                dellist,
                len(self.cld[obj[idx]]) - self.num_pt_mesh_large)
        else:
            dellist = random.sample(
                dellist,
                len(self.cld[obj[idx]]) - self.num_pt_mesh_small)
        model_points = np.delete(self.cld[obj[idx]], dellist, axis=0)

        # fw = open('temp/{0}_model_points.xyz'.format(index), 'w')
        # for it in model_points:
        #    fw.write('{0} {1} {2}\n'.format(it[0], it[1], it[2]))
        # fw.close()

        target = np.dot(model_points, target_r.T)
        if self.add_noise:
            target = np.add(target, target_t + add_t)
        else:
            target = np.add(target, target_t)

        # fw = open('temp/{0}_tar.xyz'.format(index), 'w')
        # for it in target:
        #    fw.write('{0} {1} {2}\n'.format(it[0], it[1], it[2]))
        # fw.close()

        return torch.from_numpy(cloud.astype(np.float32)), \
               torch.LongTensor(choose.astype(np.int32)), \
               self.norm(torch.from_numpy(img_masked.astype(np.float32))), \
               torch.from_numpy(target.astype(np.float32)), \
               torch.from_numpy(model_points.astype(np.float32)), \
               torch.LongTensor([int(obj[idx]) - 1])
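The mask_label * mask_depth idiom above (and in the examples that follow) is an element-wise AND of two boolean arrays obtained via ma.getmaskarray. A small sketch on made-up toy arrays:

import numpy as np
import numpy.ma as ma

depth = np.array([[0, 812], [905, 0]])                       # toy depth map (0 = missing)
label = np.array([[3, 3], [0, 3]])                           # toy segmentation labels
mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))  # True where depth != 0
mask_label = ma.getmaskarray(ma.masked_equal(label, 3))      # True where label == 3
mask = mask_label * mask_depth                               # True where both hold
# mask == [[False, True], [False, False]]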
Ejemplo n.º 32
0
    def __getitem__(self, index):
        img = Image.open(self.list_rgb[index])
        ori_img = np.array(img)
        depth = np.array(Image.open(self.list_depth[index]))
        label = np.array(Image.open(self.list_label[index]))
        obj = self.list_obj[index]
        rank = self.list_rank[index]        

        if obj == 2:
            for i in range(0, len(self.meta[obj][rank])):
                if self.meta[obj][rank][i]['obj_id'] == 2:
                    meta = self.meta[obj][rank][i]
                    break
        else:
            meta = self.meta[obj][rank][0]

        mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
        if self.mode == 'eval':
            mask_label = ma.getmaskarray(ma.masked_equal(label, np.array(255)))
        else:
            mask_label = ma.getmaskarray(ma.masked_equal(label, np.array([255, 255, 255])))[:, :, 0]
        
        mask = mask_label * mask_depth

        if self.add_noise:
            img = self.trancolor(img)

        img = np.array(img)[:, :, :3]
        img = np.transpose(img, (2, 0, 1))
        img_masked = img

        rmin, rmax, cmin, cmax = get_bbox(meta['obj_bb'])

        img_masked = img_masked[:, rmin:rmax, cmin:cmax]
        #p_img = np.transpose(img_masked, (1, 2, 0))
        #scipy.misc.imsave('evaluation_result/{0}_input.png'.format(index), p_img)

        target_r = np.resize(np.array(meta['cam_R_m2c']), (3, 3))
        target_t = np.array(meta['cam_t_m2c'])
        add_t = np.array([random.uniform(-self.noise_trans, self.noise_trans) for i in range(3)])

        choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
        if len(choose) == 0:
            cc = torch.LongTensor([0])
            return(cc, cc, cc, cc, cc, cc)

        if len(choose) > self.num:
            c_mask = np.zeros(len(choose), dtype=int)
            c_mask[:self.num] = 1
            np.random.shuffle(c_mask)
            choose = choose[c_mask.nonzero()]
        else:
            choose = np.pad(choose, (0, self.num - len(choose)), 'wrap')
        
        depth_masked = depth[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        xmap_masked = self.xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        ymap_masked = self.ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        choose = np.array([choose])

        cam_scale = 1.0
        pt2 = depth_masked / cam_scale
        pt0 = (ymap_masked - self.cam_cx) * pt2 / self.cam_fx
        pt1 = (xmap_masked - self.cam_cy) * pt2 / self.cam_fy
        cloud = np.concatenate((pt0, pt1, pt2), axis=1)
        cloud = np.add(cloud, -1.0 * target_t) / 1000.0
        cloud = np.add(cloud, target_t / 1000.0)  # net effect: cloud / 1000.0 (mm -> m)

        if self.add_noise:
            cloud = np.add(cloud, add_t)

        #fw = open('evaluation_result/{0}_cld.xyz'.format(index), 'w')
        #for it in cloud:
        #    fw.write('{0} {1} {2}\n'.format(it[0], it[1], it[2]))	
        #fw.close()

        model_points = self.pt[obj] / 1000.0
        dellist = [j for j in range(0, len(model_points))]
        dellist = random.sample(dellist, len(model_points) - self.num_pt_mesh_small)
        model_points = np.delete(model_points, dellist, axis=0)

        #fw = open('evaluation_result/{0}_model_points.xyz'.format(index), 'w')
        #for it in model_points:
        #    fw.write('{0} {1} {2}\n'.format(it[0], it[1], it[2]))
        #fw.close()

        target = np.dot(model_points, target_r.T)
        if self.add_noise:
            target = np.add(target, target_t / 1000.0 + add_t)
            out_t = target_t / 1000.0 + add_t
        else:
            target = np.add(target, target_t / 1000.0)
            out_t = target_t / 1000.0

        #fw = open('evaluation_result/{0}_tar.xyz'.format(index), 'w')
        #for it in target:
        #    fw.write('{0} {1} {2}\n'.format(it[0], it[1], it[2]))
        #fw.close()

        return torch.from_numpy(cloud.astype(np.float32)), \
               torch.LongTensor(choose.astype(np.int32)), \
               self.norm(torch.from_numpy(img_masked.astype(np.float32))), \
               torch.from_numpy(target.astype(np.float32)), \
               torch.from_numpy(model_points.astype(np.float32)), \
               torch.LongTensor([self.objlist.index(obj)])
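The pt0/pt1/pt2 lines in both loaders are the standard pinhole back-projection: for a pixel at column u, row v with depth z, x = (u - cx) * z / fx and y = (v - cy) * z / fy (in these datasets ymap appears to hold column indices, which is why it pairs with cam_cx). A stand-alone sketch with made-up intrinsics:

import numpy as np

cam_fx, cam_fy, cam_cx, cam_cy = 572.4, 573.6, 325.3, 242.0  # example intrinsics
u, v, z = 400.0, 300.0, 0.85                                 # pixel column, row, depth in metres
x = (u - cam_cx) * z / cam_fx
y = (v - cam_cy) * z / cam_fy
point = np.array([x, y, z])                                  # 3-D point in the camera frame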
Ejemplo n.º 33
0
    my_result_wo_refine = []
    my_result = []

    for idx in range(len(detected_classIDs)):
        itemid = detected_classIDs[idx]
        
        maskid = idx + 1
        try:
            mask = ma.getmaskarray(ma.masked_equal(masks, maskid))
            rmin, rmax, cmin, cmax = get_bbox(mask)

            print('itemid: {0}\n'.format(itemid))
            print('rmin {0}, rmax {1}, cmin {2}, cmax {3}'.format(rmin, rmax, cmin, cmax))

            mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
            mask_label = ma.getmaskarray(ma.masked_equal(masks, maskid))
            mask = mask_label * mask_depth
            choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]

            if len(choose) > num_points:
                c_mask = np.zeros(len(choose), dtype=int)
                c_mask[:num_points] = 1
                np.random.shuffle(c_mask)
                choose = choose[c_mask.nonzero()]
            else:
                choose = np.pad(choose, (0, num_points - len(choose)), 'wrap')

            depth_masked = depth[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
            xmap_masked = xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
            ymap_masked = ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
Ejemplo n.º 34
0
    def callback(self, rgb, depth):
        if DEBUG:
            print('received depth image of type: ' + depth.encoding)
            print('received rgb image of type: ' + rgb.encoding)
        #https://answers.ros.org/question/64318/how-do-i-convert-an-ros-image-into-a-numpy-array/
        depth = np.frombuffer(depth.data,
                              dtype=np.uint16).reshape(depth.height,
                                                       depth.width, -1)
        rgb = np.frombuffer(rgb.data,
                            dtype=np.uint8).reshape(rgb.height, rgb.width, -1)
        rgb_original = rgb
        #cv2.imshow('depth', depth)

        #time1 = time.time()
        rgb = np.transpose(rgb, (2, 0, 1))
        rgb = norm(torch.from_numpy(rgb.astype(np.float32)))
        rgb = Variable(rgb).cuda()
        semantic = self.model(rgb.unsqueeze(0))
        _, pred = torch.max(semantic, dim=1)
        pred = pred * 255
        pred = np.transpose(pred, (1, 2, 0))  # (CxHxW)->(HxWxC)
        #print(pred.shape)

        #ret, threshold = cv2.threshold(pred.cpu().numpy(), 1, 255, cv2.THRESH_BINARY)    #pred is already binary, therefore, this line is unnecessary
        contours, hierarchy = cv2.findContours(np.uint8(pred),
                                               cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)
        cnt = max(contours, key=cv2.contourArea)
        x, y, w, h = cv2.boundingRect(cnt)
        rmin, rmax, cmin, cmax = get_bbox([x, y, w, h])
        #cv2.rectangle(rgb_original,(cmin,rmin), (cmax,rmax) , (0,255,0),2)
        #cv2.imwrite('depth.png', depth)          #save depth image

        mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
        mask_label = ma.getmaskarray(ma.masked_equal(pred, np.array(255)))
        mask = mask_depth * mask_label

        #print(rgb.shape)             #torch.Size([3, 480, 640])
        #print(rgb_original.shape)    #(480, 640, 3)
        img = np.transpose(rgb_original, (2, 0, 1))
        img_masked = img[:, rmin:rmax, cmin:cmax]

        choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]

        #print("length of choose is :{0}".format(len(choose)))
        if len(choose) == 0:
            cc = torch.LongTensor([0])
            return (cc, cc, cc, cc, cc, cc)

        if len(choose) > num_points:
            c_mask = np.zeros(len(choose), dtype=int)
            c_mask[:num_points] = 1  # if there are more object pixels than num_points, keep a random subset
            np.random.shuffle(c_mask)
            choose = choose[c_mask.nonzero()]  # now len(choose) == num_points
        else:
            choose = np.pad(choose, (0, num_points - len(choose)), 'wrap')

        depth_masked = depth[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        xmap_masked = self.xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        ymap_masked = self.ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)

        choose = np.array([choose])

        pt2 = depth_masked
        #print(pt2)
        pt0 = (ymap_masked - self.cam_cx) * pt2 / self.cam_fx
        pt1 = (xmap_masked - self.cam_cy) * pt2 / self.cam_fy
        cloud = np.concatenate((pt0, pt1, pt2), axis=1)
        cloud = cloud / 1000

        points = torch.from_numpy(cloud.astype(np.float32))
        choose = torch.LongTensor(choose.astype(np.int32))
        img = norm(torch.from_numpy(img_masked.astype(np.float32)))
        idx = torch.LongTensor([self.object_index])

        img = Variable(img).cuda().unsqueeze(0)
        points = Variable(points).cuda().unsqueeze(0)
        choose = Variable(choose).cuda().unsqueeze(0)
        idx = Variable(idx).cuda().unsqueeze(0)

        pred_r, pred_t, pred_c, emb = self.estimator(img, points, choose, idx)
        pred_r = pred_r / torch.norm(pred_r, dim=2).view(1, num_points, 1)
        pred_c = pred_c.view(bs, num_points)
        how_max, which_max = torch.max(pred_c, 1)
        pred_t = pred_t.view(bs * num_points, 1, 3)

        my_r = pred_r[0][which_max[0]].view(-1).cpu().data.numpy()
        my_t = (points.view(bs * num_points, 1, 3) +
                pred_t)[which_max[0]].view(-1).cpu().data.numpy()
        my_pred = np.append(my_r, my_t)

        for ite in range(0, iteration):
            T = Variable(torch.from_numpy(
                my_t.astype(np.float32))).cuda().view(1, 3).repeat(
                    num_points, 1).contiguous().view(1, num_points, 3)
            my_mat = quaternion_matrix(my_r)
            R = Variable(torch.from_numpy(my_mat[:3, :3].astype(
                np.float32))).cuda().view(1, 3, 3)
            my_mat[0:3, 3] = my_t

            new_points = torch.bmm((points - T), R).contiguous()
            pred_r, pred_t = self.refiner(new_points, emb, idx)
            pred_r = pred_r.view(1, 1, -1)
            pred_r = pred_r / (torch.norm(pred_r, dim=2).view(1, 1, 1))
            my_r_2 = pred_r.view(-1).cpu().data.numpy()
            my_t_2 = pred_t.view(-1).cpu().data.numpy()
            my_mat_2 = quaternion_matrix(my_r_2)
            my_mat_2[0:3, 3] = my_t_2

            my_mat_final = np.dot(
                my_mat,
                my_mat_2)  # refine pose means two matrix multiplication
            my_r_final = copy.deepcopy(my_mat_final)
            my_r_final[0:3, 3] = 0
            my_r_final = quaternion_from_matrix(my_r_final, True)
            my_t_final = np.array(
                [my_mat_final[0][3], my_mat_final[1][3], my_mat_final[2][3]])

            my_pred = np.append(my_r_final, my_t_final)
            my_r = my_r_final
            my_t = my_t_final

        my_r = quaternion_matrix(my_r)[:3, :3]
        #print(my_t.shape)
        my_t = np.array(my_t)
        #print(my_t.shape)
        #print(my_r.shape)

        target = np.dot(self.scaled, my_r.T)
        target = np.add(target, my_t)

        # project the eight 3-D bounding-box corners into the image plane
        corners = [(int((target[i][0] / target[i][2]) * self.cam_fx + self.cam_cx),
                    int((target[i][1] / target[i][2]) * self.cam_fy + self.cam_cy))
                   for i in range(8)]
        box_edges = [(0, 1), (0, 3), (0, 4), (1, 2), (1, 5), (2, 3),
                     (2, 6), (3, 7), (4, 5), (4, 7), (5, 6), (6, 7)]
        for a, b in box_edges:
            cv2.line(rgb_original, corners[a], corners[b], (255, 255, 255), 2)

        #print('estimated rotation is :{0}'.format(my_r))
        #print('estimated translation is :{0}'.format(my_t))

        #time2 = time.time()
        #print('inference time is :{0}'.format(time2-time1))
        cv2.imshow('rgb',
                   cv2.cvtColor(rgb_original,
                                cv2.COLOR_BGR2RGB))  # OpenCV uses BGR model
        cv2.waitKey(1)  # pass any integer except 0; 0 would block the display window
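Each refinement iteration above composes the current pose estimate with the refiner's correction as 4x4 homogeneous matrices (my_mat_final = np.dot(my_mat, my_mat_2)). A minimal numpy-only sketch of that composition, with toy rotations and translations and the quaternion conversions omitted:

import numpy as np

def to_homogeneous(R, t):
    # pack a 3x3 rotation and a translation into a 4x4 transform
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = t
    return T

current = to_homogeneous(np.eye(3), [0.10, 0.00, 0.50])   # pose so far
delta = to_homogeneous(np.eye(3), [0.01, -0.02, 0.00])    # refiner correction
refined = np.dot(current, delta)                          # composed pose
R_final, t_final = refined[:3, :3], refined[:3, 3]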
Ejemplo n.º 35
0
>>> import numpy as np
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_not_equal(a, 2)
masked_array(data=[--, --, 2, --],
             mask=[ True,  True, False,  True],
       fill_value=999999)
Ejemplo n.º 36
0
    def cf_jackknife(self, ignore_regions=[], estimator='landy-szalay',
                     random_oversample=None, save_steps_file=None,
                     name='jackknife', clobber=False):
        #This takes a divided mask and performs the correlation
        #function calculation on the field with each sub-region
        #removed in turn.

        if (name in self._cfs.keys()) and not clobber:
            raise ValueError("CorrelationFunction.cf_jackknife says: "
                             "There's already a CF by that name.  Please "
                             "choose another or overwrite by calling with "
                             "clobber=True")

        #Check to make sure we have everything we need
        self.__check_cf_setup(need_subregions=True, check_trees=False,
                              random_oversample=random_oversample)

        #Make a new CorrelationFunction instance and set the basic info
        #First make a dictionary of the arguments to pass because it's ugly
        info={'name'            : name,
             'cf_type'          : 'jackknife',
             'ngals'            : self._n_objects,
             'theta_bin_object' : copy.deepcopy(self._theta_bins),
             'estimator'        : estimator
             }
        self._cfs[name] = cfclass.CorrelationFunction(**info)
        centers, edges = self._cfs[name].get_thetas(unit='degrees')
        
        #pull out the unique subregion numbers and figure out which to use
        regions=np.asarray(list(set(self._subregion_number)))
        use_regions=[r for r in regions if (r not in ignore_regions) and (r != -1)]
        use_regions=np.array(use_regions)
        n_jacks=len(use_regions)

        #Figure out where the randoms are
        random_subregions=self._image_mask.return_subregions(self._ra_random,
                                                             self._dec_random)
        
        #Now loop through the regions that you should be using 
        #and calculate the correlation function leaving out each
        jackknife_jacks = {}
        #Make a mask that takes out all the galaxies that aren't in use_regions
        valid_subregion = ma.masked_not_equal(self._subregion_number, -1).mask
        random_valid_subregion=ma.masked_not_equal(random_subregions, -1).mask
        for bad_reg in ignore_regions:
            this_mask = ma.masked_not_equal(self._subregion_number, bad_reg).mask
            valid_subregion = valid_subregion & this_mask
            this_mask = ma.masked_not_equal(random_subregions, bad_reg).mask
            random_valid_subregion = random_valid_subregion & this_mask        

        temp = np.zeros((n_jacks, len(self._cf_thetas)))
        for i, r in enumerate(use_regions):
            #Make the mask for the data
            not_region_r = ma.masked_not_equal(self._subregion_number, r).mask  
            this_jackknife = valid_subregion & not_region_r & self._use  
            
            #Make the mask for the randoms
            random_not_region_r = ma.masked_not_equal(random_subregions, r).mask
            random_this_jackknife = random_not_region_r & random_valid_subregion

            #Do the calculation for this jackknife and store it
            print "calculating jackknife", i
            jackknife_jacks[r] = corr.two_point_angular(self._ra[this_jackknife], 
                                                        self._dec[this_jackknife], 
                                                        edges, method=estimator, 
                                                        ra_R = self._ra_random[random_this_jackknife],
                                                        dec_R = self._dec_random[random_this_jackknife])
            temp[i]=jackknife_jacks[r]
            if (save_steps_file is not None):
                jackknife_cf=np.nanmean(temp[0:i+1], axis=0)
                jackknife_cf_err=np.nanstd(temp[0:i+1], axis=0)
                self._cfs[name].set_cf(jackknife_cf, jackknife_cf_err,
                                       iterations=i+1)
                self.save_cfs(save_steps_file, cf_keys=[name])
            
        #Now that we have all of the jackknives, calculate the mean
        # and variance.
        jackknife_cf=np.nanmean(temp, axis=0)
        jackknife_cf_err=np.nanstd(temp, axis=0)
        self._cfs[name].set_cf(jackknife_cf, jackknife_cf_err,
                               iterations=n_jacks)
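The region bookkeeping here leans on the fact that ma.masked_not_equal(arr, v).mask is True exactly where arr != v. A toy sketch of building one jackknife selection while dropping unassigned (-1) entries:

import numpy as np
import numpy.ma as ma

subregion = np.array([0, 1, -1, 2, 1])                 # toy region labels per galaxy
valid = ma.masked_not_equal(subregion, -1).mask        # True where a region was assigned
not_region_1 = ma.masked_not_equal(subregion, 1).mask  # True where region != 1
this_jackknife = valid & not_region_1                  # keep assigned galaxies outside region 1
# this_jackknife == [True, False, False, True, False]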
Ejemplo n.º 37
0
def main():
    cfg = setup_config()
    pipeline = rs.pipeline()
    realsense_cfg = setup_realsense()
    pipeline.start(realsense_cfg)  # Start streaming
    visualizer = predictor.VisualizationDemo(cfg)

    ref_frame_axies = []
    ref_frame_label = []
    min_distance = 0.9  # despite the name, used below as an IoU threshold
    label_cnt = 0
    frameth = 0

    my_t_pool = {}
    my_r_pool = {}

    while True:
        frameth += 1
        cur_frame_axies = []
        cur_frame_label = []
        my_t_per_frame = []
        my_r_per_frame = []

        align = rs.align(rs.stream.color)
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)

        rgb = aligned_frames.get_color_frame()
        rgb = np.asanyarray(rgb.get_data())
        frame = rgb.copy()

        # Do instance segmentation
        start = time.time()
        segmentation, vis = visualizer.run_on_image(frame)
        #print("Time = " + str(time.time()-start))

        cv2.imshow('Mask', vis)
        cv2.waitKey(1)

        # Get segmentation mask
        ori_label = segmentation['instances'].pred_masks.cpu().numpy()
        label = np.sum(ori_label, axis=0).astype(np.uint8)
        label = np.where(label != 0, 255, label)
        label = Image.fromarray(label).convert("L")
        label = np.asarray(label.convert('RGB')).astype(np.uint8)

        bboxes = segmentation['instances'].pred_boxes.tensor.cpu().numpy()
        xyxy_bboxes = bboxes
        bboxes = bbox_convert(bboxes)

        if len(bboxes) > 0:
            #depth_frames = frames.get_depth_frame()
            depth_frames = aligned_frames.get_depth_frame()

            video_profile = depth_frames.profile.as_video_stream_profile()
            intr = video_profile.get_intrinsics()
            depth = np.asanyarray(depth_frames.get_data())
            #centers = segmentation['instances'].pred_boxes.get_centers()
            if len(my_t_pool) > 0:
                last_key = list(my_t_pool.keys())[-1]

            for i in range(0, len(bboxes)):
                bbox_xyxy = np.array(list(xyxy_bboxes[i]))
                bbox = list(bboxes[i])
                print("Bounding Box:" + str(bbox))
                #center = bboxes[i].get_centers()
                #center = centers[i].cpu().numpy()
                num_idx = float('nan')
                max_value = 0

                label_of_object = ori_label[i].astype(np.uint8)
                label_of_object = np.where(label_of_object != 0, 255,
                                           label_of_object)
                label_of_object = Image.fromarray(label_of_object).convert("L")
                label_of_object = np.asarray(
                    label_of_object.convert('RGB')).astype(np.uint8)

                if len(ref_frame_label) > 0:
                    iou_list = []
                    b = bbox_xyxy
                    a = np.array(ref_frame_axies)
                    for k in range(len(ref_frame_axies)):
                        iou = iou_score(a[k], b)
                        iou_list.append(iou)
                    iou_list = np.array(iou_list)
                    max_value = iou_list.max()
                    if (max_value > min_distance):
                        min_idx = np.where(iou_list == max_value)[0][0]
                        num_idx = ref_frame_label[min_idx]

                if (math.isnan(num_idx)):
                    num_idx = label_cnt
                    label_cnt += 1
                cur_frame_label.append(num_idx)
                cur_frame_axies.append(bbox_xyxy)

                print(max_value)
                if (frameth == 1) or (max_value < 0.9) or (
                        i > len(my_t_pool[last_key]) - 1) or (frameth % 20
                                                              == 0):
                    pos_text = (bbox[0], bbox[1])

                    class_id = segmentation['instances'].pred_classes[i].cpu(
                    ).data.numpy()
                    print("Class: " + str(class_id))
                    #idx = class_id
                    # map detector class ids to model indices (only classes 0 and 2 are handled here)
                    if class_id == 0:
                        idx = 0
                    elif class_id == 2:
                        idx = 1

                    model_points = model_points_list[idx]

                    mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
                    #mask_label = ma.getmaskarray(ma.masked_equal(label, np.array(255)))
                    mask_label = ma.getmaskarray(
                        ma.masked_equal(label_of_object,
                                        np.array([255, 255, 255])))[:, :, 0]
                    mask = mask_label * mask_depth

                    rmin, rmax, cmin, cmax = posenet_deploy.get_bbox(bbox)

                    # choose
                    choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
                    if len(choose) == 0:
                        choose = torch.LongTensor([0])
                    if len(choose) > num_points:
                        c_mask = np.zeros(len(choose), dtype=int)
                        c_mask[:num_points] = 1
                        np.random.shuffle(c_mask)
                        choose = choose[c_mask.nonzero()]
                    else:
                        choose = np.pad(choose, (0, num_points - len(choose)),
                                        'wrap')

                    depth_masked = depth[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
                    xmap_masked = xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
                    ymap_masked = ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
                    choose = np.array([choose])

                    # point cloud
                    pt2 = depth_masked / cam_scale
                    pt0 = (ymap_masked - cam_cx) * pt2 / cam_fx
                    pt1 = (xmap_masked - cam_cy) * pt2 / cam_fy
                    cloud = np.concatenate((pt0, pt1, pt2), axis=1)
                    cloud = cloud / 1000.0
                    # print(cloud.shape)

                    # cropped img
                    #img_masked = rgb[:, :, :3]
                    img_masked = rgb[:, :, ::-1]  # bgr to rgb
                    img_masked = np.transpose(img_masked, (2, 0, 1))
                    img_masked = img_masked[:, rmin:rmax, cmin:cmax]

                    my_mask = np.transpose(label_of_object, (2, 0, 1))
                    my_mask = my_mask[:, rmin:rmax, cmin:cmax]  # crop the mask to the bbox
                    mask_img = np.transpose(my_mask, (1, 2, 0))
                    img_rgb = np.transpose(img_masked, (1, 2, 0))
                    croped_img_mask = cv2.bitwise_and(img_rgb, mask_img)
                    crop_image_to_check = croped_img_mask.copy()
                    cv2.imshow("mask_crop", croped_img_mask)
                    croped_img_mask = np.transpose(croped_img_mask, (2, 0, 1))

                    # Variables
                    cloud = torch.from_numpy(cloud.astype(
                        np.float32)).unsqueeze(0)
                    choose = torch.LongTensor(choose.astype(
                        np.int32)).unsqueeze(0)
                    #img_masked = torch.from_numpy(img_masked.astype(np.float32)).unsqueeze(0)
                    img_masked = torch.from_numpy(
                        croped_img_mask.astype(np.float32)).unsqueeze(0)
                    index = torch.LongTensor([idx]).unsqueeze(
                        0)  # Specify which object

                    cloud = Variable(cloud).cuda()
                    choose = Variable(choose).cuda()
                    img_masked = Variable(img_masked).cuda()
                    index = Variable(index).cuda()

                    # Deploy
                    with torch.no_grad():
                        pred_r, pred_t, pred_c, emb = estimator(
                            img_masked, cloud, choose, index)

                    pred_r = pred_r / torch.norm(pred_r, dim=2).view(
                        1, num_points, 1)
                    pred_c = pred_c.view(bs, num_points)
                    how_max, which_max = torch.max(pred_c, 1)
                    pred_t = pred_t.view(bs * num_points, 1, 3)
                    points = cloud.view(bs * num_points, 1, 3)

                    my_r = pred_r[0][which_max[0]].view(-1).cpu().data.numpy()
                    my_t = (points.view(bs * num_points, 1, 3) +
                            pred_t)[which_max[0]].view(-1).cpu().data.numpy()
                    my_pred = np.append(my_r, my_t)

                    # Refinement
                    for ite in range(0, iteration):
                        T = Variable(torch.from_numpy(my_t.astype(
                            np.float32))).cuda().view(1, 3).repeat(
                                num_points,
                                1).contiguous().view(1, num_points, 3)
                        my_mat = quaternion_matrix(my_r)
                        R = Variable(
                            torch.from_numpy(my_mat[:3, :3].astype(
                                np.float32))).cuda().view(1, 3, 3)
                        my_mat[0:3, 3] = my_t

                        new_cloud = torch.bmm((cloud - T), R).contiguous()
                        pred_r, pred_t = refiner(new_cloud, emb, index)
                        pred_r = pred_r.view(1, 1, -1)
                        pred_r = pred_r / (torch.norm(pred_r, dim=2).view(
                            1, 1, 1))
                        my_r_2 = pred_r.view(-1).cpu().data.numpy()
                        my_t_2 = pred_t.view(-1).cpu().data.numpy()
                        my_mat_2 = quaternion_matrix(my_r_2)

                        my_mat_2[0:3, 3] = my_t_2
                        my_mat_final = np.dot(my_mat, my_mat_2)
                        my_r_final = copy.deepcopy(my_mat_final)
                        my_r_final[0:3, 3] = 0
                        my_r_final = quaternion_from_matrix(my_r_final, True)
                        my_t_final = np.array([
                            my_mat_final[0][3], my_mat_final[1][3],
                            my_mat_final[2][3]
                        ])

                        my_pred = np.append(my_r_final, my_t_final)
                        my_r = my_r_final
                        my_t = my_t_final

                        my_r_matrix = quaternion_matrix(my_r)[:3, :3]
                    #print("Time = " + str(time.time()-start))
                    my_t_per_frame.append(my_t)
                    my_r_per_frame.append(my_r_matrix)

                    #rotation = Rot.from_matrix(my_r_matrix)
                    #angle = rotation.as_euler('xyz', degrees=True)

                    my_t = np.around(my_t, 5)
                    #print("translation vector = " + str(my_t))
                    #print("rotation angles = " + str(my_r))

                    frame = posenet_deploy.get_3d_bbox(frame, model_points,
                                                       my_r_matrix, my_t)
                    frame = posenet_deploy.draw_axes(frame, my_r_matrix, my_t)

                    if check_inverted(crop_image_to_check):
                        cv2.putText(frame,
                                    str(num_idx) + "_inverted", pos_text,
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0),
                                    2, cv2.LINE_AA)
                    else:
                        cv2.putText(frame, str(num_idx), pos_text,
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0),
                                    2, cv2.LINE_AA)

                    #cv2.putText(frame, str(num_idx), pos_text, cv2.FONT_HERSHEY_SIMPLEX,
                    #            0.5, (0,255,0), 2, cv2.LINE_AA)

                    posenet_deploy.putText(frame, i, num_idx, class_id, my_t)
                    #cv2.imshow('Result', rgb)
                    #cv2.waitKey(1)

                else:
                    rmin, rmax, cmin, cmax = posenet_deploy.get_bbox(bbox)
                    img_masked = rgb[:, :, ::-1]  # bgr to rgb
                    img_masked = np.transpose(img_masked, (2, 0, 1))
                    img_masked = img_masked[:, rmin:rmax, cmin:cmax]

                    my_mask = np.transpose(label_of_object, (2, 0, 1))
                    my_mask = my_mask[:, rmin:rmax, cmin:cmax]  # crop the mask to the bbox
                    mask_img = np.transpose(my_mask, (1, 2, 0))
                    img_rgb = np.transpose(img_masked, (1, 2, 0))
                    croped_img_mask = cv2.bitwise_and(img_rgb, mask_img)
                    crop_image_to_check = croped_img_mask.copy()

                    pos_text = (bbox[0], bbox[1])
                    last_key = list(my_t_pool.keys())[-1]

                    print("POOL: " + str(my_t_pool[last_key]))
                    class_id = segmentation['instances'].pred_classes[i].cpu(
                    ).data.numpy()

                    my_t = my_t_pool[last_key][min_idx]  # reuse the pose matched by IoU above
                    my_r_matrix = my_r_pool[last_key][min_idx]

                    my_t_per_frame.append(my_t)
                    my_r_per_frame.append(my_r_matrix)

                    frame = posenet_deploy.get_3d_bbox(frame, model_points,
                                                       my_r_matrix, my_t)
                    frame = posenet_deploy.draw_axes(frame, my_r_matrix, my_t)

                    if check_inverted(crop_image_to_check):
                        cv2.putText(frame,
                                    str(num_idx) + "_inverted", pos_text,
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0),
                                    2, cv2.LINE_AA)
                    else:
                        cv2.putText(frame, str(num_idx), pos_text,
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0),
                                    2, cv2.LINE_AA)

                    #cv2.putText(frame, str(num_idx), pos_text, cv2.FONT_HERSHEY_SIMPLEX,
                    #            0.5, (0,255,0), 2, cv2.LINE_AA)

                    posenet_deploy.putText(frame, i, num_idx, class_id, my_t)

            if len(my_t_per_frame) > 0:
                my_t_pool[frameth] = my_t_per_frame
                my_r_pool[frameth] = my_r_per_frame

            ref_frame_label = cur_frame_label
            ref_frame_axies = cur_frame_axies

            end = time.time() - start
            cv2.putText(frame,
                        "Time processing: " + str(round(end, 3)) + " seconds",
                        (100, 700), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255),
                        2, cv2.LINE_AA)
            cv2.imshow('Result', frame)
            cv2.waitKey(1)

        else:
            # Show images
            #video_writer.write(rgb)
            cv2.imshow('Result', rgb)
            cv2.waitKey(1)

    pipeline.stop()
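iou_score is called above but not shown in this snippet; a common implementation for [x1, y1, x2, y2] boxes looks like the following (a hypothetical stand-in, not necessarily the author's version):

def iou_score(a, b):
    # intersection rectangle of the two boxes
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2, y2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter + 1e-9)  # epsilon guards against empty boxes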
Ejemplo n.º 38
0
    def DenseFusion(self, img, depth, posecnn_res):
        my_result_wo_refine = []

        itemid = 1  # simplified for single-label detection; for multi-label handling, see DFYW3.py

        depth = np.array(depth)
        # img = img

        seg_res = posecnn_res

        x1, y1, x2, y2 = seg_res["box"]
        banana_bbox_draw = self.posecnn.get_box_rcwh(seg_res["box"])
        rmin, rmax, cmin, cmax = int(y1), int(y2), int(x1), int(x2)
        try:
            # depth may arrive with 3 identical channels; keep just one
            depth = depth[:, :, 1]
        except IndexError:
            pass  # already single-channel
        depth = np.nan_to_num(depth)  # replace NaNs so the zero mask below works
        mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))  # True where depth != 0
        mask_depth_nonzeros = mask_depth[:].nonzero()
        label_banana = np.squeeze(seg_res["mask"])
        label_banana = ma.getmaskarray(ma.masked_greater(label_banana, 0.5))
        label_banana_nonzeros = label_banana.flatten().nonzero()

        mask_label = ma.getmaskarray(ma.masked_equal(
            label_banana, itemid))  # label from banana label
        mask_label_nonzeros = mask_label[:].nonzero()

        mask = mask_label * mask_depth

        mask_nonzeros = mask[:].flatten().nonzero()
        mask_target = mask[rmin:rmax, cmin:cmax]
        choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
        res_len_choose = len(choose)
        if len(choose) > self.num_points:
            c_mask = np.zeros(len(choose), dtype=int)
            c_mask[:self.num_points] = 1
            np.random.shuffle(c_mask)
            choose = choose[c_mask.nonzero()]
        else:
            # print("(?)len of choose is 0, check error")
            print("Info, DenseFusion: len(choose)=", len(choose))
            # return "ERROR, img broken (?)"
            # choose = np.pad(choose, (0, self.num_points - len(choose)), 'wrap')
            return None

        depth_masked = depth[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        xmap_masked = self.xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        ymap_masked = self.ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        choose = np.array([choose])
        pt2 = depth_masked / self.cam_scale
        pt0 = (ymap_masked - self.cam_cx) * pt2 / self.cam_fx
        pt1 = (xmap_masked - self.cam_cy) * pt2 / self.cam_fy
        cloud = np.concatenate((pt0, pt1, pt2), axis=1)

        img_masked = np.array(img)[:, :, :3]
        img_masked = np.transpose(img_masked, (2, 0, 1))
        img_masked = img_masked[:, rmin:rmax, cmin:cmax]

        cloud = torch.from_numpy(cloud.astype(np.float32))
        choose = torch.LongTensor(choose.astype(np.int32))
        img_masked = self.norm(torch.from_numpy(img_masked.astype(np.float32)))
        index = torch.LongTensor([itemid - 1])

        cloud = Variable(cloud).cuda()
        choose = Variable(choose).cuda()
        img_masked = Variable(img_masked).cuda()
        index = Variable(index).cuda()

        cloud = cloud.view(1, self.num_points, 3)
        img_masked = img_masked.view(1, 3,
                                     img_masked.size()[1],
                                     img_masked.size()[2])

        pred_r, pred_t, pred_c, emb = self.estimator(img_masked, cloud, choose,
                                                     index)
        pred_r = pred_r / torch.norm(pred_r, dim=2).view(1, self.num_points, 1)

        pred_c = pred_c.view(self.bs, self.num_points)
        how_max, which_max = torch.max(pred_c, 1)
        pred_t = pred_t.view(self.bs * self.num_points, 1, 3)
        points = cloud.view(self.bs * self.num_points, 1, 3)

        my_r = pred_r[0][which_max[0]].view(-1).cpu().data.numpy()
        my_t = (points + pred_t)[which_max[0]].view(-1).cpu().data.numpy()
        my_pred = np.append(my_r, my_t)
        my_result_wo_refine.append(my_pred.tolist())

        my_result = []
        for ite in range(0, self.iteration):
            T = Variable(torch.from_numpy(
                my_t.astype(np.float32))).cuda().view(1, 3).repeat(
                    self.num_points,
                    1).contiguous().view(1, self.num_points, 3)
            my_mat = quaternion_matrix(my_r)
            R = Variable(torch.from_numpy(my_mat[:3, :3].astype(
                np.float32))).cuda().view(1, 3, 3)
            my_mat[0:3, 3] = my_t

            new_cloud = torch.bmm((cloud - T), R).contiguous()
            pred_r, pred_t = self.refiner(new_cloud, emb, index)
            pred_r = pred_r.view(1, 1, -1)
            pred_r = pred_r / (torch.norm(pred_r, dim=2).view(1, 1, 1))
            my_r_2 = pred_r.view(-1).cpu().data.numpy()
            my_t_2 = pred_t.view(-1).cpu().data.numpy()
            my_mat_2 = quaternion_matrix(my_r_2)

            my_mat_2[0:3, 3] = my_t_2
            my_mat_final = np.dot(my_mat, my_mat_2)
            my_r_final = copy.deepcopy(my_mat_final)
            my_r_final[0:3, 3] = 0
            my_r_final = quaternion_from_matrix(my_r_final, True)
            my_t_final = np.array(
                [my_mat_final[0][3], my_mat_final[1][3], my_mat_final[2][3]])

            my_pred = np.append(my_r_final, my_t_final)
            my_result.append(my_pred.tolist())
        my_result_np = np.array(my_result)
        my_result_mean = np.mean(my_result_np, axis=0)
        my_r = my_result_mean[:4]
        my_t = my_result_mean[4:]
        my_r_quaternion = my_r
        return my_r_quaternion, my_t
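The choose handling that recurs in these examples either downsamples to a fixed point budget with a shuffled binary mask or upsamples by wrap-around padding. A self-contained sketch with toy indices:

import numpy as np

num_points = 5
choose = np.array([2, 7, 9])  # indices of valid pixels (toy values)
if len(choose) > num_points:
    c_mask = np.zeros(len(choose), dtype=int)
    c_mask[:num_points] = 1
    np.random.shuffle(c_mask)
    choose = choose[c_mask.nonzero()]  # random subset of size num_points
else:
    choose = np.pad(choose, (0, num_points - len(choose)), 'wrap')
# choose is now exactly num_points long: [2, 7, 9, 2, 7]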
Ejemplo n.º 39
0
    def test_testOddFeatures(self):
        # Test of other odd features
        x = arange(20)
        x = x.reshape(4, 5)
        x.flat[5] = 12
        assert_(x[1, 0] == 12)
        z = x + 10j * x
        assert_(eq(z.real, x))
        assert_(eq(z.imag, 10 * x))
        assert_(eq((z * conjugate(z)).real, 101 * x * x))
        z.imag[...] = 0.0

        x = arange(10)
        x[3] = masked
        assert_(str(x[3]) == str(masked))
        c = x >= 8
        assert_(count(where(c, masked, masked)) == 0)
        assert_(shape(where(c, masked, masked)) == c.shape)
        z = where(c, x, masked)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is masked)
        assert_(z[7] is masked)
        assert_(z[8] is not masked)
        assert_(z[9] is not masked)
        assert_(eq(x, z))
        z = where(c, masked, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        z = masked_where(c, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        assert_(eq(x, z))
        x = array([1., 2., 3., 4., 5.])
        c = array([1, 1, 1, 0, 0])
        x[2] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        c[0] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))
        assert_(eq(masked_where(greater_equal(x, 2), x),
                   masked_greater_equal(x, 2)))
        assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2)))
        assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
        assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
        assert_(eq(masked_inside(array(list(range(5)),
                                       mask=[1, 0, 0, 0, 0]), 1, 3).mask,
                   [1, 1, 1, 1, 0]))
        assert_(eq(masked_outside(array(list(range(5)),
                                        mask=[0, 1, 0, 0, 0]), 1, 3).mask,
                   [1, 1, 0, 0, 1]))
        assert_(eq(masked_equal(array(list(range(5)),
                                      mask=[1, 0, 0, 0, 0]), 2).mask,
                   [1, 0, 1, 0, 0]))
        assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],
                                          mask=[1, 0, 0, 0, 0]), 2).mask,
                   [1, 0, 1, 0, 1]))
        assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
                   [99, 99, 3, 4, 5]))
        atest = ones((10, 10, 10), dtype=np.float32)
        btest = zeros(atest.shape, MaskType)
        ctest = masked_where(btest, atest)
        assert_(eq(atest, ctest))
        z = choose(c, (-x, x))
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        x = arange(6)
        x[5] = masked
        y = arange(6) * 10
        y[2] = masked
        c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])
        cm = c.filled(1)
        z = where(c, x, y)
        zm = where(cm, x, y)
        assert_(eq(z, zm))
        assert_(getmask(zm) is nomask)
        assert_(eq(zm, [0, 1, 2, 30, 40, 50]))
        z = where(c, masked, 1)
        assert_(eq(z, [99, 99, 99, 1, 1, 1]))
        z = where(c, 1, masked)
        assert_(eq(z, [99, 1, 1, 99, 99, 99]))
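As the assertions above exercise, ma.masked_not_equal(x, v) is shorthand for ma.masked_where(np.not_equal(x, v), x). A quick interactive check:

>>> import numpy as np
>>> import numpy.ma as ma
>>> x = np.array([1., 2., 3.])
>>> (ma.masked_not_equal(x, 2).mask == ma.masked_where(np.not_equal(x, 2), x).mask).all()
True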