Example #1
File: seed.py Project: youtang1993/trains
import sys
import random

# Optional dependencies; seeded only when available.
try:
    import numpy as np
except ImportError:
    np = None
try:
    import cv2
except ImportError:
    cv2 = None
def make_deterministic(seed=1337, cudnn_deterministic=False):
    """
    Ensure deterministic behavior across PyTorch using the provided random seed.
    This function makes sure that torch, numpy and random use the same random seed.

    When using a trains Task, call this function with the task's random seed like so:
        make_deterministic(task.get_random_seed())

    :param int seed: Seed number
    :param bool cudnn_deterministic: In order to make computations deterministic on your specific platform
    and PyTorch release, set this value to True. torch will only allow those CuDNN algorithms that are
    (believed to be) deterministic. This can have a performance impact (slower execution) depending on your model.
    """
    seed = int(seed) & 0xFFFFFFFF
    torch = sys.modules.get("torch")
    tf = sys.modules.get("tensorflow")

    if cudnn_deterministic and torch is not None:
        try:
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
        except Exception:
            pass

    random.seed(seed)

    if np is not None:
        np.random.seed(seed)

    if cv2 is not None:
        try:
            cv2.setRNGSeed(seed)
        except Exception:
            pass

    if torch is not None:
        try:
            torch.manual_seed(seed)
            torch.cuda.manual_seed(seed)
        except Exception:
            pass

    if tf is not None:
        # reset graph state
        try:
            import tensorflow
            from tensorflow.python.eager.context import _context
            eager_mode_bypass = _context is None
        except Exception:
            eager_mode_bypass = False

        if not eager_mode_bypass:
            try:
                tf.set_random_seed(seed)
            except Exception:
                pass
            try:
                tf.random.set_random_seed(seed)
            except Exception:
                pass
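A quick reproducibility check for make_deterministic (a sketch of my own, not part of the original project): seeding twice with the same value should give identical NumPy draws.

make_deterministic(seed=1337)
a = np.random.rand(3)
make_deterministic(seed=1337)
b = np.random.rand(3)
assert np.allclose(a, b)  # same seed, same draws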
Example #2
File: lib.py Project: maizemaze/photo_grid
def doKMeans(img, k=3, features=[0]):
    """
    Run OpenCV k-means clustering on selected channels of an image.

    Parameters
    ----------
    img : 3D numpy array (H x W x C)
    k : int, optional
        Number of clusters.
    features : list of int, optional
        Channel indices to cluster on.
    """

    # data type conversion for opencv
    ## select features
    img = img[:, :, features].copy()
    ## standardize
    img_max, img_min = img.max(axis=(0, 1)), img.min(axis=(0, 1))-(1e-8)
    img = (img-img_min)/(img_max-img_min)
    ## convert to float32
    img_z = img.reshape((-1, img.shape[2])).astype(np.float32)
    
    # define criteria, number of clusters(K) and apply kmeans()
    criteria = (cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    param_k = dict(data=img_z,
                   K=k,
                   bestLabels=None,
                   criteria=criteria,
                   attempts=10,
                #    flags=cv2.KMEANS_RANDOM_CENTERS)
                   flags=cv2.KMEANS_PP_CENTERS)

    # Seed OpenCV's RNG so the k-means center initialization is reproducible
    cv2.setRNGSeed(99163)
    _, img_k_temp, center = cv2.kmeans(**param_k)

    # Convert back
    img_k = img_k_temp.astype(np.uint8).reshape((img.shape[0], -1))

    # return
    return img_k, center
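A hypothetical call for doKMeans (array name and sizes are my own; assumes numpy and cv2 are imported): cluster all three channels of a random image into three groups.

img_demo = np.random.rand(64, 64, 3).astype(np.float32)
labels, centers = doKMeans(img_demo, k=3, features=[0, 1, 2])
print(labels.shape)   # (64, 64): one cluster id per pixel
print(centers.shape)  # (3, 3): one center per cluster and feature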
Example #3
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    np.random.seed(420)
    cv2.setRNGSeed(1337)

    camera_tracker = CameraTracker(camera_parameters, corner_storage, frame_sequence_path, known_view_1, known_view_2)
    camera_tracker.track_camera()

    view_mats = [camera_tracker.known_view_mats[i] for i in range(camera_tracker.frame_count)]

    calc_point_cloud_colors(
        camera_tracker.point_cloud_builder,
        camera_tracker.rgb_sequence,
        view_mats,
        camera_tracker.intrinsic_mat,
        corner_storage,
        5.0
    )
    point_cloud = camera_tracker.point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example #4
def _cv2_estimate_E_with_intrinsics(cfg, matches, kps1, kps2, calib1, calib2):
    '''Estimate the Essential matrix from correspondences. Assumes known
    intrinsics.
    '''

    # Reference: https://docs.opencv.org/3.4.7/d9/d0c/group__calib3d.html
    # Default values for confidence: 0.99 (F), 0.999 (E)
    # Default value for the reprojection threshold: 3
    cur_key = 'config_{}_{}'.format(cfg.dataset, cfg.task)
    geom = cfg.method_dict[cur_key]['geom']
    if geom['method'].lower() == 'cv2-ransac-e':
        cv_method = 'FM_RANSAC'
        cv_threshold = geom['threshold']
        cv_confidence = geom['confidence']
    elif geom['method'].lower() == 'cv2-lmeds-e':
        cv_method = 'FM_LMEDS'
        cv_threshold = None
        cv_confidence = geom['confidence']
    else:
        raise ValueError('Unknown method to estimate E')

    is_valid, matches, kp1, kp2 = _preprocess(matches, kps1, kps2, 5)
    if not is_valid:
        return _fail()

    # Normalize keypoints with ground truth intrinsics
    kp1_n = normalize_keypoints(kp1, calib1['K'])
    kp2_n = normalize_keypoints(kp2, calib2['K'])

    cv2.setRNGSeed(cfg.opencv_seed)
    E, mask_E = cv2.findEssentialMat(kp1_n,
                                     kp2_n,
                                     method=getattr(cv2, cv_method),
                                     threshold=cv_threshold,
                                     prob=cv_confidence)
    mask_E = mask_E.astype(bool).flatten()

    # OpenCV can return multiple values as 6x3 or 9x3 matrices
    if E is None:
        return _fail()
    elif E.shape[0] != 3:
        Es = np.split(E, len(E) // 3)
    # Or a single matrix
    else:
        Es = [E]

    # Find the best E
    E, num_inlier = None, 0
    # mask_E_cheirality_check = None
    for _E in Es:
        _num_inlier, _R, _t, _mask = cv2.recoverPose(_E, kp1_n[mask_E],
                                                     kp2_n[mask_E])
        if _num_inlier >= num_inlier:
            num_inlier = _num_inlier
            E = _E
            # This is unused for now
            # mask_E_cheirality_check = _mask.flatten().astype(bool)

    indices = matches[:, mask_E.flatten()]
    return E, indices
Example #5
def test_boxes_DetectFaceKeypointNet2D32(image_with_faces,
                                         boxes_FaceKeypointNet2D32):
    cv2.ocl.setUseOpenCL(False)
    cv2.setNumThreads(1)
    cv2.setRNGSeed(777)
    detector = DetectFaceKeypointNet2D32()
    assert_inferences(detector, image_with_faces, boxes_FaceKeypointNet2D32)
Example #6
    def __init__(self):
        super().__init__()

        self.image_subscriber = rospy.Subscriber("camera/image_raw", Image, self.image_callback, queue_size=1)
        self.image_publisher = rospy.Publisher("camera/line_image", Image, queue_size=1)
        self.point_cloud_publisher = rospy.Publisher("field_point_cloud", PointCloud2, queue_size=1)

        cv2.setRNGSeed(12345)
Example #7
def test_keypoints_DetectFaceKeypointNet2D32(
        image_with_faces, keypoints_DetectFaceKeypointNet2D32):
    cv2.ocl.setUseOpenCL(False)
    cv2.setNumThreads(1)
    cv2.setRNGSeed(777)
    estimator = DetectFaceKeypointNet2D32()
    inferences = estimator(image_with_faces)
    predicted_keypoints = inferences['keypoints']
    assert len(predicted_keypoints) == len(keypoints_DetectFaceKeypointNet2D32)
    # TODO: OpenCV is not deterministic with its predictions
    print(predicted_keypoints)
    for label, preds in zip(keypoints_DetectFaceKeypointNet2D32,
                            predicted_keypoints):
        assert np.allclose(label, preds)
Example #8
    def __init__(self):
        super().__init__()

        self.image_subscriber = rospy.Subscriber("camera/image_raw",
                                                 Image,
                                                 self.image_callback,
                                                 queue_size=1)
        self.image_publisher = rospy.Publisher("camera/goal_image",
                                               Image,
                                               queue_size=1)
        self.goal_post_need_subscriber = rospy.Subscriber(
            "goal_post_need", Bool, self.goal_post_need_callback, queue_size=1)
        self.goal_post_need = False
        cv2.setRNGSeed(12345)
Example #9
    def _slow_analysis(self, image):
        # Constants for k-means
        criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        num_colors = 8

        # Initial attempt at finding labels
        cv.setRNGSeed(num_colors)
        resize_factor = 4

        # Setup full image
        slow_image = image.reshape((-1, 3))
        slow_image = np.float32(slow_image)

        # Setup smaller image
        fast_image = cv.resize(image,
                               None,
                               fx=1 / resize_factor,
                               fy=1 / resize_factor)
        fast_image_array = fast_image.reshape((-1, 3))
        fast_image_array = np.float32(fast_image_array)

        # Generate labels on large image
        if self.conf.options.cuda:
            center, label = self.km.kmeans_cuda(slow_image,
                                                num_colors,
                                                tolerance=0.1,
                                                seed=4,
                                                device=0)
        else:
            # Generate labels on small image
            _, fast_label, fast_center = cv.kmeans(fast_image_array,
                                                   num_colors, None, criteria,
                                                   5, cv.KMEANS_RANDOM_CENTERS)
            fast_label = cv.resize(
                fast_label,
                (1, fast_label.size * resize_factor * resize_factor),
                interpolation=cv.INTER_NEAREST)
            fast_center = np.multiply(fast_center,
                                      (resize_factor * resize_factor))
            _, label, center = cv.kmeans(
                slow_image, num_colors, fast_label, criteria, 1,
                cv.KMEANS_USE_INITIAL_LABELS + cv.KMEANS_PP_CENTERS,
                fast_center)

        # Update image with new color space
        center = np.uint8(center)
        res = center[label.flatten()]
        return res.reshape((image.shape))
Example #10
import random
import numpy as np
import torch
import cv2
def random_seed(seed_value):
    # reproducibility
    # python RNG
    random.seed(seed_value)

    # numpy RNG
    np.random.seed(seed_value)

    # pytorch RNGs
    torch.manual_seed(seed_value)

    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed_value)
        torch.cuda.manual_seed_all(seed_value)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    cv2.setRNGSeed(seed_value)
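A sanity check for random_seed (my own sketch, using the imports above): after reseeding, OpenCV's RNG reproduces the same uniform draws.

first = np.zeros((1, 5, 2), dtype=np.float32)
random_seed(42)
cv2.randu(first, 0, 1)

second = np.zeros((1, 5, 2), dtype=np.float32)
random_seed(42)
cv2.randu(second, 0, 1)

assert np.allclose(first, second)  # cv2's RNG is reproducible after seeding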
Example #11
def do_match(a_feature_filename,
             b_feature_filename,
             a_image_filename,
             b_image_filename,
             show_result=True):
    cv.setRNGSeed(0)
    img_object = cv.imread(a_image_filename, cv.IMREAD_GRAYSCALE)
    img_scene = cv.imread(b_image_filename, cv.IMREAD_GRAYSCALE)

    if img_object is None or img_scene is None:
        if show_result:
            print('Could not open or find the images!')
            return
        else:
            return False

    keypoints_obj, descriptors_obj = read_features_file(a_feature_filename)
    keypoints_scene, descriptors_scene = read_features_file(b_feature_filename)

    #-- Step 2: Matching descriptor vectors with a FLANN based matcher
    # Since SURF is a floating-point descriptor NORM_L2 is used
    matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
    knn_matches = matcher.knnMatch(descriptors_obj, descriptors_scene, 2)

    #-- Filter matches using Lowe's ratio test
    ratio_thresh = 0.9
    good_matches = []
    for m, n in knn_matches:
        if m.distance < 0.5:
            if m.distance < ratio_thresh * n.distance:
                good_matches.append(m)

    selected_matches = []

    if len(good_matches) == 0:
        if show_result:
            plt.imshow(img_scene)
            print('No good matches found!')
            return
        else:
            return False
    print(len(good_matches), '/', len(knn_matches))

    #-- Localize the object
    obj = np.empty((len(good_matches), 2), dtype=np.float32)
    scene = np.empty((len(good_matches), 2), dtype=np.float32)

    for i in range(len(good_matches)):
        #-- Get the keypoints from the good matches
        obj[i, 0] = keypoints_obj[good_matches[i].queryIdx].pt[0]
        obj[i, 1] = keypoints_obj[good_matches[i].queryIdx].pt[1]
        scene[i, 0] = keypoints_scene[good_matches[i].trainIdx].pt[0]
        scene[i, 1] = keypoints_scene[good_matches[i].trainIdx].pt[1]

    H, mask = cv.findHomography(obj, scene, cv.RANSAC, 25)

    if H is None:
        if show_result:
            plt.imshow(img_scene)
            print("Can't find homography!")
            return
        else:
            return False

    # Keep only the matches that RANSAC marked as inliers
    for i in range(len(mask)):
        if mask[i] == 1:
            selected_matches.append(good_matches[i])

    if show_result:
        #-- Draw matches
        img_matches = np.empty((max(img_object.shape[0], img_scene.shape[0]),
                                img_object.shape[1] + img_scene.shape[1], 3),
                               dtype=np.uint8)
        #         cv.drawMatches(img_object,
        #                        keypoints_obj,
        #                        img_scene,
        #                        keypoints_scene,
        #                        good_matches,
        #                        (255,255,0)
        #                        )

        cv.drawMatches(img_object,
                       keypoints_obj,
                       img_scene,
                       keypoints_scene,
                       selected_matches,
                       img_matches,
                       flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
        #         cv.drawMatches(img_object, keypoints_obj, img_scene, keypoints_scene, good_matches, img_matches, matchColor=(255, 255, 0), flags=2)
        #-- Get the corners from the image_1 ( the object to be "detected" )
        obj_corners = np.empty((4, 1, 2), dtype=np.float32)
        obj_corners[0, 0, 0] = 0
        obj_corners[0, 0, 1] = 0
        obj_corners[1, 0, 0] = img_object.shape[
            1]  # shape 1 is width, shape 0 is height
        obj_corners[1, 0, 1] = 0
        obj_corners[2, 0, 0] = img_object.shape[1]
        obj_corners[2, 0, 1] = img_object.shape[0]
        obj_corners[3, 0, 0] = 0
        obj_corners[3, 0, 1] = img_object.shape[0]

        scene_corners = cv.perspectiveTransform(obj_corners, H)
        #-- Draw lines between the corners (the mapped object in the scene - image_2 )
        cv.line(img_matches, (int(scene_corners[0,0,0] + img_object.shape[1]), int(scene_corners[0,0,1])),\
            (int(scene_corners[1,0,0] + img_object.shape[1]), int(scene_corners[1,0,1])), (0,255,0), 4)
        cv.line(img_matches, (int(scene_corners[1,0,0] + img_object.shape[1]), int(scene_corners[1,0,1])),\
            (int(scene_corners[2,0,0] + img_object.shape[1]), int(scene_corners[2,0,1])), (0,255,0), 4)
        cv.line(img_matches, (int(scene_corners[2,0,0] + img_object.shape[1]), int(scene_corners[2,0,1])),\
            (int(scene_corners[3,0,0] + img_object.shape[1]), int(scene_corners[3,0,1])), (0,255,0), 4)
        cv.line(img_matches, (int(scene_corners[3,0,0] + img_object.shape[1]), int(scene_corners[3,0,1])),\
            (int(scene_corners[0,0,0] + img_object.shape[1]), int(scene_corners[0,0,1])), (0,255,0), 4)
        #-- Show detected matches
        plt.imshow(img_matches)
    else:
        return len(good_matches)
Example #12
File: test.py Project: paulds8/planetoids
import sys

sys.path.append("..")

import pytest
import pandas as pd
import numpy as np
import pickle
import cv2 as cv
import random
from sklearn.datasets import make_blobs
from planetoids import planetoids as pt

np.random.seed(42)
random.seed(42)
cv.setRNGSeed(42)


def test_world_construction():
    planet = pt.Planetoid(data, "0", "1", "Cluster", random_state=42)
    return planet


def test_world_construction_exceptions():
    with pytest.raises(Exception):
        assert pt.Planetoid([], "0", "1", "Cluster", random_state=42)
    with pytest.raises(Exception):
        assert pt.Planetoid(data, "a", "1", "Cluster", random_state=42)
    with pytest.raises(Exception):
        assert pt.Planetoid(data, "0", "b", "Cluster", random_state=42)
    with pytest.raises(Exception):
Example #13
File: snr.py Project: atharris/DINO_CREx
import numpy as np
from scipy import misc
import math
import matplotlib.pyplot as plt
import image_processing_functions as imfunc
import search_location_functions as locfunc
import cv2
import time

cv2.setRNGSeed(int(time.time()))

extended = True
point = False

# Compute the peak signal-to-noise ratio (PSNR) of two images.
# If the images are identical the ratio is undefined, so 0 is returned.
def getPSNR(img1, img2):
    i, j = img1.shape
    diff = np.empty(img1.shape)
    cv2.absdiff(img1, img2, diff)
    diff = np.square(diff)
    s = diff.sum()
    if s < 1e-10:
        return 0
    mse = s / (i*j)
    psnr = 10*np.log10(255**2 / mse)
    return psnr

# signal_threshold, noise_threshold, ROI_size (n x n pixel border), single side ROI_border_width
ROI_parameters = {}
ROI_parameters['signal_threshold'] = 1.5
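A small check for getPSNR above (a sketch of my own; values are illustrative): PSNR against a noisy copy is finite, while identical images return 0 by the function's convention.

img_a = np.full((100, 100), 128.0)
img_b = img_a + np.random.normal(0.0, 5.0, img_a.shape)
print(getPSNR(img_a, img_b))         # roughly 34 dB for sigma = 5
print(getPSNR(img_a, img_a.copy()))  # identical images -> 0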
Example #14
# 0421.py
import cv2
import numpy as np
import time

dst = np.full((512, 512, 3), (255, 255, 255), dtype=np.uint8)
nPoints = 100
pts = np.zeros((1, nPoints, 2), dtype=np.uint16)

cv2.setRNGSeed(int(time.time()))
cv2.randu(pts, (0, 0), (512, 512))

# draw points
for k in range(nPoints):
    x, y = pts[0, k][:]  # pts[0, k, :]
    cv2.circle(dst, (x, y), radius=5, color=(0, 0, 255), thickness=-1)

cv2.imshow('dst', dst)
cv2.waitKey()
cv2.destroyAllWindows()
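Because the script seeds with the current time, the dots move on every run. With a fixed seed (a value of my own choosing), the same 100 points appear each time:

cv2.setRNGSeed(12345)  # fixed seed instead of int(time.time())
pts_fixed = np.zeros((1, nPoints, 2), dtype=np.uint16)
cv2.randu(pts_fixed, (0, 0), (512, 512))  # identical points on every run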
Example #15
def _cv2_estimate_E_without_intrinsics(cfg, matches, kps1, kps2, calib1,
                                       calib2):
    '''Estimate the Essential matrix from correspondences. Computes the
    Fundamental Matrix first and then retrieves the Essential matrix assuming
    known intrinsics.
    '''

    # Reference: https://docs.opencv.org/3.4.7/d9/d0c/group__calib3d.html
    # Default values for confidence: 0.99 (F), 0.999 (E)
    # Default value for the reprojection threshold: 3
    # (We set them to -1 when not applicable as OpenCV complains otherwise)
    cur_key = 'config_{}_{}'.format(cfg.dataset, cfg.task)
    geom = cfg.method_dict[cur_key]['geom']
    if geom['method'].lower() in ['cv2-ransac-f', 'cv2-patched-ransac-f']:
        min_matches = 8
        cv_method = 'FM_RANSAC'
        cv_reprojection_threshold = geom['threshold']
        cv_confidence = geom['confidence']
        if geom['method'].lower() == 'cv2-patched-ransac-f':
            cv_max_iter = geom['max_iter']
    elif geom['method'].lower() == 'cv2-lmeds-f':
        min_matches = 8
        cv_method = 'FM_LMEDS'
        cv_reprojection_threshold = -1
        cv_confidence = geom['confidence']
    elif geom['method'].lower() == 'cv2-7pt':
        # This should actually be *equal* to 7? We'll probably never use it...
        min_matches = 7
        cv_method = 'FM_7POINT'
        cv_reprojection_threshold = -1
        cv_confidence = -1
    elif geom['method'].lower() == 'cv2-8pt':
        min_matches = 8
        cv_method = 'FM_8POINT'
        cv_reprojection_threshold = -1
        cv_confidence = -1
    else:
        raise ValueError('Unknown method to estimate F')

    is_valid, matches, kp1, kp2 = _preprocess(matches, kps1, kps2, min_matches)
    if not is_valid:
        return _fail()

    cv2.setRNGSeed(cfg.opencv_seed)

    # Temporary fix to allow for patched opencv
    if geom['method'].lower() == 'cv2-patched-ransac-f':
        F, mask_F = cv2.findFundamentalMat(
            kp1,
            kp2,
            method=getattr(cv2, cv_method),
            ransacReprojThreshold=cv_reprojection_threshold,
            confidence=cv_confidence,
            maxIters=cv_max_iter)
    else:
        F, mask_F = cv2.findFundamentalMat(
            kp1,
            kp2,
            method=getattr(cv2, cv_method),
            ransacReprojThreshold=cv_reprojection_threshold,
            confidence=cv_confidence)
    mask_F = mask_F.astype(bool).flatten()

    # OpenCV can return multiple values as 6x3 or 9x3 matrices
    if F is None:
        return _fail()
    elif F.shape[0] != 3:
        Fs = np.split(F, len(F) // 3)
    else:
        Fs = [F]

    # Find the best F
    K1, K2 = calib1['K'], calib2['K']
    kp1n = normalize_keypoints(kp1, K1)
    kp2n = normalize_keypoints(kp2, K2)
    E, num_inlier = None, 0
    # mask_E_cheirality_check = None
    for _F in Fs:
        _E = np.matmul(np.matmul(K2.T, _F), K1)
        _E = _E.astype(np.float64)
        _num_inlier, _R, _t, _mask = cv2.recoverPose(_E, kp1n[mask_F],
                                                     kp2n[mask_F])
        if _num_inlier >= num_inlier:
            num_inlier = _num_inlier
            E = _E
            # This is unused for now
            # mask_E_cheirality_check = _mask.flatten().astype(bool)

    # Return the initial list of matches (from F)
    indices = matches[:, mask_F.flatten()]
    return E, indices
Example #16
import numpy as np
import cv2 as cv
from skimage.registration import phase_cross_correlation
from skimage.filters import difference_of_gaussians

from .preprocess import preprocess

# seeding
cv.setRNGSeed(0)


class Stitcher(object):
    """Stitches images together.

    Args:
        scales (list of float): scaling factors for stitching. Low-res
            images are tried first; if no match is found, higher-res
            images are used.
        lowe_ratio (float): Lowe's ratio for discarding false matches.
            The lower the ratio, the more false matches are discarded.
            Defaults to 0.7.
        min_inliers (int): minimum number of matches required to attempt
            estimation of an affine transform; the higher, the more
            high-quality the match. Also used to decide whether a
            higher-resolution image should be tried: higher-res matching
            is attempted when the number of RANSAC inliers is below
            min_inliers. Defaults to 50; the absolute minimum is 4.
        ransac_reproj_threshold (float): maximum reprojection error in
            RANSAC for a point to count as an inlier; the higher, the
            more tolerant RANSAC is. Defaults to 7.0.
    """
    def __init__(self,
Example #17
import os
import random
import cv2
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn

#%%
debug = False
torch.cuda.empty_cache()
torch.cuda.device_count()

#%%
seed = 666
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
cv2.setRNGSeed(seed)

#%% [markdown]
# ## Define model frome zoo and training ops
#%% [markdown]
# ### Setup hyper-parameters

#%%
data_dir = os.path.join('.', 'data_set')
contents = os.listdir(os.path.join(data_dir, 'val'))
# Number of classes
num_classes = len(contents)
# Batch size for training 64 - based on Google Benchmarks for K80
batch_size = 10  # 8 is for my GPU with 2Gb RAM
# Number of epochs to train for
num_epochs = 8
Example #18
def classify_rgb_KMC(img, K=3, plot=1):
    """
    This classifies an RGB image using K-Means clustering.
    Note: only 10 colors are specified, so will have plotting error with K > 10
    INPUTS:
        1) img: a 3D numpy array of rgb image
        2) K: optional, the number of K-Means Clusters
        3) plot: a flag that determine if multiple figures of classified is displayed. 
                1 = plot displayed, 0 = no plot is displayed
    OUTPUTS:
        1) label_image: a 2D numpy array the same x an y dimensions as input rgb image, 
            but each pixel is a k-means class.
        2) result_image: a 3D numpy array the same dimensions as input rgb image, 
            but having undergone Color Quantization which is the process of 
            reducing number of colors in an image.
    """
    # Setting random seed to ensure results are consistent with each run
    cv2.setRNGSeed(1)

    # Preparing RGB Image
    vectorized = img.reshape((-1, 3))
    vectorized = np.float32(vectorized)

    # K-Means
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    attempts = 10
    ret, label, center = cv2.kmeans(vectorized, K, None, criteria, attempts,
                                    cv2.KMEANS_RANDOM_CENTERS)

    # Use if you want to have quantized imaged
    center = np.uint8(center)
    res = center[label.flatten()]
    result_image = res.reshape((img.shape))

    # Labeled class image
    label_image = label.reshape((img.shape[0], img.shape[1]))

    if plot == 1:
        # Plotting Results
        coloroptions = [
            'b', 'g', 'r', 'c', 'm', 'y', 'k', 'orange', 'navy', 'gray'
        ]
        fig = plt.figure(figsize=(10, 5))
        ax1 = fig.add_subplot(1, 2, 1)
        ax1.imshow(img)
        ax1.set_title('Original Image')
        ax1.set_xticks([])
        ax1.set_yticks([])
        ax2 = fig.add_subplot(1, 2, 2)
        cmap = colors.ListedColormap(coloroptions[0:K])
        ax2.imshow(label_image, cmap=cmap)
        ax2.set_title('K-Means Classes')
        ax2.set_xticks([])
        ax2.set_yticks([])
        fig.subplots_adjust(left=0.05, top=0.8, bottom=0.01, wspace=0.05)
        plt.show(block=True)

        # Plotting just K-Means with label
        ticklabels = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
        fig, ax = plt.subplots(figsize=(5, 5))
        im = ax.imshow(label_image, cmap=cmap)
        cbar = fig.colorbar(im, ax=ax, shrink=0.6, ticks=np.arange(0, K))
        cbar.ax.set_yticklabels(ticklabels[0:K])
        cbar.ax.set_ylabel('Classes')
        plt.show(block=True)

    return label_image, result_image
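A hypothetical call for classify_rgb_KMC (input array is my own), quantizing a random image into 4 classes with plotting disabled:

rgb = (np.random.rand(32, 32, 3) * 255).astype(np.uint8)
classes, quantized = classify_rgb_KMC(rgb, K=4, plot=0)
print(np.unique(classes))  # cluster ids, 0..3
print(quantized.dtype)     # uint8 color-quantized copy of the input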
Example #19
import numpy as np
from pprint import pprint
from sys import exit
from PIL import Image
from os import path
import caffe, cv2
caffe.set_random_seed(666)
import numpy.random
numpy.random.seed(666)
import random
random.seed(666)
cv2.setRNGSeed(666)

import util


class RpnDetector:
    def __init__(self, configFile):
        root = path.dirname(configFile)

        self.config = util.readConfig(configFile)

        caffe.set_device(0)
        caffe.set_mode_gpu()

        self.net = caffe.Net(
            path.join(root, self.config["dnn_deploy_file"][0]),
            path.join(
                root,
                self.config["dnn_weight_files"][0],
            ), caffe.TEST)
Example #20
# 0421-setRNGSeed.py
# Red dots are drawn on a white canvas.
# The idea: add noise to an image, then ask how noise in the original
# image could be handled.
# A random seed is set so that the points are placed at random positions.
import cv2
import numpy as np
import time

dst = np.full((512, 512, 3), (255, 255, 255), dtype=np.uint8)
nPoints = 100
pts = np.zeros((1, nPoints, 2), dtype=np.uint16)

cv2.setRNGSeed(int(time.time()))  # time.time() changes every second, so the points land in new positions on each run
# To get the same output on every run, pass a fixed number as the seed:
# the points are still random, but identical across runs.
cv2.randu(pts, (0, 0), (512, 512))
            
# draw points
for k in range(nPoints):
    x, y = pts[0, k][:]  # pts[0, k, :]
    cv2.circle(dst, (x, y), radius=5, color=(0, 0, 255), thickness=-1)  # draw each random point
    
cv2.imshow('dst', dst)
cv2.waitKey()
cv2.destroyAllWindows()
Example #21
def TrainingApp():
    # Training settings
    parser = argparse.ArgumentParser(description='Training application.')
    # Data parameters
    parser.add_argument('--input-dir',
                        type=str,
                        required=True,
                        help='the path to the directory with the input data.')
    parser.add_argument('--output-dir',
                        type=str,
                        required=True,
                        help='the path to output the results.')
    parser.add_argument('--output-dir-val',
                        type=str,
                        required=True,
                        help='the path to output the results for validation.')
    parser.add_argument('--resume',
                        default='',
                        type=str,
                        metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    # Optimization parameters
    parser.add_argument('--optimizer',
                        type=str,
                        choices=['adam', 'sgd', 'rmsprop'],
                        default='adam',
                        help='the optimization solver to use (default: adam)')
    parser.add_argument('--num-epochs',
                        type=int,
                        default=50,
                        metavar='N',
                        help='number of training epochs (default: 50)')
    parser.add_argument('--lr',
                        type=float,
                        default=1e-4,
                        metavar='LR',
                        help='learning rate (default: 1e-4)')
    parser.add_argument('--wd',
                        type=float,
                        default=1e-5,
                        metavar='WD',
                        help='weight decay (default: 1e-5)')
    parser.add_argument('--batch-size',
                        type=int,
                        default=2,
                        metavar='B',
                        help='the mini-batch size (default: 2)')
    # Training parameters
    parser.add_argument('--cuda',
                        action='store_true',
                        default=True,
                        help='enables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=666,
                        metavar='S',
                        help='random seed (default: 666)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=50,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument(
        '--log-interval-vis',
        type=int,
        default=5000,
        metavar='N',
        help='how many batches to wait before logging visualizations')
    parser.add_argument('--tensorboard',
                        action='store_true',
                        default=False,
                        help='use tensorboard for logging purposes')
    parser.add_argument('--num-workers',
                        default=8,
                        type=int,
                        help='the number of workers for the dataloader.')
    parser.add_argument('--gpu-id',
                        default='0',
                        type=str,
                        help='id(s) for CUDA_VISIBLE_DEVICES')
    args = parser.parse_args()

    tb_writer = None
    if args.tensorboard:
        tb_writer = SummaryWriter(log_dir=args.output_dir)

    # parse the device string into integer ids
    device_ids = list(map(int, args.gpu_id.split(',')))
    device_type = 'cuda' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device_type + ':' + str(device_ids[0]))
    args.device = device
    logger.info('=> device-ids: %s' % device_ids)
    logger.info('=> device-type: %s' % device_type)
    logger.info('=> primary device: %s' % device)

    # set the random seed
    np.random.seed(args.seed)
    cv2.setRNGSeed(args.seed)
    torch.manual_seed(args.seed)
    if device.type == 'cuda':
        cudnn.enabled = True
        cudnn.benchmark = True
        torch.cuda.set_device(device_ids[0])
        torch.cuda.manual_seed_all(args.seed)

    # Create output directory
    create_directory(args.output_dir)
    create_directory(args.output_dir_val)

    # create the dataset loader for training
    data_generator_train = MyDataset(data_root=args.input_dir, mode='train')

    # data loader for validation
    data_generator_val = MyDataset(data_root=args.input_dir, mode='val')

    data_loader_train = DataLoader(data_generator_train,
                                   shuffle=True,
                                   batch_size=args.batch_size,
                                   num_workers=args.num_workers,
                                   pin_memory=True)

    data_loader_val = DataLoader(data_generator_val,
                                 shuffle=False,
                                 batch_size=args.batch_size,
                                 num_workers=args.num_workers,
                                 pin_memory=True)

    # create model
    model = MyModel(num_outputs=1)
    model = model.to(device)

    # optionally resume from a checkpoint
    if args.resume:
        assert os.path.isfile(args.resume), \
            "Invalid file {}".format(args.resume)
        logger.info('=> loading checkpoint {}'.format(args.resume))
        pretrained_dict = torch.load(args.resume)
        model.load_state_dict(pretrained_dict, strict=False)
    else:
        logger.info('=> no checkpoint found at {}'.format(args.resume))

    # in case we want to use multiple devices for training
    model = nn.DataParallel(model, device_ids=device_ids)

    criterion = MyLoss()

    # create the optimizer the learning rate scheduler
    params = model.parameters()

    if args.optimizer == 'adam':
        optimizer = optim.Adam(params, lr=args.lr, weight_decay=args.wd)
    elif args.optimizer == 'sgd':
        optimizer = optim.SGD(params,
                              lr=args.lr,
                              momentum=0.9,
                              weight_decay=args.wd)
    elif args.optimizer == 'rmsprop':
        optimizer = optim.RMSprop(params, lr=args.lr, weight_decay=args.wd)
    else:
        raise NotImplementedError("Not supported solver {}".format(
            args.optimizer))

    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.5)

    # epochs loop

    for epoch in range(args.num_epochs):

        train(epoch, model, optimizer, criterion, data_loader_train, tb_writer,
              args)

        with torch.no_grad():
            validate(epoch, model, criterion, data_loader_val, tb_writer, args)

        try:
            # unwrap nn.DataParallel before saving the state dict
            model_state_dict = model.module.state_dict()
        except AttributeError:
            model_state_dict = model.state_dict()

        torch.save(
            model_state_dict,
            os.path.join(args.output_dir, '{0}_model.pth'.format(epoch)))

        # Update lr scheduler
        scheduler.step()
Example #22
import cv2
import numpy as np
import torch
from torch.autograd import Variable
from torch.backends import cudnn
from torch import nn
from torch import optim
from torch.utils.data import DataLoader
from torchvision import transforms

from keypoint_net import KeypointNet
from cross_ratio_loss import CrossRatioLoss
from utils import Logger
from utils import load_train_csv_dataset, prep_image, visualize_data, vis_tensor_and_save, calculate_distance, calculate_mean_distance
from dataset import ConeDataset

cv2.setRNGSeed(17)
torch.manual_seed(17)
np.random.seed(17)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
cuda = torch.cuda.is_available()
device = torch.device('cuda:0' if cuda else 'cpu')

visualization_tmp_path = "/outputs/visualization/"

def print_tensor_stats(x, name):
    flattened_x = x.cpu().detach().numpy().flatten()
    avg = sum(flattened_x)/len(flattened_x)
    print(f"\t\t{name}: {avg},{min(flattened_x)},{max(flattened_x)}")

def train_model(model, output_uri, dataloader, loss_function, optimizer, scheduler, epochs, val_dataloader, intervals, input_size, num_kpt, save_checkpoints, kpt_keys, study_name, evaluate_mode):
Example #23
    def setUp(self):
        cv2.setRNGSeed(10)
        self.image_cache = {}
Example #24
import torch
from torch.autograd import Variable
import cv2
import numpy as np
from glob import glob
import sys

use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
Tensor = FloatTensor

np.random.seed(3)
cv2.setRNGSeed(3)


def tv_norm(input, tv_beta):
    img = input[0, 0, :]
    row_grad = torch.mean(torch.abs((img[:-1, :] - img[1:, :])).pow(tv_beta))
    col_grad = torch.mean(torch.abs((img[:, :-1] - img[:, 1:])).pow(tv_beta))
    return row_grad + col_grad
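A tiny usage sketch for tv_norm (shapes are my own): it expects a batch shaped (N, C, H, W) and returns a scalar tensor penalizing differences between neighboring pixels.

x = torch.rand(1, 1, 8, 8)
print(tv_norm(x, tv_beta=2).item())  # mean row + column gradient penalty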


def preprocess_image(img):
    img = np.float32(img)

    img = img / 255  # normalise

    mean = np.float32(np.array([0.485, 0.456, 0.406]))
    stdev = np.float32(np.array([0.229, 0.224, 0.225]))
    img = (img - mean) / stdev
Example #25
    def __init__(
        self, data, y, x, cluster_field=None, ecology=None, random_state=None
    ):
        self._data = None
        self._y = None
        self._x = None
        self._cluster_field = None
        self._ecology = None
        self._random_state = None

        self._data_generated = False
        
        if isinstance(data, pd.DataFrame):
            self._data = data
        else:
            raise ValueError("Please provide a pandas DataFrame")
        if y in self._data.columns:
            self._y = y
        else:
            raise ValueError("y field not in provided DataFrame")
        if x in self._data.columns:
            self._x = x
        else:
            raise ValueError("x field not in provided DataFrame")
        if cluster_field is not None and cluster_field in self._data.columns:
            self._cluster_field = cluster_field
        elif cluster_field is None:
            self._data['Cluster'] = ''
            self._cluster_field = 'Cluster'
        else:
            raise ValueError("Cluster field not in provided DataFrame")
        
        if isinstance(random_state, int):
            self._random_state = random_state
            np.random.seed(self.random_state)
            random.seed(self.random_state)
            cv.setRNGSeed(self.random_state)
        elif random_state is None:
            random_state = self._data.var().sum().astype(int)
            self._random_state = random_state
            np.random.seed(self.random_state)
            random.seed(self.random_state)
            cv.setRNGSeed(self.random_state)
        else:
            raise ValueError("Please provide an integer value for your random seed")
        
        if ecology is not None:
            try:
                cm.get_cmap(ecology, 1)
                self._ecology = ecology
            except Exception as e:
                raise ValueError(e)
        else:
            self._ecology = colors.ListedColormap(np.random.rand(256,3))

        # only keep what we need
        self._data = self._data[[self._y, self._x, self._cluster_field]].copy()

        # set the rest
        self._contours = dict()
        self._ocean_colour = None
        self._fig = None
        self._cmap = None
        self._max_contour = None
        self._shadows = list()
        self._highlight = list()
        self._topos = list()
        self._relief = list()
Example #26
    '--log-interval',
    type=int,
    default=10,
    metavar='LI',
    help='how many batches to wait before logging training status')

args = parser.parse_args()

# set the device to use by setting CUDA_VISIBLE_DEVICES env variable in
# order to prevent any memory allocation on unused GPUs
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
cv2.setRNGSeed(args.seed)

if not os.path.exists(args.log_dir):
    os.makedirs(args.log_dir)

if args.cuda:
    cudnn.benchmark = True
    torch.cuda.manual_seed_all(args.seed)

LOG_DIR = args.log_dir + '/run-optim_{}-n{}-lr{}-wd{}-m{}-S{}-tanh'\
    .format(args.optimizer, args.n_triplets, args.lr, args.wd,
            args.margin, args.seed)
# create logger
logger = Logger(LOG_DIR)

Example #27
def test_HaarCascadeFrontalFace(image_with_faces, boxes_HaarCascadeFace):
    cv2.ocl.setUseOpenCL(False)
    cv2.setNumThreads(1)
    cv2.setRNGSeed(777)
    detector = HaarCascadeFrontalFace()
    assert_inferences(detector, image_with_faces, boxes_HaarCascadeFace)
Example #28
    def setUp(self):
        cv2.setRNGSeed(10)
        self.image_cache = {}
Example #29
def test_DetectMiniXceptionFER(image_with_faces, boxes_MiniXceptionFER):
    cv2.ocl.setUseOpenCL(False)
    cv2.setNumThreads(1)
    cv2.setRNGSeed(777)
    detector = DetectMiniXceptionFER()
    assert_inferences(detector, image_with_faces, boxes_MiniXceptionFER)
Example #30
def _cmp_estimate_E_with_intrinsics(cfg,
                                    matches,
                                    kps1,
                                    kps2,
                                    calib1,
                                    calib2,
                                    img1_fname=None,
                                    img2_fname=None):
    '''Estimate the Essential matrix from correspondences. Assumes known
    intrinsics.
    '''

    # Reference: https://docs.opencv.org/3.4.7/d9/d0c/group__calib3d.html
    # Default values for confidence: 0.99 (F), 0.999 (E)
    # Default value for the reprojection threshold: 3
    # (We set them to -1 when not applicable as OpenCV complains otherwise)

    is_valid, matches, kp1, kp2 = _preprocess(matches, kps1, kps2, 5)
    if not is_valid:
        return _fail()

    # Normalize keypoints with ground truth intrinsics
    kp1_n = normalize_keypoints(kp1, calib1['K'])
    kp2_n = normalize_keypoints(kp2, calib2['K'])
    if img1_fname is not None:
        # np.ndarray.size is a scalar; use .shape to get (height, width)
        h1, w1 = cv2.imread(img1_fname).shape[:2]
        h2, w2 = cv2.imread(img2_fname).shape[:2]
    else:
        raise ValueError('Requires image filenames')

    cv2.setRNGSeed(cfg.opencv_seed)
    E, mask_E = pygcransac.findEssentialMatrix(kp1, kp2, calib1['K'],
                                               calib2['K'], h1, w1, h2, w2,
                                               cfg.method_geom['threshold'],
                                               cfg.method_geom['confidence'],
                                               cfg.method_geom['max_iter'])
    mask_E = mask_E.astype(bool).flatten()

    # OpenCV can return multiple values as 6x3 or 9x3 matrices
    if E is None:
        return _fail()
    elif E.shape[0] != 3:
        Es = np.split(E, len(E) // 3)
    # Or a single matrix
    else:
        Es = [E]

    # Find the best E
    E, num_inlier = None, 0
    # mask_E_cheirality_check = None
    for _E in Es:
        _num_inlier, _R, _t, _mask = cv2.recoverPose(_E, kp1_n[mask_E],
                                                     kp2_n[mask_E])
        if _num_inlier >= num_inlier:
            num_inlier = _num_inlier
            E = _E
            # This is unused for now
            # mask_E_cheirality_check = _mask.flatten().astype(bool)

    indices = matches[:, mask_E.flatten()]
    return E, indices
Example #31
                    "Convolutional Layer")
parser.add_argument("--cv2_seed",
                    type=int,
                    default=None,
                    help="Random seed for python-opencv")
parser.add_argument("--ub_size",
                    type=int,
                    default=655360,
                    help="Unified Buffer Size in bytes")
args = parser.parse_args()

# Set limits
fpga_layer.set_max_kernel_size(args.max_kernel_size)
fpga_layer.set_ub_size(args.ub_size)
if args.cv2_seed is not None:
    cv2.setRNGSeed(args.cv2_seed)

# Parse config file
config = configparser.ConfigParser(strict=False,
                                   inline_comment_prefixes=('#', ';'))
config.read_dict({
    'INPUT': {
        'custom_layer': '',
        'width_override': -1,
        'height_override': -1
    },
    'OUTPUT': {
        'generate_source': 0,
        'generate_doxy': 0,
        'generate_dot': 0,
        'quantization': 1,