Code example #1
def shading_correction_folder(inputfolder,
                              outputfolder,
                              binning=3,
                              magnification=20):
    """
    Covert lab specific.
    Not to be called by preprocess_operation. Use it in separate from a pipeline.
    """
    refpath = 'http://archive.simtk.org/ktrprotocol/temp/ffref_{0}x{1}bin.npz'.format(
        magnification, binning)
    darkrefpath = 'http://archive.simtk.org/ktrprotocol/temp/ffdarkref_{0}x{1}bin.npz'.format(
        magnification, binning)
    ref, darkref = retrieve_ff_ref(refpath, darkrefpath)
    parentfolder = inputfolder
    for dirname, subdirlist, filelist in os.walk(parentfolder):
        if 'metadata.txt' in filelist:
            # strip any leading separator so join() does not discard outputfolder
            outputdir = join(outputfolder,
                             dirname.split(parentfolder)[-1].lstrip(os.sep))
            if not os.path.exists(outputdir):
                os.makedirs(outputdir)
            with open(join(dirname, 'metadata.txt')) as mfile:
                data = json.load(mfile)
                channels = data['Summary']['ChNames']
            for chnum, ch in enumerate(channels):
                pathlist = glob(join(dirname,
                                     '*channel{0:03d}*'.format(chnum)))
                for path in pathlist:
                    if ch == 'PHASE':
                        img = imread(path)
                        tiff.imsave(join(outputdir, os.path.basename(path)),
                                    img.astype(np.float32))
                    else:
                        img = correct_shade(imread(path), ref, darkref, ch)
                        tiff.imsave(join(outputdir, os.path.basename(path)),
                                    img.astype(np.float32))
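Note: correct_shade and retrieve_ff_ref are project-specific helpers not shown here. A typical flat-field shading correction divides out the per-channel illumination profile; a minimal sketch of what correct_shade likely computes (the channel indexing and rescaling are assumptions, not the project's actual code):

import numpy as np

def correct_shade_sketch(img, ref, darkref, ch):
    """Standard flat-field correction: (raw - dark) / (flat - dark), rescaled."""
    flat = ref[ch].astype(np.float32) - darkref[ch].astype(np.float32)
    corrected = (img.astype(np.float32) - darkref[ch]) / np.maximum(flat, 1e-6)
    return corrected * flat.mean()  # restore the original intensity scale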
Code example #2
File: vcam.py Project: tracking-fun-1/oflearn
    def load_rec(self, rec):
        """
        Load image and related data given an image_info record.

        Returns im3d, mask3d, coords2d, im2d.
        For a 2d dataset, im3d == im2d,
                          mask3d == self.global_mask (or an all-ones mask if None),
                          coords2d == None.
        """
        if self.is3d:
            fn = join(self.projpoints_dir, rec['pmvsid'] + '_0000.exr')
            im3d, mask3d, coords2d = imread_projpoints(fn)
            im2d = util.imread(join(self.data_dir, rec['filename']))
        else:
            im3d_fn = join(self.data_dir, rec['filename'])
            im3d = util.imread(im3d_fn)

            # assumes for now that 2d datasets have no per-image masks
            if self.global_mask is not None:
                mask3d = self.global_mask
            else:
                mask3d = np.ones(im3d[:,:,0].shape, dtype=bool)
            coords2d = None
            im2d = im3d
        return im3d, mask3d, coords2d, im2d
Code example #3
def step5_rectify_images(args, step4_out):
    """Rectify images based on RANSAC fit of essential matrix."""
    _, ransac = step4_out
    P1 = ransac['camera']
    P0 = np.hstack((np.eye(3), np.zeros((3, 1))))
    K = np.loadtxt(fname=args.K)
    P1 = np.dot(K, P1)
    P0 = np.dot(K, P0)
    im0, im1 = imread(args.images[0]), imread(args.images[1])
    with Timer('step5-computation'):
        r0, r1, ri0, ri1 = image_pair_rectification(P0,
                                                    P1,
                                                    im0,
                                                    im1,
                                                    sampling_factor=args.rsf)
    plt.imsave(
        os.path.join(args.outdir, "rect-" + os.path.basename(args.images[0])),
        r0)
    plt.imsave(
        os.path.join(args.outdir, "rect-" + os.path.basename(args.images[1])),
        r1)
    # swap the image extension for .bin (splitext avoids breaking on dots
    # elsewhere in the path, unlike split('.'))
    ri0.tofile(
        os.path.splitext(
            os.path.join(args.outdir, "rect-idx-" +
                         os.path.basename(args.images[0])))[0] + '.bin')
    ri1.tofile(
        os.path.splitext(
            os.path.join(args.outdir, "rect-idx-" +
                         os.path.basename(args.images[1])))[0] + '.bin')
Code example #4
def demo_inpainting():
    inFileName = 'images/new_original.png'
    y = imread(inFileName, outputFormat='YCbCr')
    maskFileName = 'images/new_mask.png'
    mask = np.array(imread(maskFileName), dtype=bool)
    gridModel = HDPGridModel('models/HDP')
    x = gridModel.inpaint(y, mask)
    imwrite(x, 'HDP_inpainting_results.png')
Code example #5
def step1_sift_detect(args):
    """Run SIFT key-point detection and descriptors on images."""
    ims = [
        imread(image_filename, dtype='float32', force_grayscale=True)
        for image_filename in args.images
    ]
    with Timer('step1-computation'):
        if args.use_sift_striped:
            siftkps = [
                sift_filter_striped(im, nthread=args.cpu_count) for im in ims
            ]
        else:
            siftkps = sift_filter_batch(ims)
    print('sift 1 #: ', siftkps[0].shape[0])
    print('sift 2 #: ', siftkps[1].shape[0])
    # Begin Visualize
    c_im = np.hstack(ims)
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.imshow(c_im, cmap='gray', interpolation='nearest')
    x0, y0 = siftkps[0][:, :2].T
    x1, y1 = siftkps[1][:, :2].T
    shift = ims[0].shape[1]
    ax.plot(x0, y0, 'rx', markersize=1)
    ax.plot(x1 + shift, y1, 'bx', markersize=1)
    ax.autoscale()
    ax.set_title('Step1: SIFT Keypoints Detected')
    # End Visualize
    return siftkps
Code example #6
File: vcam.py Project: tracking-fun-1/oflearn
    def __init__(self, dataset_name):
        self.name = dataset_name
        self.base_dir = join(os.getenv('VCAM_ROOT'), 'data', dataset_name)
        self.data_dir = join(self.base_dir, 'data')
        self.projpoints_dir = join(self.data_dir, 'projpoints')
        self.pid_dir = join(self.data_dir, 'pids')

        self.results_dir = join(self.base_dir, 'results')
        if not os.path.exists(self.results_dir):
            os.mkdir(self.results_dir)

        # load image_info
        imginfo_fn = join(self.data_dir, 'image_info.json')
        with open(imginfo_fn) as f:
            self.image_info = json.load(f)
        self.size = len(self.image_info)

        # set whether this is a 3d dataset or not
        self.is3d = 'projpoints' not in self.image_info[0]['filename']

        # load a global mask if one exists
        self.global_mask = None
        global_mask_fn = join(self.projpoints_dir, 'mask.png')
        if exists(global_mask_fn):
            self.global_mask = util.imread(global_mask_fn) > 0
            if self.global_mask.ndim > 2:
                self.global_mask = self.global_mask[:,:,0]

        # get dimensions
        im3d, _, _, _ = self.load_rec(self.image_info[0])
        self.dims = im3d.shape
        self.N = np.prod(self.dims[:2])

        self.normals, self.coords3d = self._load_normals_coords()
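For context, a hedged usage sketch of the class above (the class name Dataset is an assumption since the listing only shows its methods, and VCAM_ROOT must point at the data root):

ds = Dataset('my_dataset')  # hypothetical dataset name
for rec in ds.image_info:
    im3d, mask3d, coords2d, im2d = ds.load_rec(rec)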
Code example #7
    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()

        # Get image name
        idx, labels = self.df.values[idx]  # image ID and its labels, in CSV-file order
        img_name = self.root_dir.format(idx)  # build the image path from the image ID

        # Augmentation
        flip = False
        shift_rgb = False
        if self.training:
            flip = np.random.randint(5) == 1  # flip with probability 1/5
            shift_rgb = np.random.randint(5) == 1  # RGB shift with probability 1/5
        # Read image
        img0 = imread(img_name, True)
        img = preprocess_image(img0, flip=flip)
        if shift_rgb:
            img = img[:, :, ::-1].copy()  # reverse channel order (RGB <-> BGR)
        img = np.rollaxis(img, 2, 0)  # convert HWC to CHW

        # Get mask and regression maps
        mask, gaussian_mask, regr = get_mask_and_regr(img0, labels, flip=flip)
        regr = np.rollaxis(regr, 2, 0)  # convert HWC to CHW

        return [img, mask, gaussian_mask, regr]
Code example #8
def step4_triangulate_points(args, step3_out):
    """Triangulate the points detected as inliers from the previous step."""
    ransac, x0, x1, xd, yd = step3_out
    idx = ransac['inlier_idx']
    P1 = ransac['camera']
    P0 = np.hstack((np.eye(3), np.zeros((3, 1))))
    with Timer('step4-computation'):
        RX = dlt_triangulate(P0, P1, x0[idx], x1[idx])
    RX = RX / RX[:, -1].reshape(-1, 1)  # normalize homogeneous coordinates
    xy0 = xd[idx, :2].astype('int32')
    xy1 = yd[idx, :2].astype('int32')
    im0, im1 = imread(args.images[0]), imread(args.images[1])
    im0v = im0[xy0[:, 1], xy0[:, 0]]
    im1v = im1[xy1[:, 1], xy1[:, 0]]
    rgb = np.round(255 * (im0v + im1v) / 2.).astype('uint8')
    write_ply(os.path.join(args.outdir, "sparse_inliers.ply"), RX, rgb=rgb)
    return RX, ransac
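For reference, dlt_triangulate above presumably implements the standard direct linear transform: each observed point x = PX contributes two linear constraints on X, and the stacked system is solved by SVD. A minimal sketch under that assumption (the project's own implementation may differ):

import numpy as np

def dlt_triangulate_sketch(P0, P1, x0, x1):
    """Triangulate correspondences x0, x1 (N x 2) seen by cameras P0, P1 (3 x 4)."""
    X = np.zeros((x0.shape[0], 4))
    for i, (u, v) in enumerate(zip(x0, x1)):
        # each observation (x, y) contributes rows x*P[2] - P[0] and y*P[2] - P[1]
        A = np.stack([u[0] * P0[2] - P0[0],
                      u[1] * P0[2] - P0[1],
                      v[0] * P1[2] - P1[0],
                      v[1] * P1[2] - P1[1]])
        # the solution is the right singular vector of the smallest singular value
        _, _, Vt = np.linalg.svd(A)
        X[i] = Vt[-1]
    return X  # homogeneous 4-vectors; normalized by the last coordinate above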
Code example #9
def demo_eDP():
    I = imread('images/barbara.png')
    sigma = 25.0 / 255.0
    PRNG = np.random.RandomState(0)
    y = I + sigma * PRNG.randn(I.shape[0], I.shape[1])
    gridModel = DPGridModel('models/DP')
    x, PSNR = gridModel.denoise(y, sigma, I)
    imwrite(x, 'eDP_results.png')
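gridModel.denoise also returns a PSNR against the clean image I. With images scaled to [0, 1] as here (sigma = 25/255), PSNR is conventionally computed as below; this is a generic helper for reference, not the project's own implementation:

import numpy as np

def psnr(x, ref, peak=1.0):
    """PSNR in dB; peak is the maximum signal value (1.0 for [0, 1] images)."""
    mse = np.mean((np.asarray(x, dtype=np.float64) - ref) ** 2)
    return 10.0 * np.log10(peak ** 2 / mse)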
Code example #10
def step2_match_keypoints(args, step1_out):
    """Using output of step1, find likely matches."""
    x, y = step1_out
    _x = normalize_to_ubyte_and_multiple_16_dim(x)
    _y = normalize_to_ubyte_and_multiple_16_dim(y)
    with Timer('step2-computation'):
        if args.matching_method == 'bruteforce':
            nn_idx, nn_dist = nn_bruteforcel1k2((_x + 128).astype('uint8'),
                                                (_y + 128).astype('uint8'),
                                                nthreads=args.cpu_count)
        elif args.matching_method == 'cascading-hash':
            nn_idx, nn_dist = nn_cascading_hash(_x, _y)
    ratio = nn_dist[:, 1] / nn_dist[:, 0].astype('float64')
    pass_idx = ratio >= args.min_ratio
    idx0, _ = nn_idx.T
    xd = x[idx0[pass_idx]]
    yd = y[pass_idx]
    # Begin Visualize
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    im0, im1 = imread(args.images[0]), imread(args.images[1])
    c_im = np.hstack([im0, im1])
    ax.imshow(c_im, cmap='gray', interpolation='nearest')
    x0, y0 = xd[:, :2].T
    x1, y1 = yd[:, :2].T
    shift = im0.shape[1]
    x1 = x1.copy() + shift
    # plot points
    ax.plot(x0, y0, 'rx', markersize=3)
    ax.plot(x1, y1, 'bx', markersize=3)
    lines = np.asarray(list(zip(zip(x0, y0), zip(x1, y1))))  # (N, 2, 2); list() needed on Python 3
    # randomize line colors
    rand_idx = np.random.randint(lines.shape[0],
                                 size=int(lines.shape[0] *
                                          args.percent_to_show))
    lines = lines[rand_idx]
    lc = mc.LineCollection(lines, cmap=plt.cm.gist_ncar, linewidths=1)
    lc.set_array(np.random.random(lines.shape[0]))
    ax.add_collection(lc)
    ax.autoscale()
    ax.set_title('Step2: Match SIFT Keypoints')
    # End Visualize
    return xd, yd
Code example #11
File: vcam.py Project: tracking-fun-1/oflearn
def load_from_rec(dataset, rec, is3d=True):
    # returns im3d, mask3d, coords2d, im2d
    base_dir = '/home/swehrwein/vcam/data/'
    data_dir = join(base_dir, dataset, 'data')

    im2d_fn = join(data_dir, rec['filename'])
    im2d = util.imread(im2d_fn)

    if is3d:
        im3d_fn = join(data_dir, 'projpoints', rec['pmvsid'] + '_0000.exr')
        im3d, mask3d, coords2d = imread_projpoints(im3d_fn)
    else:
        # 2d fallback mirroring load_rec above; otherwise im3d/mask3d/coords2d
        # would be unbound when is3d is False
        im3d = im2d
        mask3d = np.ones(im2d.shape[:2], dtype=bool)
        coords2d = None

    return im3d, mask3d, coords2d, im2d
Code example #12
    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()

        # Get image name
        idx, labels = self.df.values[idx]
        img_name = self.root_dir.format(idx)

        # Augmentation
        flip = False
        if self.training:
            flip = np.random.randint(2) == 1

        # Read image
        img0 = imread(img_name, True)
        img = preprocess_image(img0, flip=flip)
        img = np.rollaxis(img, 2, 0)  # convert HWC to CHW

        # Get mask and regression maps (pass flip so the targets stay aligned
        # with the possibly flipped image)
        # print("img_name: {}, labels = {}".format(img_name, labels))
        mask, regr = get_mask_and_regr(img0, labels, flip=flip)
        regr = np.rollaxis(regr, 2, 0)

        return [img, mask, regr]
Code example #13
File: main.py Project: efriesen/CS270-Visualization
import argparse

import interpret_features as interpret
import identify_features as identify
import util
from PIL import Image

# Return the parsed arguments
def initialize_argument_parser():
    parser = argparse.ArgumentParser(description='Import data from an image')
    parser.add_argument('-i', dest='input_file',
                        help='the image to import',
                        default='data/sample_chart_easy.png')
    return vars(parser.parse_args())

if __name__ == "__main__":
    args = initialize_argument_parser()
    input_file = args["input_file"]
    image = util.imread(input_file)
    # util.display_graph(image)
    # print(image)
    image_labeled, feature_count = identify.identify_features(image)
    print('feature_count:', feature_count)
    # determine feature types of identified features
    feature_types = identify.identify_feature_types(image, image_labeled, feature_count)
    axes_box = identify.identify_axes_box(image)
    filtered_image = identify.nongrayscale_raw(image)
    # object to perform analysis of features
    image_analyzer = interpret.analyzer(image, filtered_image, image_labeled, feature_types, axes_box)
    # util.write_array('image_labeled.txt', image_labeled)
    # a set of slices that comprise the objects in the image
    object_slices = image_analyzer.object_slices
    # Convert the input image into a PIL-friendly format
Code example #14
import numpy as np
import cv2
import os
from util import imread, precomputed_kernel, plot_eigenvector, save_gif
from kmeans import kmeans

EPS = 1e-9

# set parameters
img_path = 'image2.png'
image_flat, HEIGHT, WIDTH = imread(img_path)
gamma_s = 0.001
gamma_c = 0.001
k = 3  # k clusters
k_means_initType = 'k_means_plusplus'
gif_path = os.path.join(
    'GIF',
    '{}_{}Clusters_{}'.format(img_path.split('.')[0], k, 'normalized.gif'))

# similarity matrix
W = precomputed_kernel(image_flat, gamma_s, gamma_c)
# degree matrix
D = np.diag(np.sum(W, axis=1))
L = D - W
D_inverse_square_root = np.diag(1 / np.diag(np.sqrt(D)))
L_sym = D_inverse_square_root @ L @ D_inverse_square_root
'''
eigenvalue,eigenvector=np.linalg.eig(L_sym)
np.save('{}_eigenvalue_{:.3f}_{:.3f}_normalized'.format(img_path.split('.')[0],gamma_s,gamma_c),eigenvalue)
np.save('{}_eigenvector_{:.3f}_{:.3f}_normalized'.format(img_path.split('.')[0],gamma_s,gamma_c),eigenvector)
'''
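The snippet stops after building L_sym; the quoted block only caches the eigendecomposition. The remaining steps of normalized spectral clustering would look roughly like this (the kmeans call signature is an assumption; only np, EPS, k, k_means_initType, and the kmeans import come from the snippet):

# eigendecomposition of the symmetric normalized Laplacian
eigenvalues, eigenvectors = np.linalg.eigh(L_sym)

# embed each pixel with the eigenvectors of the k smallest eigenvalues
U = eigenvectors[:, np.argsort(eigenvalues)[:k]]

# row-normalize the embedding (the "normalized" variant of spectral clustering)
T = U / (np.linalg.norm(U, axis=1, keepdims=True) + EPS)

# cluster the embedded points; the kmeans signature here is assumed
labels = kmeans(T, k, init=k_means_initType)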
Code example #15
# prepare model saver/summary writer
saver_GP = tf.train.Saver(var_list=varsGP)

print(util.toYellow("======= EVALUATION START ======="))
timeStart = time.time()
# start session
tfConfig = tf.ConfigProto(allow_soft_placement=True)
tfConfig.gpu_options.allow_growth = True
with tf.Session(config=tfConfig) as sess:
    sess.run(tf.global_variables_initializer())
    util.restoreModel(opt, sess, saver_GP, opt.loadGP, "GP")
    print(util.toMagenta("start evaluation..."))

    # create directories for test image output
    os.makedirs("eval_{0}".format(opt.loadGP), exist_ok=True)
    testImage = util.imread(opt.loadImage)

    # resize the input image to 128x128 (cv2.resize takes dsize as (width, height))
    import cv2
    testImage = cv2.resize(testImage, dsize=(128, 128))

    batch = data.makeBatchEval(opt, testImage, bags, PH)
    runList = [imageCompAll[0], imageCompAll[-1]]
    ic0, icf = sess.run(runList, feed_dict=batch)
    for b in range(opt.batchSize):
        util.imsave("eval_{0}/image_g{1}_input.png".format(opt.loadGP, b),
                    ic0[b])
        util.imsave("eval_{0}/image_g{1}_output.png".format(opt.loadGP, b),
                    icf[b])
Code example #16
import numpy as np
import os
import matplotlib.pyplot as plt
from util import imread, show_eigenface, show_reconstruction, performance
from pca import pca
from lda import lda

if __name__ == '__main__':
    filepath = os.path.join('Yale_Face_Database', 'Training')
    H, W = 231, 195
    X, y = imread(filepath, H, W)

    eigenvalues_pca, eigenvectors_pca, X_mean = pca(X, num_dim=31)
    X_pca = eigenvectors_pca.T @ (X - X_mean)
    eigenvalues_lda, eigenvectors_lda = lda(X_pca, y)

    # Transform matrix
    U = eigenvectors_pca @ eigenvectors_lda
    print('U shape: {}'.format(U.shape))

    # show the top 25 eigenfaces
    show_eigenface(U, 25, H, W)

    # reduce dim (projection)
    Z = U.T @ X

    # recover
    X_recover = U @ Z + X_mean
    show_reconstruction(X, X_recover, 10, H, W)

    # accuracy
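The snippet is truncated at the accuracy step. A hedged standalone sketch of what typically follows, nearest-neighbor classification in the reduced space (the Testing path mirroring the Training path is an assumption, and the project's own performance helper may differ):

    X_test, y_test = imread(os.path.join('Yale_Face_Database', 'Testing'), H, W)
    Z_test = U.T @ X_test  # project test faces with the same transform

    # pairwise squared distances between test and training projections
    d2 = ((Z_test[:, :, None] - Z[:, None, :]) ** 2).sum(axis=0)
    pred = y[np.argmin(d2, axis=1)]
    print('accuracy: {:.4f}'.format(np.mean(pred == y_test)))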
Code example #17
File: test.py Project: AIArtGroup/Sketch
    pPertFG = pert * tf.random_normal([batchSize, warpDim])
    # ------ define GP ------
    geometric = graph.combine
    # ------ geometric predictor ------
    imageWarped = geometric(WarpdData, stdGP, batchSize, dataH, dataW, pPertFG, warpN)
    # ------ optimizer ------
    # varsGP = [v for v in tf.global_variables() if "geometric" in v.name]

# prepare model saver/summary writer
saver_GP = tf.train.Saver()

print(util.toYellow("======= EVALUATION START ======="))
# start session
tfConfig = tf.ConfigProto(allow_soft_placement=True)
tfConfig.gpu_options.allow_growth = True
with tf.Session(config=tfConfig) as sess:
    sess.run(tf.global_variables_initializer())
    # restore the trained model
    saver_GP.restore(sess, trained_model)
    print(util.toMagenta("start evaluation..."))

    testImage = util.imread(loadImage)
    batch = data.makeBatchEval_tps(batchSize, testImage, PH)
    runList = [WarpdData, imageWarped]
    ic0, icf = sess.run(runList, feed_dict=batch)
    # print(ic0.shape, icf.shape)
    util.imsave("eval/image__input.png", 1 - ic0[0])
    util.imsave("eval/image__output{0}.png".format(stack_num), 1 - icf[0])

print(util.toYellow("======= EVALUATION DONE ======="))
Code example #18
        return 99.99
    MAX_PIXEL_VAL = 255
    return 20 * math.log10(MAX_PIXEL_VAL / math.sqrt(mse))


if __name__ == "__main__":
    magnitude = 2
    EpochNum = 3
    nc = 1  # only luma channel
    ''' checkpoint '''
    if not os.path.isdir("checkpoint"):
        os.mkdir("checkpoint")
    model_prefix = "checkpoint\\srcnn"
    ''' load test image '''
    test_image = os.path.join(os.getcwd(), 'Test\\Set5\\butterfly_GT.bmp')
    image = imread(test_image, is_grayscale=True)
    scipy.misc.imsave('ori_image.png', image)
    nh, nw = image.shape
    ''' generate bicubic interpolated images '''
    MAX_PIXEL_VAL = 128  # reused here as the centering constant: maps pixels into [-1, 1]
    mod_input = (image - MAX_PIXEL_VAL) / MAX_PIXEL_VAL
    bicubic = scipy.ndimage.interpolation.zoom(mod_input, (1. / magnitude),
                                               prefilter=False)
    bicubic = scipy.ndimage.interpolation.zoom(bicubic,
                                               magnitude,
                                               prefilter=False)
    bicubic = (bicubic * MAX_PIXEL_VAL) + MAX_PIXEL_VAL
    bicubic = bicubic.astype(int)  # astype returns a new array; assign the result back
    psnr_bicubic = '%.4f' % compute_psnr(image, bicubic)
    scipy.misc.imsave('bicubic_image_PSNR_' + str(psnr_bicubic) + '.jpg',
                      bicubic)
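The snippet opens mid-function: the 99.99 early return and the 20 * log10 expression are the tail of compute_psnr. A plausible reconstruction of the elided head, for context only (the exact original is not shown):

import math
import numpy as np

def compute_psnr(reference, target):
    # assumed reconstruction of the truncated head above
    mse = np.mean((reference.astype(np.float64) - target.astype(np.float64)) ** 2)
    if mse == 0:
        return 99.99
    MAX_PIXEL_VAL = 255
    return 20 * math.log10(MAX_PIXEL_VAL / math.sqrt(mse))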
Code example #19
saver_GP = tf.train.Saver(var_list=varsGP)

print(util.toYellow("======= EVALUATION START ======="))
timeStart = time.time()
# start session
tfConfig = tf.ConfigProto(allow_soft_placement=True)
tfConfig.gpu_options.allow_growth = True
with tf.Session(config=tfConfig) as sess:
    sess.run(tf.global_variables_initializer())
    util.restoreModel(opt, sess, saver_GP, opt.loadGP, "GP")
    print(util.toMagenta("start evaluation..."))

    # create directories for test image output
    os.makedirs("eval_{0}".format(opt.loadGP), exist_ok=True)

    from boxx import *
    tmpp = pathjoin(tmpboxx(), 'tmp.png')
    imsave(tmpp, resize(imread(opt.loadImage), (144, 144)))

    testImage = util.imread(tmpp)
    batch = data.makeBatchEval(opt, testImage, glasses, PH)
    runList = [imageCompAll[0], imageCompAll[-1]]
    ic0, icf = sess.run(runList, feed_dict=batch)
    for b in range(opt.batchSize):
        util.imsave("eval_{0}/image_g{1}_input.png".format(opt.loadGP, b),
                    ic0[b])
        util.imsave("eval_{0}/image_g{1}_output.png".format(opt.loadGP, b),
                    icf[b])

print(util.toYellow("======= EVALUATION DONE ======="))