Example #3
    def rgba2rgb(arr: np.ndarray, bg_color: ColorType = None) -> np.ndarray:
        """
        Alpha-composites data in the numpy array against the given
        background color and returns a new buffer without the
        alpha component.

        :param arr: The input array of RGBA data
        :param bg_color: The background color

        :return: Array of composited RGB data
        """
        if bg_color is None:
            bg_color = np.array([0.0, 0.0, 0.0, 1.0])
        else:
            bg_color = np.array(tuple(bg_color), dtype=float)

        alpha = arr[..., -1]
        channels = arr[..., :-1]

        out_buf = np.empty_like(channels)

        for ichan in range(channels.shape[-1]):
            out_buf[..., ichan] = np.clip(
                (1 - alpha) * bg_color[ichan] + alpha * channels[..., ichan],
                a_min=0,
                a_max=1)

        return dtype.img_as_ubyte(out_buf)
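
A minimal standalone sketch of the same idea (not part of the example above): alpha-composite a float RGBA image over a background colour, then convert the result with img_as_ubyte. The arrays and values here are illustrative only.

import numpy as np
from skimage import img_as_ubyte

rgba = np.random.rand(4, 4, 4)       # hypothetical RGBA image, floats in [0, 1]
bg = np.array([1.0, 1.0, 1.0])       # white background colour
alpha = rgba[..., -1:]               # keep a trailing axis so it broadcasts per channel
rgb = np.clip((1 - alpha) * bg + alpha * rgba[..., :3], 0, 1)
print(img_as_ubyte(rgb).dtype)       # uint8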
Example #4
    def _generate_noisy_data(self):
        """
        This member function is called to generate the noise
        and apply it to the colour and/or depth image from the camera sensor.
        """
        for img_type in self._img_types:
            img = np.array(self._sensor.data[img_type])
            img = self._generate_noise(img)
            img = skitype.img_as_ubyte(img)
            img = Image.fromarray(img)
            self._data.update({img_type: img})
def get_all_images(path, names, suffix, gray=True):
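    """Read `<name>.<suffix>` images from `path`, optionally convert them to
    grayscale, and return them as a list of uint8 arrays."""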
    ims = []
    for n in names:
        fpath = f'{path}/{n}.{suffix}'
        im = imageio.imread(fpath)
        if gray:
            im = rgb2gray(im)

        im = img_as_ubyte(im)

        ims.append(im)

    return ims
def execute(outputsize):
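    """Resize the 48x48 face (PNG) and non-face (JPG) training crops to
    `outputsize`, convert them to uint8 (non-faces are also converted to
    grayscale first), and write them under data/train_set/13/{faces,nonfaces}."""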

    faces_dir_path = "data/train_set/48_48_faces_web_augmented"
    bkgs_dir_path = "data/train_set/48_48_nonfaces_aflw"

    target_path = "data/train_set/13"

    faces_dir = join(target_path, "faces")
    nonfaces_dir = join(target_path, "nonfaces")

    os.makedirs(nonfaces_dir)
    os.makedirs(faces_dir)

    img_faces = [
        f for f in listdir(faces_dir_path)
        if isfile(join(faces_dir_path, f)) and f.endswith("png")
    ]
    img_bkgs = [
        f for f in listdir(bkgs_dir_path)
        if isfile(join(bkgs_dir_path, f)) and f.endswith("jpg")
    ]

    for i, img_name in enumerate(img_faces):
        img_path = join(faces_dir_path, img_name)
        img = imread(img_path)
        resized_img = resize(img, outputsize)
        ubyte_img = img_as_ubyte(resized_img)
        imsave(join(faces_dir, img_name), ubyte_img)
        print "processed " + img_path

    for i, img_name in enumerate(img_bkgs):
        img_path = join(bkgs_dir_path, img_name)
        img = imread(img_path)
        gray_img = rgb2gray(img)
        resized_img = resize(gray_img, outputsize)
        ubyte_img = img_as_ubyte(resized_img)
        imsave(join(nonfaces_dir, img_name), ubyte_img)
        print "processed " + img_path
Example #7
    def _calculateStatistics(self, img, haralick=False, zernike=False):
        result = []
        # 3-bin histogram
        result.extend(mquantiles(img))
        # First four moments
        result.extend([img.mean(), img.var(), skew(img, axis=None), kurtosis(img, axis=None)])
        # Haralick features
        if haralick:
            integerImage = dtype.img_as_ubyte(img)
            result.extend(texture.haralick(integerImage).flatten())
        # Zernike moments
        if zernike:
            result.extend(zernike_moments(img, int(self.rows) / 2 + 1))
        return result
    def _mask_out_common_obstructions(self):
        """Mask out the sky and some common objects that can obstruct a facade 
        
        This is intended to be run prior to rectifications, since these things can prevent us from 
        correctly identifying the rectilinear features of a facade
        
        """
        features = driving.process_strip(
            channels_first(img_as_ubyte(self.data)))

        if self.data_mask is not None:
            occlusions = ~self.data_mask
        else:
            occlusions = np.zeros(self.data.shape[:2], dtype=bool)
        occlusions |= driving.occlusion(features)
        self.data_mask = ~occlusions
Example #10
def resize_imgs_in_dir(outputsize):
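    """Resize every JPG in the non-face directory to `outputsize`, convert to
    uint8, and write the results to data/newnonfaces/13/nonfaces."""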

    img_dir_path = "data/newnonfaces/48_48_non_faces_aflw"

    target_path = "data/newnonfaces/13/nonfaces"

    os.makedirs(target_path)

    img_faces = [
        f for f in listdir(img_dir_path)
        if isfile(join(img_dir_path, f)) and f.endswith("jpg")
    ]

    for i, img_name in enumerate(img_faces):
        img_path = join(img_dir_path, img_name)
        img = imread(img_path)
        resized_img = resize(img, outputsize)
        ubyte_img = img_as_ubyte(resized_img)
        imsave(join(target_path, img_name), ubyte_img)
        print "processed " + img_path
Example #12
def get_all_images(path, names, suffix, itype='gray'):
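    """Read `<name>.<suffix>` images from `path`, convert each one to the
    requested colour space ('gray', 'rgb' or 'hsv'), and return them as a
    list of uint8 arrays."""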
    ims = []
    for n in names:
        fpath = f'{path}/{n}.{suffix}'
        im = imageio.imread(fpath)
        if itype == 'gray':
            im = rgb2gray(im)
        elif itype == 'rgb' and len(im.shape) == 2:
            im = gray2rgb(im)
        elif itype == 'hsv' and len(im.shape) == 2:
            im = gray2rgb(im)
            im = rgb2hsv(im)
        elif itype == 'hsv' and len(im.shape) == 3:
            im = rgb2hsv(im)

        im = img_as_ubyte(im)

        ims.append(im)

    return ims
def evaluate_face_confidence_scores(state,
                                    folder="data/newnonfaces/13/nonfaces"):

    x = T.matrix("x")

    layer0_input = x.reshape((1, NUM_CHANNELS, 13, 13))

    net = twelve_net(layer0_input, None, relu, state)
    prediction = net.log_regression_layer.y_pred
    py_x = net.log_regression_layer.p_y_given_x

    test_model = theano.function(
        [x],
        [prediction, py_x, layer0_input],
    )
    target_path = "data/newnonfaces/13/nonface_faces"

    os.makedirs(target_path)

    output_dict = {}
    imgs = [
        f for f in listdir(folder)
        if isfile(join(folder, f)) and f.endswith("jpg")
    ]
    for i, img_name in enumerate(imgs):
        img_path = join(folder, img_name)
        img = io.imread(img_path)
        gray_img = rgb2gray(img)
        ubyte_img = img_as_ubyte(gray_img)
        [out_predict, out_py_x, out_face] = test_model(ubyte_img)
        if out_py_x[0, 1] > THRESHOLD:
            io.imsave(join(target_path, img_name), ubyte_img)
        output_dict[img_name] = out_py_x[0, 1]
        print "processed " + img_path

    save_dict_to_file(output_dict)
Example #14
def main():
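    """Extract oriented, scaled patches from a raster: for every feature in
    the input shapefiles, read a window around the feature, rotate and scale
    it to the requested patch size, write it out as GeoTIFF or JPEG, and
    optionally record the patch geometry in a CSV file."""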
    args = docopt(__doc__, version=VERSION)

    logparams = {}
    if args['--debug']:
        logparams.update(level=logging.DEBUG)
    elif args['--info']:
        logparams.update(level=logging.INFO)
    else:
        logparams.update(level=logging.CRITICAL)

    if args['--logfile'] != '':
        logparams.update(filename=args['--logfile'])

    logging.basicConfig(**logparams)

    logger = logging.getLogger('extract_patches')
    logger.debug('input \n {}'.format(args))

    assert isinstance(logger, logging.Logger)

    shapefiles = collect_filenames(args['-i'])
    if len(shapefiles) == 0:
        logger.error('No matching shapefiles for input `{}`'.format(args['-i']))
        return

    raster = args['-r']

    try:
        size = [int(x) for x in args['--size'].split(',')]
        patch_width, patch_height = size
        logger.debug("Set patch size to {} x {}".format(patch_width, patch_height))
    except:
        logger.error("Unable to parse option '--size'")
        return

    try:
        scale = float(args['--scale'])
        assert scale > 0
        logger.debug("Set scale to {}".format(scale))
    except:
        logger.error("Unable to parse option '--scale'")
        return

    silent = args['--noprogress']

    output_folder = args['--odir']
    try:
        if not os.path.isdir(output_folder):
            os.makedirs(output_folder)
            logger.debug("Created output folder '{}'".format(output_folder))
        else:
            logger.debug("Found existing output folder '{}'".format(output_folder))
    except:
        logger.error("Unable to find or create output directory `{}`".format(output_folder))
        return

    if args['--ojpg']:
        fmt = '.jpg'
    else:  # args['--otif']  (default)
        fmt = '.tif'
    logger.debug("Output format set to {}".format(fmt))

    clip = args['--vclip'] is not None
    if clip:
        clipmin, clipmax = [float(x) for x in args['--vclip'].split(',')]
        logger.debug("Clipping output to [{}, {}]".format(clipmin, clipmax))
    else:
        clipmin, clipmax = 0, 1
        logger.debug("Not clipping output -- assuming range of value is [{},{}]".format(clipmin, clipmax))

    stretch = args['--vstretch'] is not None
    if stretch:
        stretchmin, stretchmax = [float(x) for x in args['--vstretch'].split(',')]
        logger.debug("Output value range will be stretched to [{},{}]".format(stretchmin, stretchmax))
    else:
        logger.debug("Output values will not be stretched")

    if args['--csv']:
        csv_file_name = args['--csv']
        if os.path.isfile(csv_file_name):
            logger.error("CSV File already exists; please remove or rename it first.")
            return
        logger.debug("Writing to CSV File '{}'".format(csv_file_name))
    else:
        csv_file_name = None
        logger.debug("No CSV output")

    # Estimate number of shape features
    count = 0
    if not silent:
        pbar = ProgressBar(len(shapefiles), ['Counting Features:', Percentage(), ' ', Bar(), ' ', ETA()])
        pbar.start()
    for i, s in enumerate(shapefiles):
        vector = ogr.Open(s)
        layer = vector.GetLayer()
        count += layer.GetFeatureCount()
        if not silent:
            pbar.update(i)
    if not silent:
        pbar.finish()

    logger.debug("Counted {} features in {} shapefiles".format(count, len(shapefiles)))

    # Write header for CSV file
    if csv_file_name is not None:
        with open(os.path.join(output_folder, csv_file_name), 'w') as csvf:
            csvf.write('gx, gy, r1, r2, theta, patch_width, patch_height, image_name\n')

    with rasterio.open(raster) as rf:
        assert isinstance(rf, RasterReader)
        srs = SpatialReference(str(rf.crs_wkt))
        affine = rf.affine
        geo_to_pixels = ~affine

        logging.debug("Output CRS will be '''{}'''".format(srs.ExportToPrettyWkt()))

        if not silent:
            pbar = ProgressBar(count, ['Exporting Patches:', Percentage(), ' ', Bar(), ' ', ETA()])
            pbar.start()
        for sf in shapefiles:
            logger.info("Processing input '{}'".format(sf))
            vector = ogr.Open(sf)
            assert isinstance(vector, ogr.DataSource)

            layer = vector.GetLayer()
            assert isinstance(layer, ogr.Layer)

            if not srs.IsSame(layer.GetSpatialRef()):
                logger.warning("Coordinate system mismatch (it's OK, I will reproject)")

            for f in layer:
                if not silent:
                    pbar.update(pbar.currval + 1)
                geom = f.GetGeometryRef()
                assert isinstance(geom, ogr.Geometry)
                geom = geom.TransformTo(srs)
                points = geom.GetPoints()
                source = points[0]
                target = points[-1]
                sx, sy = geo_to_pixels * source
                tx, ty = geo_to_pixels * target
                if len(points) == 2:
                    cx, cy = (sx + tx) / 2, (sy + ty) / 2
                else:
                    cx, cy = geo_to_pixels * points[1]
                dx, dy = (tx - sx), (ty - sy)
                theta = degrees(atan2(dy, dx))  # In PIXELS, CCW from +x. Not necessarily CCW from E (or CW from N)
                r1 = hypot(tx - cx, ty - cy)
                r2 = hypot(cx - sx, cy - sy)
                r1, r2 = max(r1, r2), min(r1, r2)  # For 3 points, we assume two radii. Else these are duplicates.
                gx, gy = affine * (cx, cy)  # Geographic coordinates (e.g. lat lon) of the center.

                # We read a square slightly larger than the scaled version of our patch, so that
                # we can safely rotate the raster without missing pixels in the corners.

                box_radius = hypot(patch_width, patch_height) / (2.0 * scale)
                x0, x1 = int(floor(cx - box_radius)), int(ceil(cx + box_radius))
                y0, y1 = int(floor(cy - box_radius)), int(ceil(cy + box_radius))

                # save patch...

                kwargs = rf.meta
                patch_affine = (affine * Affine.translation(cx, cy) *
                                Affine.rotation(angle=-theta) * Affine.translation(-patch_width / 2.,
                                                                                   -patch_height / 2.))

                if fmt == '.tif':
                    kwargs.update(
                        driver='GTiff',
                        compress='lzw',
                        dtype=numpy.float32
                    )
                elif fmt == '.jpg':
                    kwargs.update(
                        driver='JPEG',
                        quality=90,
                        dtype=numpy.uint8
                    )

                kwargs.update(
                    transform=patch_affine,
                    width=patch_width,
                    height=patch_height
                )

                box_radius *= scale
                name = hashlib.md5(str(patch_affine) + raster).hexdigest()
                image_name = os.path.join(output_folder, name + fmt)

                if csv_file_name is not None:
                    with open(os.path.join(output_folder, csv_file_name), 'a+') as csvf:
                        fields = gx, gy, r1, r2, theta, patch_width, patch_height, image_name
                        csvf.write(','.join([str(_) for _ in fields]) + '\n')

                with rasterio.open(image_name, 'w', **kwargs) as pf:
                    assert isinstance(pf, RasterUpdater)
                    for band in range(rf.count):
                        patch = rf.read(band + 1, window=((y0, y1), (x0, x1)), boundless=True, )
                        patch = patch.astype(numpy.float32)
                        patch_rotated = rotate(patch, theta, reshape=False)
                        patch_scaled = zoom(patch_rotated, scale)
                        i0 = int(round(box_radius - patch_height / 2.))
                        i1 = i0 + patch_height
                        j0 = int(round(box_radius - patch_width / 2.))
                        j1 = j0 + patch_width
                        patch_cropped = patch_scaled[i0:i1, j0:j1]

                        if clip:
                            patch_cropped = numpy.clip(patch_cropped, clipmin, clipmax)
                        if stretch:
                            patch_cropped = (patch_cropped - clipmin) / (clipmax - clipmin)
                            patch_cropped = patch_cropped * (stretchmax - stretchmin) + stretchmin

                        if fmt == '.jpg':
                            # JPEG does not support floating point output. All we can do is 8 bit
                            # (Python has no 12-bit array type)
                            patch_cropped = img_as_ubyte(patch_cropped.clip(-1, 1))
                        pf.write(patch_cropped, band + 1)
        if not silent:
            pbar.finish()

        logger.debug("Finished.")
def check_for_image():
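    """Slide a 13x13 window over progressively larger resizes of a test image,
    score every window with the trained network, and plot the detections next
    to the resulting confidence map."""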

    import skimage
    import skimage.data
    import skimage.util

    img = skimage.data.lena()
    #img = io.imread("data/originalPics/2002/07/19/big/img_130.jpg")

    #img = io.imread("data/train_set/dataset/13/train/faces/00027998.png")

    img = rgb2gray(img)

    img = img_as_ubyte(img)
    img = img[:, :, np.newaxis]

    #img = imread("data/processed_images/13_train_set_aflw/train/faces/001111.jpg")

    for mul in xrange(3, 20):

        im = resize(img, (mul * 10, mul * 10))
        im = img_as_ubyte(im)
        arr = skimage.util.view_as_windows(im, (13, 13, NUM_CHANNELS),
                                           step=STEP_SIZE)
        f = file(LOAD_STATE_FROM_FILE, 'rb')
        obj = pickle.load(f)
        f.close()

        arr = np.rollaxis(arr, 5, 3)

        borrow = True
        shared_x = theano.shared(
            np.asarray(arr, dtype=theano.config.floatX),  # @UndefinedVariable
            borrow=borrow)

        iT = T.lscalar()
        jT = T.lscalar()

        x = T.tensor3("x")
        layer0_input = x.reshape((1, NUM_CHANNELS, 13, 13))

        net = twelve_net(layer0_input, None, relu, obj)
        prediction = net.log_regression_layer.y_pred
        py_x = net.log_regression_layer.p_y_given_x

        test_model = theano.function([iT, jT],
                                     [prediction, py_x, layer0_input],
                                     givens={x: shared_x[iT, jT, 0, :, :, :]})

        rows = arr.shape[0]
        cols = arr.shape[1]

        count = 0

        faces = []

        fig, axarr = plt.subplots(1, 2)
        confidence_map = np.zeros((rows, cols))

        for i in xrange(rows):
            for j in xrange(cols):
                [y, p_y_given_x, f] = test_model(i, j)
                f = f.reshape(NUM_CHANNELS, 13, 13)
                f = np.rollaxis(f, 0, 3)
                f = f[:, :, 0]
                confidence_map[i, j] = p_y_given_x[0, 1]

                #                 plt.imshow(f,cmap = "Greys_r")
                #                 plt.show()

                if y == 1:
                    count += 1
                    faces.append([i * STEP_SIZE, j * STEP_SIZE])
                    print(i, j)

        print("Check")
        # hack for gray
        im = im[:, :, 0]

        confidence_map = np.pad(confidence_map, 6, 'constant')

        axarr[0].get_xaxis().set_visible(False)
        axarr[0].get_yaxis().set_visible(False)
        axarr[1].get_xaxis().set_visible(False)
        axarr[1].get_yaxis().set_visible(False)

        axarr[1].imshow(confidence_map, cmap="Greys_r")
        axarr[0].imshow(im, cmap="Greys_r")
        img_desc = plt.gca()

        #plt.imshow(im,cmap = "Greys_r")
        #img_desc = plt.gca()

        for point in faces:
            topleftx = point[1]
            toplefty = point[0]

            rect = patches.Rectangle((topleftx, toplefty),
                                     13,
                                     13,
                                     fill=False,
                                     color='c')

            axarr[0].add_patch(rect)

        print(count)
        fig_name = "lena_" + str(mul) + ".png"
        plt.savefig(fig_name, bbox_inches='tight')
Example #17
    def set_colors(self, image):
        self.color_data[...] = img_as_ubyte(image)
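
A standalone sketch of the in-place buffer update pattern used above (buffer name and shapes are hypothetical):

import numpy as np
from skimage import img_as_ubyte

color_data = np.empty((32, 32, 3), dtype=np.uint8)   # preallocated display buffer (hypothetical)
image = np.random.rand(32, 32, 3)                    # float image in [0, 1]
color_data[...] = img_as_ubyte(image)                # write converted pixels into the existing buffer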