예제 #1
0
def region_writer(panorama: Panoramas, lp=False, dlib=False, google=False):
    """
    Persist the detected regions of a panorama as a CSV file in the objectstore.

    :param panorama: panorama that has been sent for detection
    :param lp: flag to denote selected regions are Licenseplates
    :param dlib: flag to denote if selected regions are faces, detected by dlib
    :param google: flag to denote if selected regions are detected by google
    :return: None
    """
    store = ObjectStore()
    buffer = io.StringIO()
    csv_out = csv.writer(buffer)

    # Header row first, then one row per detected region of this panorama.
    csv_out.writerow(['region_type', 'left_top_x', 'left_top_y', 'right_top_x', 'right_top_y', 'right_bottom_x',
                      'right_bottom_y', 'left_bottom_x', 'left_bottom_y', 'detected_by'])
    csv_out.writerows(
        [r.region_type, r.left_top_x, r.left_top_y, r.right_top_x,
         r.right_top_y, r.right_bottom_x, r.right_bottom_y, r.left_bottom_x,
         r.left_bottom_y, r.detected_by]
        for r in Region.objects.filter(pano_id=panorama.pano_id)
    )

    if lp:
        csv_name = 'results/{}{}/regions_lp.csv'.format(panorama.path, panorama.filename[:-4])
    else:
        # Face-region files carry a suffix identifying the detector.
        if dlib:
            detector_suffix = 'd'
        elif google:
            detector_suffix = 'g'
        else:
            detector_suffix = ''
        csv_name = 'results/{}{}/regions_f{}.csv'.format(panorama.path, panorama.filename[:-4], detector_suffix)
    log.warning('saving {}'.format(csv_name))

    store.put_into_datapunt_store(csv_name, buffer.getvalue(), 'text/csv')
예제 #2
0
import logging

from swiftclient import ClientException
import re

from datasets.panoramas.models import Panoramas
from panorama.etl.check_objectstore import is_increment_uptodate, increment_exists
from panorama.etl.db_actions import restore_increment, clear_database, dump_increment
from panorama.etl.etl_settings import DUMP_FILENAME, INCREMENTS_CONTAINER
from panorama.objectstore_settings import PANORAMA_CONTAINERS
from panorama.shared.object_store import ObjectStore

log = logging.getLogger(__name__)
objectstore = ObjectStore()


def _remove_stale_increment(increment_path):
    """Remove a stale increment dump from the objectstore.

        _remove_stale_increment('2015/05/07/')

    will remove the file: increments/2015/05/07/increment.dump from the objectstore

    :param increment_path: path within the increments container the increment
        is based on, e.g. '2015/05/07/' (must end with a '/' so that the
        DUMP_FILENAME concatenates into a valid object name)
    :return: None
    """

    # NOTE(review): the except clause belonging to this try lies outside the
    # visible chunk; presumably it handles the swiftclient ClientException
    # raised when the object does not exist — confirm against the full file.
    try:
        objectstore.panorama_conn.delete_object(
            INCREMENTS_CONTAINER, f"{increment_path}{DUMP_FILENAME}")
예제 #3
0
import io

from numpy import squeeze, dsplit, dstack, array
from scipy import misc
from scipy.ndimage import map_coordinates
from PIL import Image, ImageOps
import cv2

from panorama.shared.object_store import ObjectStore

PANORAMA_WIDTH = 8000
PANORAMA_HEIGHT = 4000
SAMPLE_WIDTH = 480
SAMPLE_HEIGHT = 320

object_store = ObjectStore()


def image2byte_array(image: "Image", quality=80, image_format='JPEG'):
    """
    Serialize a PIL image to an in-memory byte string.

    :param image: PIL image to serialize
    :param quality: encoder quality passed to PIL's save (default 80;
        for JPEG the meaningful range is 1-95)
    :param image_format: output format understood by PIL (default 'JPEG',
        preserving the original behavior)
    :return: bytes of the encoded image
    """
    # Encode into an in-memory buffer instead of a file on disk.
    img_byte_array = io.BytesIO()
    image.save(img_byte_array, quality=quality, format=image_format)
    return img_byte_array.getvalue()


def image2byte_array_sized(image: Image, size=1000000):
    """
예제 #4
0
class ImportRegions(object):
    """
    Simple import script.
    It looks through the results-dir for regions. Expects panoramas in database
    Used for one-off (hopefully) restore of the given data.
    """
    object_store = ObjectStore()

    def process(self):
        """Walk results/<year>/<month>/<day>/ in the objectstore and bulk-insert
        every region found in the detection CSV files there."""
        regions = []
        for year in self.object_store.get_datapunt_subdirs('results/'):
            for month in self.object_store.get_datapunt_subdirs(year):
                for day in self.object_store.get_datapunt_subdirs(month):
                    csvs = self.object_store.get_detection_csvs(day)
                    for csv_file in csvs:
                        new_regions = self.process_detection_csvs(csv_file)
                        regions.extend(new_regions)
                        # Flush periodically so memory use stays bounded.
                        if len(regions) > 1000:
                            Region.objects.bulk_create(regions,
                                                       batch_size=BATCH_SIZE)
                            regions = []

        Region.objects.bulk_create(regions, batch_size=BATCH_SIZE)

    def process_detection_csvs(self, csv_file):
        """Parse one detection CSV object into (unsaved) Region instances.

        :param csv_file: objectstore descriptor dict with a 'name' key like
            'results/<path>/<pano>/<file>.csv'
        :return: list of unsaved Region model instances (possibly empty)
        """
        regions = []

        # The pano_id is encoded in the two path segments before the file name.
        pano_id = '_'.join(csv_file['name'].split('/')[-3:-1])
        csv_file_iterator = iter(
            self.object_store.get_datapunt_store_object(
                csv_file['name']).decode("utf-8").split('\n'))
        rows = csv.reader(csv_file_iterator,
                          delimiter=',',
                          quotechar='"',
                          quoting=csv.QUOTE_MINIMAL)
        # Guard against a completely empty CSV object: the original
        # bare next(rows) raised StopIteration and aborted the import.
        headers = next(rows, None)
        if headers is None:
            return regions
        panorama = None
        for row in rows:
            # Fetch the panorama lazily — only when there is at least one data row.
            if panorama is None:
                panorama = Panoramas.objects.get(pano_id=pano_id)
            model_data = dict(zip(headers, row))
            region = self.process_region_row(model_data, panorama)
            if region:
                regions.append(region)

        return regions

    def process_region_row(self, model_data, panorama: Panoramas):
        """Build a Region from one CSV row dict, or return None for rows
        without a region_type (e.g. the trailing empty line of the file)."""
        try:
            region_type = model_data['region_type']
        except KeyError:
            return None

        return Region(pano_id=panorama.pano_id,
                      region_type=region_type,
                      detected_by=model_data['detected_by'],
                      left_top_x=model_data['left_top_x'],
                      left_top_y=model_data['left_top_y'],
                      right_top_x=model_data['right_top_x'],
                      right_top_y=model_data['right_top_y'],
                      right_bottom_x=model_data['right_bottom_x'],
                      right_bottom_y=model_data['right_bottom_y'],
                      left_bottom_x=model_data['left_bottom_x'],
                      left_bottom_y=model_data['left_bottom_y'])