Example #1
  def populate(self, channel_name, resolution, start_values, end_values, time_limit, neariso=False):
    proj = self.resource_interface.getProject()
    ch = NDChannel.fromName(proj, channel_name)
    cubedim = proj.datasetcfg.get_cubedim(resolution)
    image_size = proj.datasetcfg.get_imagesize(resolution)
  
    if time_limit is None:
      time_limit = ch.time_range[1]

    if end_values == [0,0,0]:
      end_values = image_size

    [x_start, y_start, z_start] = [s // c for s, c in zip(start_values, cubedim)]
    x_end = (end_values[0] - 1) // cubedim[0] + 1
    y_end = (end_values[1] - 1) // cubedim[1] + 1
    z_end = (end_values[2] - 1) // cubedim[2] + 1

    db = SpatialDB(proj)
  
    for time_index in range(ch.time_range[0], time_limit, 1):
      for z_index in range(z_start, z_end, 1):
        for y_index in range(y_start, y_end, 1):
          for x_index in range(x_start, x_end, 1):
            
            print("Populating Cache with T:{},X:{},Y:{},Z:{}".format(time_index, x_index*cubedim[0], y_index*cubedim[1], z_index*cubedim[2]))
            zidx = XYZMorton([x_index, y_index, z_index])
            db.getCubes(ch, [time_index], [zidx], resolution, neariso=neariso)
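
The loop above walks the dataset x-fastest and converts each cube coordinate to a Morton (Z-order) index before fetching. As a reference, here is a minimal pure-Python sketch of 3D Morton encoding; ndlib's XYZMorton is compiled and its exact bit order may differ, so treat this as an illustration of the technique only:

def xyz_morton(x, y, z, bits=21):
    # Interleave the bits of x, y, z (x in the least-significant position).
    code = 0
    for i in range(bits):
        code |= ((x >> i) & 1) << (3 * i)
        code |= ((y >> i) & 1) << (3 * i + 1)
        code |= ((z >> i) & 1) << (3 * i + 2)
    return code

assert xyz_morton(1, 0, 0) == 1 and xyz_morton(0, 0, 1) == 4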
Example #2
    def test_page_in_multi_cuboids_y_dir(self):
        # Generate random data
        cube1 = Cube.create_cube(self.resource, [self.x_dim, self.y_dim * 2, self.z_dim])
        cube1.random()
        cube1.morton_id = 0

        sp = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)

        sp.write_cuboid(self.resource, (0, 0, 0), 0, cube1.data)

        cube2 = sp.cutout(self.resource, (0, 0, 0), (self.x_dim, self.y_dim * 2, self.z_dim), 0)

        # Make sure data is the same
        np.testing.assert_array_equal(cube1.data, cube2.data)

        # Delete everything in the cache
        sp.kvio.cache_client.flushdb()

        # Force use of lambda function.
        sp.read_lambda_threshold = 0

        # Get the data again, which should trigger lambda page in.
        cube3 = sp.cutout(self.resource, (0, 0, 0), (self.x_dim, self.y_dim * 2, self.z_dim), 0)

        # Make sure the data is the same
        np.testing.assert_array_equal(cube1.data, cube3.data)
Example #3
    def test_delayed_write_daemon_simple(self):
        """Test handling delayed writes"""
        sp = SpatialDB(self.kvio_config,
                       self.state_config,
                       self.object_store_config)
        dwd = DelayedWriteDaemon("boss-delayedwrited-test.pid")

        # Create a single delayed write
        cube1 = Cube.create_cube(self.resource, [512, 512, 16])
        cube1.random()
        cube1.morton_id = 0
        res = 0
        time_sample = 0

        write_cuboid_base = "WRITE-CUBOID&{}&{}".format(self.resource.get_lookup_key(), 0)

        write_cuboid_key = sp.kvio.insert_cube_in_write_buffer(write_cuboid_base, res, cube1.morton_id,
                                                               cube1.to_blosc_by_time_index(time_sample))

        sp.cache_state.add_to_delayed_write(write_cuboid_key,
                                            self.resource.get_lookup_key(),
                                            res,
                                            cube1.morton_id,
                                            time_sample,
                                            self.resource.to_json())

        # Use Daemon To handle writes
        dwd.process(sp)
        time.sleep(30)

        # Make sure they went through
        cube2 = sp.cutout(self.resource, (0, 0, 0), (512, 512, 16), 0)

        np.testing.assert_array_equal(cube1.data, cube2.data)

        # Make sure delay key got deleted
        keys = sp.cache_state.get_all_delayed_write_keys()
        assert not keys
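
The fixed time.sleep(30) above makes this test slow and potentially flaky. A hypothetical polling helper, built only from calls already used in the test, could replace it:

import time

def wait_for_delayed_writes(sp, timeout=30.0, interval=1.0):
    # Poll until every delayed-write key has been flushed, or the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if not sp.cache_state.get_all_delayed_write_keys():
            return True
        time.sleep(interval)
    return False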
Example #4
    def setUp(self):

        # Get data from nose2 layer based setup

        # Setup Data
        self.data = self.layer.setup_helper.get_image8_dict()
        self.resource = BossResourceBasic(self.data)

        # Setup config
        self.kvio_config = self.layer.kvio_config
        self.state_config = self.layer.state_config
        self.object_store_config = self.layer.object_store_config

        client = redis.StrictRedis(host=self.kvio_config['cache_host'],
                                   port=6379, db=1, decode_responses=False)
        client.flushdb()
        client = redis.StrictRedis(host=self.state_config['cache_state_host'],
                                   port=6379, db=1, decode_responses=False)
        client.flushdb()

        self.cache_miss = CacheMissDaemon('foo')
        self.sp = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
        self.cache_miss.set_spatialdb(self.sp)
Example #5
    def setUp(self):

        # Setup Data
        self.data = self.layer.setup_helper.get_image8_dict()
        self.resource = BossResourceBasic(self.data)

        # Setup config
        self.kvio_config = self.layer.kvio_config
        self.state_config = self.layer.state_config
        self.object_store_config = self.layer.object_store_config

        client = redis.StrictRedis(host=self.kvio_config['cache_host'],
                                   port=6379, db=1, decode_responses=False)
        client.flushdb()
        client = redis.StrictRedis(host=self.state_config['cache_state_host'],
                                   port=6379, db=1, decode_responses=False)
        client.flushdb()

        # Suppress ResourceWarning messages about unclosed connections.
        warnings.simplefilter('ignore')
        self.prefetch = PrefetchDaemon('foo')
        self.sp = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
        self.prefetch.set_spatialdb(self.sp)
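
Examples #4 and #5 repeat the same Redis flush boilerplate. A small shared helper (a sketch that reuses only the configuration keys shown above) would reduce each setUp and tearDown to a single call:

import redis

def flush_test_redis(kvio_config, state_config, port=6379, db=1):
    # Flush the cache and cache-state Redis databases used by the tests.
    for host in (kvio_config['cache_host'], state_config['cache_state_host']):
        redis.StrictRedis(host=host, port=port, db=db,
                          decode_responses=False).flushdb()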
Example #6
def handler(event, context):
    # Load settings
    SETTINGS = BossSettings.load()

    # Used as a guard against trying to delete the SQS message when lambda is
    # triggered by SQS.
    sqs_triggered = 'Records' in event and len(event['Records']) > 0

    if sqs_triggered:
        # Lambda invoked by an SQS trigger.
        msg_data = json.loads(event['Records'][0]['body'])
        # Load the project info from the chunk key you are processing
        chunk_key = msg_data['chunk_key']
        proj_info = BossIngestProj.fromSupercuboidKey(chunk_key)
        proj_info.job_id = msg_data['ingest_job']
    else:
        # Standard async invoke of this lambda.

        # Load the project info from the chunk key you are processing
        proj_info = BossIngestProj.fromSupercuboidKey(event["chunk_key"])
        proj_info.job_id = event["ingest_job"]

        # Get message from SQS ingest queue, try for ~6 seconds
        rx_cnt = 0
        msg_data = None
        msg_id = None
        msg_rx_handle = None
        while rx_cnt < 6:
            ingest_queue = IngestQueue(proj_info)
            try:
                msg = [x for x in ingest_queue.receiveMessage()]
            # StopIteration may be converted to a RuntimeError.
            except (StopIteration, RuntimeError):
                msg = None

            if msg:
                msg = msg[0]
                print("MESSAGE: {}".format(msg))
                print(len(msg))
                msg_id = msg[0]
                msg_rx_handle = msg[1]
                msg_data = json.loads(msg[2])
                print("MESSAGE DATA: {}".format(msg_data))
                break
            else:
                rx_cnt += 1
                print("No message found. Try {} of 6".format(rx_cnt))
                time.sleep(1)

        if not msg_id:
            # No tiles ready to ingest.
            print("No ingest message available")
            return

        # Get the chunk key of the tiles to ingest.
        chunk_key = msg_data['chunk_key']


    tile_error_queue = TileErrorQueue(proj_info)

    print("Ingesting Chunk {}".format(chunk_key))
    tiles_in_chunk = int(chunk_key.split('&')[1])

    # Setup SPDB instance
    sp = SpatialDB(msg_data['parameters']["KVIO_SETTINGS"],
                   msg_data['parameters']["STATEIO_CONFIG"],
                   msg_data['parameters']["OBJECTIO_CONFIG"])

    # Get tile list from Tile Index Table
    tile_index_db = BossTileIndexDB(proj_info.project_name)
    # tile_index_result (dict): keys are S3 object keys of the tiles comprising the chunk.
    tile_index_result = tile_index_db.getCuboid(msg_data["chunk_key"], int(msg_data["ingest_job"]))
    if tile_index_result is None:
        # If chunk_key is gone, another lambda uploaded the cuboids and deleted the chunk_key afterwards.
        if not sqs_triggered:
            # Remove message so it's not redelivered.
            ingest_queue.deleteMessage(msg_id, msg_rx_handle)

        print("Aborting due to chunk key missing from tile index table")
        return

    # Sort the tile keys
    print("Tile Keys: {}".format(tile_index_result["tile_uploaded_map"]))
    tile_key_list = [x.rsplit("&", 2) for x in tile_index_result["tile_uploaded_map"].keys()]
    if len(tile_key_list) < tiles_in_chunk:
        print("Not a full set of 16 tiles. Assuming it has handled already, tiles: {}".format(len(tile_key_list)))
        if not sqs_triggered:
            ingest_queue.deleteMessage(msg_id, msg_rx_handle)
        return
    tile_key_list = sorted(tile_key_list, key=lambda x: int(x[1]))
    tile_key_list = ["&".join(x) for x in tile_key_list]
    print("Sorted Tile Keys: {}".format(tile_key_list))

    # Augment the resource JSON data (pruned due to S3 metadata size limits) so it will instantiate properly
    resource_dict = msg_data['parameters']['resource']
    _, exp_name, ch_name = resource_dict["boss_key"].split("&")

    resource_dict["channel"]["name"] = ch_name
    resource_dict["channel"]["description"] = ""
    resource_dict["channel"]["sources"] = []
    resource_dict["channel"]["related"] = []
    resource_dict["channel"]["default_time_sample"] = 0
    resource_dict["channel"]["downsample_status"] = "NOT_DOWNSAMPLED"

    resource_dict["experiment"]["name"] = exp_name
    resource_dict["experiment"]["description"] = ""
    resource_dict["experiment"]["num_time_samples"] = 1
    resource_dict["experiment"]["time_step"] = None
    resource_dict["experiment"]["time_step_unit"] = None

    resource_dict["coord_frame"]["name"] = "cf"
    resource_dict["coord_frame"]["name"] = ""
    resource_dict["coord_frame"]["x_start"] = 0
    resource_dict["coord_frame"]["x_stop"] = 100000
    resource_dict["coord_frame"]["y_start"] = 0
    resource_dict["coord_frame"]["y_stop"] = 100000
    resource_dict["coord_frame"]["z_start"] = 0
    resource_dict["coord_frame"]["z_stop"] = 100000
    resource_dict["coord_frame"]["voxel_unit"] = "nanometers"

    # Setup the resource
    resource = BossResourceBasic()
    resource.from_dict(resource_dict)
    dtype = resource.get_numpy_data_type()

    # read all tiles from bucket into a slab
    tile_bucket = TileBucket(proj_info.project_name)
    data = []
    num_z_slices = 0
    for tile_key in tile_key_list:
        try:
            image_data, message_id, receipt_handle, metadata = tile_bucket.getObjectByKey(tile_key)
        except KeyError:
            print('Key: {} not found in tile bucket, assuming redelivered SQS message and aborting.'.format(
                tile_key))
            if not sqs_triggered:
                # Remove message so it's not redelivered.
                ingest_queue.deleteMessage(msg_id, msg_rx_handle)
            print("Aborting due to missing tile in bucket")
            return

        image_bytes = BytesIO(image_data)
        image_size = image_bytes.getbuffer().nbytes

        # Get tile size from metadata; needed to shape a black tile if the actual tile is corrupt.
        if 'x_size' in metadata:
            tile_size_x = metadata['x_size']
        else:
            print('MetadataMissing: x_size not in tile metadata:  using 1024.')
            tile_size_x = 1024

        if 'y_size' in metadata:
            tile_size_y = metadata['y_size']
        else:
            print('MetadataMissing: y_size not in tile metadata:  using 1024.')
            tile_size_y = 1024

        if image_size == 0:
            print('TileError: Zero length tile, using black instead: {}'.format(tile_key))
            error_msg = 'Zero length tile'
            enqueue_tile_error(tile_error_queue, tile_key, chunk_key, error_msg)
            tile_img = np.zeros((tile_size_y, tile_size_x), dtype=dtype)  # (rows, cols) == (y, x)
        else:
            try:
                # DP NOTE: Issues when specifying dtype in the asarray function with Pillow ver 8.3.1. 
                # Fixed by separating array instantiation and dtype assignment. 
                tile_img = np.asarray(Image.open(image_bytes))
                tile_img = tile_img.astype(dtype)
            except TypeError as te:
                print('TileError: Incomplete tile, using black instead (tile_size_in_bytes, tile_key): {}, {}'
                      .format(image_size, tile_key))
                error_msg = 'Incomplete tile'
                enqueue_tile_error(tile_error_queue, tile_key, chunk_key, error_msg)
                tile_img = np.zeros((tile_size_y, tile_size_x), dtype=dtype)
            except OSError as oe:
                print('TileError: OSError, using black instead (tile_size_in_bytes, tile_key): {}, {} ErrorMessage: {}'
                      .format(image_size, tile_key, oe))
                error_msg = 'OSError: {}'.format(oe)
                enqueue_tile_error(tile_error_queue, tile_key, chunk_key, error_msg)
                tile_img = np.zeros((tile_size_y, tile_size_x), dtype=dtype)

        data.append(tile_img)
        num_z_slices += 1


    # Make 3D array of image data, stacked as (z, y, x) at this point
    chunk_data = np.array(data)
    del data
    tile_dims = chunk_data.shape

    # Break into Cube instances
    print("Tile Dims: {}".format(tile_dims))
    print("Num Z Slices: {}".format(num_z_slices))
    num_x_cuboids = int(math.ceil(tile_dims[2] / CUBOIDSIZE[proj_info.resolution][0]))
    num_y_cuboids = int(math.ceil(tile_dims[1] / CUBOIDSIZE[proj_info.resolution][1]))

    print("Num X Cuboids: {}".format(num_x_cuboids))
    print("Num Y Cuboids: {}".format(num_y_cuboids))

    chunk_key_parts = BossUtil.decode_chunk_key(chunk_key)
    t_index = chunk_key_parts['t_index']
    for x_idx in range(0, num_x_cuboids):
        for y_idx in range(0, num_y_cuboids):
            # TODO: check time series support
            cube = Cube.create_cube(resource, CUBOIDSIZE[proj_info.resolution])
            cube.zeros()

            # Compute Morton ID
            # TODO: verify Morton indices correct!
            print(chunk_key_parts)
            morton_x_ind = x_idx + (chunk_key_parts["x_index"] * num_x_cuboids)
            morton_y_ind = y_idx + (chunk_key_parts["y_index"] * num_y_cuboids)
            print("Morton X: {}".format(morton_x_ind))
            print("Morton Y: {}".format(morton_y_ind))
            morton_index = XYZMorton([morton_x_ind, morton_y_ind, int(chunk_key_parts['z_index'])])

            # Insert sub-region from chunk_data into cuboid
            x_start = x_idx * CUBOIDSIZE[proj_info.resolution][0]
            x_end = x_start + CUBOIDSIZE[proj_info.resolution][0]
            x_end = min(x_end, tile_dims[2])
            y_start = y_idx * CUBOIDSIZE[proj_info.resolution][1]
            y_end = y_start + CUBOIDSIZE[proj_info.resolution][1]
            y_end = min(y_end, tile_dims[1])
            z_end = CUBOIDSIZE[proj_info.resolution][2]
            # TODO: get sub-array w/o making a copy.
            print("Yrange: {}".format(y_end - y_start))
            print("Xrange: {}".format(x_end - x_start))
            print("X start: {}".format(x_start))
            print("X stop: {}".format(x_end))
            cube.data[0, 0:num_z_slices, 0:(y_end - y_start), 0:(x_end - x_start)] = \
                chunk_data[0:num_z_slices, y_start:y_end, x_start:x_end]

            # Create object key
            object_key = sp.objectio.generate_object_key(resource, proj_info.resolution, t_index, morton_index)
            print("Object Key: {}".format(object_key))

            # Put object in S3
            sp.objectio.put_objects([object_key], [cube.to_blosc()])

            # Add object to index
            sp.objectio.add_cuboid_to_index(object_key, ingest_job=int(msg_data["ingest_job"]))

            # Update id indices if this is an annotation channel
            # We no longer index during ingest.
            #if resource.data['channel']['type'] == 'annotation':
            #   try:
            #       sp.objectio.update_id_indices(
            #           resource, proj_info.resolution, [object_key], [cube.data])
            #   except SpdbError as ex:
            #       sns_client = boto3.client('sns')
            #       topic_arn = msg_data['parameters']["OBJECTIO_CONFIG"]["prod_mailing_list"]
            #       msg = 'During ingest:\n{}\nCollection: {}\nExperiment: {}\n Channel: {}\n'.format(
            #           ex.message,
            #           resource.data['collection']['name'],
            #           resource.data['experiment']['name'],
            #           resource.data['channel']['name'])
            #       sns_client.publish(
            #           TopicArn=topic_arn,
            #           Subject='Object services misuse',
            #           Message=msg)

    lambda_client = boto3.client('lambda', region_name=SETTINGS.REGION_NAME)

    names = AWSNames.from_lambda(context.function_name)

    delete_tiles_data = {
        'tile_key_list': tile_key_list,
        'region': SETTINGS.REGION_NAME,
        'bucket': tile_bucket.bucket.name
    }

    # Delete tiles from tile bucket.
    lambda_client.invoke(
        FunctionName=names.delete_tile_objs.lambda_,
        InvocationType='Event',
        Payload=json.dumps(delete_tiles_data).encode()
    )       

    delete_tile_entry_data = {
        'tile_index': tile_index_db.table.name,
        'region': SETTINGS.REGION_NAME,
        'chunk_key': chunk_key,
        'task_id': msg_data['ingest_job']
    }

    # Delete entry from tile index.
    lambda_client.invoke(
        FunctionName=names.delete_tile_index_entry.lambda_,
        InvocationType='Event',
        Payload=json.dumps(delete_tile_entry_data).encode()
    )       

    if not sqs_triggered:
        # Delete message since it was processed successfully
        ingest_queue.deleteMessage(msg_id, msg_rx_handle)
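
The tile-reading loop in this handler repeats the black-tile fallback in three branches. One possible extraction, sketched with only names that already appear in the handler (np, Image, enqueue_tile_error), is:

def read_tile(image_bytes, image_size, tile_key, chunk_key, shape, dtype,
              tile_error_queue):
    # Decode one tile; substitute a black tile (and record an error) on failure.
    if image_size == 0:
        enqueue_tile_error(tile_error_queue, tile_key, chunk_key, 'Zero length tile')
        return np.zeros(shape, dtype=dtype)
    try:
        # Instantiate first, then cast: works around a Pillow 8.3.1 dtype issue.
        return np.asarray(Image.open(image_bytes)).astype(dtype)
    except (TypeError, OSError) as err:
        enqueue_tile_error(tile_error_queue, tile_key, chunk_key, str(err))
        return np.zeros(shape, dtype=dtype)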
Example #7
class IntegrationTestPrefetchDaemon(unittest.TestCase):
    layer = AWSSetupLayer

    def setUp(self):

        # Setup Data
        self.data = self.layer.setup_helper.get_image8_dict()
        self.resource = BossResourceBasic(self.data)

        # Setup config
        self.kvio_config = self.layer.kvio_config
        self.state_config = self.layer.state_config
        self.object_store_config = self.layer.object_store_config

        client = redis.StrictRedis(host=self.kvio_config['cache_host'],
                                   port=6379, db=1, decode_responses=False)
        client.flushdb()
        client = redis.StrictRedis(host=self.state_config['cache_state_host'],
                                   port=6379, db=1, decode_responses=False)
        client.flushdb()

        # Suppress ResourceWarning messages about unclosed connections.
        warnings.simplefilter('ignore')
        self.prefetch = PrefetchDaemon('foo')
        self.sp = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
        self.prefetch.set_spatialdb(self.sp)

    def tearDown(self):
        """Clean kv store in between tests"""
        client = redis.StrictRedis(host=self.kvio_config['cache_host'],
                                   port=6379, db=1, decode_responses=False)
        client.flushdb()
        client = redis.StrictRedis(host=self.state_config['cache_state_host'],
                                   port=6379, db=1, decode_responses=False)
        client.flushdb()

    def test_add_to_prefetch(self):
        cuboid_dims = CUBOIDSIZE[0]
        # Cuboid dimensions.
        x_dim = cuboid_dims[0]
        y_dim = cuboid_dims[1]
        z_dim = cuboid_dims[2]

        cube_above = Cube.create_cube(self.resource, [x_dim, y_dim, z_dim])
        cube_above.random()

        # Write a cuboid stacked vertically above the origin.
        self.sp.write_cuboid(self.resource, (0, 0, z_dim * 2), 0, cube_above.data)

        cube_above.morton_id = ndlib.XYZMorton([0, 0, z_dim * 2 // z_dim])

        cube_above_cache_key = self.sp.kvio.generate_cached_cuboid_keys(
            self.resource, 0, [0], [cube_above.morton_id])

        # Make sure cuboid saved.
        cube_act = self.sp.cutout(self.resource, (0, 0, z_dim * 2), (x_dim, y_dim, z_dim), 0)
        np.testing.assert_array_equal(cube_above.data, cube_act.data)

        # Clear cache so we can test prefetch.
        self.sp.kvio.cache_client.flushdb()

        # Also clear cache state before running test.
        self.sp.cache_state.status_client.flushdb()

        obj_keys = self.sp.objectio.cached_cuboid_to_object_keys(
            cube_above_cache_key)

        # Place a cuboid in the prefetch queue.
        self.sp.cache_state.status_client.rpush('PRE-FETCH', obj_keys[0])

        # This is the system under test.
        self.prefetch.process()

        # Wait for cube to be prefetched.
        i = 0
        while not self.sp.kvio.cube_exists(cube_above_cache_key[0]) and i < 30:
            time.sleep(1)
            i += 1

        # Confirm cuboid now in cache.
        self.assertTrue(self.sp.kvio.cube_exists(cube_above_cache_key[0]))

        cube_act = self.sp.cutout(
            self.resource, (0, 0, z_dim * 2), (x_dim, y_dim, z_dim), 0)
        np.testing.assert_array_equal(cube_above.data, cube_act.data)
Example #8
    def test_delayed_write_daemon_multiple(self):
        """Test handling multiple delayed writes"""
        sp = SpatialDB(self.kvio_config, self.state_config,
                       self.object_store_config)
        dwd = DelayedWriteDaemon("boss-delayedwrited-test.pid")

        # Create the first delayed write
        cube1 = Cube.create_cube(self.resource, [512, 512, 16])
        cube1.random()
        cube1.morton_id = 0
        res = 0
        time_sample = 0
        cube1.data[0, 5, 100, 100] = 2
        cube1.data[0, 5, 100, 101] = 2
        cube1.data[0, 5, 100, 102] = 2

        write_cuboid_base = "WRITE-CUBOID&{}&{}".format(
            self.resource.get_lookup_key(), 0)

        write_cuboid_key = sp.kvio.insert_cube_in_write_buffer(
            write_cuboid_base, res, cube1.morton_id,
            cube1.to_blosc_by_time_index(time_sample))

        sp.cache_state.add_to_delayed_write(write_cuboid_key,
                                            self.resource.get_lookup_key(),
                                            res, cube1.morton_id, time_sample,
                                            self.resource.to_json())

        cube2 = Cube.create_cube(self.resource, [512, 512, 16])
        cube2.random()
        cube2.morton_id = 0
        res = 0
        time_sample = 0
        cube2.data[0, 5, 100, 100] = 0
        cube2.data[0, 5, 100, 101] = 1
        cube2.data[0, 5, 100, 102] = 0
        cube2.data[0, 5, 100, 103] = 0
        write_cuboid_key = sp.kvio.insert_cube_in_write_buffer(
            write_cuboid_base, res, cube2.morton_id,
            cube2.to_blosc_by_time_index(time_sample))

        sp.cache_state.add_to_delayed_write(write_cuboid_key,
                                            self.resource.get_lookup_key(),
                                            res, cube2.morton_id, time_sample,
                                            self.resource.to_json())

        # Use Daemon To handle writes
        dwd.process(sp)
        time.sleep(30)

        # Make sure they went through
        cube3 = sp.cutout(self.resource, (0, 0, 0), (512, 512, 16), 0)

        cube2.data[0, 5, 100, 100] = 2
        cube2.data[0, 5, 100, 101] = 1
        cube2.data[0, 5, 100, 102] = 2
        cube2.data[0, 5, 100, 103] = cube1.data[0, 5, 100, 103]
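        # (These patched expectations imply the daemon merges overlapping delayed
        # writes such that zero voxels in the newer write do not clobber older
        # nonzero data.)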

        np.testing.assert_array_equal(cube3.data, cube2.data)

        # Make sure delay key got deleted
        keys = sp.cache_state.get_all_delayed_write_keys()
        assert not keys
Example #9
class MaxProjCatmaid:
    """Prefetch CATMAID tiles into MndcheDB"""
    def __init__(self):

        self.proj = None
        self.db = None
        self.token = None
        self.tilesz = 512

    def __del__(self):
        pass

    def getTileXY(self, ch, res, xtile, ytile, zslice, width):
        """Cutout, return the image"""

        # figure out the cutout (limit to max image size)
        xstart = xtile * self.tilesz
        ystart = ytile * self.tilesz
        xend = min((xtile + 1) * self.tilesz,
                   self.proj.datasetcfg.imageSize(res)[0][0])
        yend = min((ytile + 1) * self.tilesz,
                   self.proj.datasetcfg.imageSize(res)[0][1])

        zstart = max(zslice - width, 0)
        zend = min(zslice + 1 + width, self.tilesz,
                   self.proj.datasetcfg.imageSize(res)[0][2])

        # call the mcfc interface
        imageargs = '{}/{},{}/{},{}/{},{}/'.format(res, xstart, xend, ystart,
                                                   yend, zstart, zend)

        cutout = ndwsrest.cutout(imageargs, ch, self.proj, self.db)

        tiledata = np.amax(cutout.data, axis=0)
        tiledata = ndwsrest.window(tiledata, ch)

        # turn into an 8-bit image and return
        return Image.frombuffer('L', (tiledata.shape[1], tiledata.shape[0]),
                                tiledata.flatten(), 'raw', 'L', 0, 1)

    def getTile(self, webargs):
        """Either fetch the file from mndche or get a mcfc image"""

        try:
            # arguments of format /token/channel[/width:w]/slice_type/z/y_x_res.png
            m = re.match(
                "(\w+)/([\w+,[:\w]*]*)(?:/width:([\d+]+))?/(xy|yz|xz)/(\d+)/(\d+)_(\d+)_(\d+).png",
                webargs)

            [self.token, channel, widthstr,
             slice_type] = [i for i in m.groups()[:4]]
            [ztile, ytile, xtile, res] = [int(i) for i in m.groups()[4:]]

            # extract the width as an integer (the group is optional; default 0)
            width = int(widthstr) if widthstr is not None else 0

        except Exception as e:
            logger.error("Incorrect arguments for getTile {}. {}".format(
                webargs, e))
            raise NDWSError("Incorrect arguments for getTile {}. {}".format(
                webargs, e))

        self.proj = NDProject.fromTokenName(self.token)
        ch = self.proj.getChannelObj(channel)

        with closing(SpatialDB(self.proj)) as self.db:

            tile = None

            if tile is None:

                if slice_type == 'xy':
                    img = self.getTileXY(ch, res, xtile, ytile, ztile, width)
                # elif slice_type == 'xz':
                # img = self.getTileXZ(res, xtile, ytile, ztile, width)
                # elif slice_type == 'yz':
                # img = self.getTileYZ(res, xtile, ytile, ztile, width)
                else:
                    logger.error(
                        "Requested illegal image plane {}. Should be xy, xz, yz."
                        .format(slice_type))
                    raise NDWSError(
                        "Requested illegal image plane {}. Should be xy, xz, yz."
                        .format(slice_type))

                fobj = cStringIO.StringIO()
                img.save(fobj, "PNG")

            else:
                fobj = cStringIO.StringIO(tile)

            fobj.seek(0)
            return fobj
Example #10
            time.sleep(.1)

    if flush_msg_data:
        # Got a message

        # Get Message Receipt Handle
        rx_handle = flush_msg_data['ReceiptHandle']

        # Load the message body
        flush_msg_data = json.loads(flush_msg_data['Body'])

        print("Message: {}".format(flush_msg_data))

        # Setup SPDB instance
        sp = SpatialDB(flush_msg_data["config"]["kv_config"],
                       flush_msg_data["config"]["state_config"],
                       flush_msg_data["config"]["object_store_config"])

        # Get the write-cuboid key to flush
        write_cuboid_key = flush_msg_data['write_cuboid_key']
        print("Flushing {} to S3".format(write_cuboid_key))

        # Create resource instance
        resource = BossResourceBasic()
        resource.from_dict(flush_msg_data["resource"])
    else:
        # Nothing to flush. Exit.
        print("No flush message available")
        sys.exit(0)

    # Check if cuboid is in S3
Example #11
    def test_get_tight_bounding_box_multi_cuboids_z_axis(self):
        """
        Get the tight bounding box for an object that exists in two cuboids on the z axis.
        """
        resolution = 0
        [x_cube_dim, y_cube_dim, z_cube_dim] = CUBOIDSIZE[resolution]

        id = 33333
        # Customize the resource so it writes to its own channel and uses a
        # coord frame large enough to encompass the data written.  This is
        # important for proper loose bounding box calculations.
        data = get_anno_dict(boss_key='col1&exp1&ch100', lookup_key='1&1&100')
        data['coord_frame']['x_stop'] = 10000
        data['coord_frame']['y_stop'] = 10000
        data['coord_frame']['z_stop'] = 10000
        resource = BossResourceBasic(data)
        time_sample = 0
        version = 0
        x_rng = [0, x_cube_dim]
        y_rng = [0, y_cube_dim]
        z_rng = [0, z_cube_dim]
        t_rng = [0, 1]

        cube_dim_tuple = (self.x_dim, self.y_dim, self.z_dim)
        cube1 = Cube.create_cube(resource, [self.x_dim, self.y_dim, self.z_dim])
        cube1.zeros()
        cube1.data[0][14][509][508] = id
        cube1.data[0][15][510][509] = id
        cube1.data[0][15][510][510] = id
        cube1.data[0][14][511][511] = id

        pos1 = [10*self.x_dim, 15*self.y_dim, 2*self.z_dim]
        cube1.morton_id = XYZMorton(pos1)

        cube2 = Cube.create_cube(resource, [self.x_dim, self.y_dim, self.z_dim])
        cube2.zeros()
        cube2.data[0][0][509][508] = id
        cube2.data[0][0][510][509] = id
        cube2.data[0][1][510][510] = id
        cube2.data[0][2][511][511] = id

        pos2 = [10*self.x_dim, 15*self.y_dim, 3*self.z_dim]
        cube2.morton_id = XYZMorton(pos2)

        sp = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
        sp.write_cuboid(resource, pos1, resolution, cube1.data, time_sample_start=0)
        sp.write_cuboid(resource, pos2, resolution, cube2.data, time_sample_start=0)

        # Make sure cube write complete and correct.
        actual_cube = sp.cutout(resource, pos1, cube_dim_tuple, resolution)
        np.testing.assert_array_equal(cube1.data, actual_cube.data)
        actual_cube2 = sp.cutout(resource, pos2, cube_dim_tuple, resolution)
        np.testing.assert_array_equal(cube2.data, actual_cube2.data)
        del cube1
        del actual_cube
        del cube2
        del actual_cube2


        # Method under test.
        actual = sp.get_bounding_box(resource, resolution, id, bb_type='tight')
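        # Derivation of the expected ranges: the object spans x offsets 508..511
        # and y offsets 509..511 within each cuboid (exclusive stops of 512); in z
        # it first appears at offset 14 of cube1 (z = 2*z_dim) and last at offset
        # 2 of cube2 (z = 3*z_dim), hence the exclusive stop pos2[2] + 3.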

        expected = {
            'x_range': [pos1[0]+508, pos2[0]+512],
            'y_range': [pos1[1]+509, pos2[1]+512],
            'z_range': [pos1[2]+14, pos2[2]+3],
            't_range': t_rng
        }

        self.assertEqual(expected, actual)
Example #12
    def getHist(self):

        with closing(NDProjectsDB()) as projdb:
            proj = projdb.loadToken(self.token)

        with closing(SpatialDB(proj)) as db:
            ch = proj.getChannelObj(self.channel)

            [xcubedim, ycubedim,
             zcubedim] = cubedim = proj.datasetcfg.get_cubedim(self.res)
            effcorner = self.roi_lower
            effdim = self.roi_upper

            # get starting and ending indices
            zstart = effcorner[2] // zcubedim
            ystart = effcorner[1] // ycubedim
            xstart = effcorner[0] // xcubedim

            zend = effdim[2] // zcubedim
            yend = effdim[1] // ycubedim
            xend = effdim[0] // xcubedim

            hist_sum = np.zeros(self.numbins, dtype=np.uint32)

            # sum the histogram
            # we want to iterate over indices, checking for partial cubes
            for z in range(zstart, zend + 1):
                for y in range(ystart, yend + 1):
                    for x in range(xstart, xend + 1):
                        # cutout the data for the cube
                        cube = db.cutout(
                            ch, [x * xcubedim, y * ycubedim, z * zcubedim],
                            cubedim, self.res)

                        cubestart = [0, 0, 0]
                        cubeend = [xcubedim, ycubedim, zcubedim]

                        # check for partial cube
                        if x == xstart:
                            cubestart[0] = effcorner[0] - x * xcubedim
                        if y == ystart:
                            cubestart[1] = effcorner[1] - y * ycubedim
                        if z == zstart:
                            cubestart[2] = effcorner[2] - z * zcubedim

                        if x == xend:
                            cubeend[0] = effdim[0] - x * xcubedim
                        if y == yend:
                            cubeend[1] = effdim[1] - y * ycubedim
                        if z == zend:
                            cubeend[2] = effdim[2] - z * zcubedim

                        # trim cube if necessary
                        data = cube.data[cubestart[2]:cubeend[2],
                                         cubestart[1]:cubeend[1],
                                         cubestart[0]:cubeend[0]]

                        # compute the histogram and store it
                        (hist, bins) = np.histogram(data[data > 0],
                                                    bins=self.numbins,
                                                    range=(0, self.numbins))
                        hist_sum = np.add(hist_sum, hist)
                        logger.debug("Processed cube {} {} {}".format(x, y, z))

            return (hist_sum, bins)
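
The per-cube accumulation above is valid because histogram counts over disjoint data are additive when the bins are fixed. A self-contained illustration (not part of the original code):

import numpy as np
a = np.random.randint(0, 10, (4, 4))
whole, bins = np.histogram(a[a > 0], bins=10, range=(0, 10))
parts = sum(np.histogram(b[b > 0], bins=10, range=(0, 10))[0]
            for b in (a[:2], a[2:]))
assert (whole == parts).all()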
Example #13
        else:
            rx_cnt += 1
            print("No message found. Try {} of 6".format(rx_cnt))
            time.sleep(1)

    if not msg_id:
        # Nothing to flush. Exit.
        sys.exit("No ingest message available")

    # Get the write-cuboid key to flush
    chunk_key = msg_data['chunk_key']
    print("Ingesting Chunk {}".format(chunk_key))

    # Setup SPDB instance
    sp = SpatialDB(msg_data['parameters']["KVIO_SETTINGS"],
                   msg_data['parameters']["STATEIO_CONFIG"],
                   msg_data['parameters']["OBJECTIO_CONFIG"])

    # Get tile list from Tile Index Table
    tile_index_db = BossTileIndexDB(proj_info.project_name)
    # tile_index_result (dict): keys are S3 object keys of the tiles comprising the chunk.
    tile_index_result = tile_index_db.getCuboid(msg_data["chunk_key"], int(msg_data["ingest_job"]))
    if tile_index_result is None:
        # Remove message so it's not redelivered.
        ingest_queue.deleteMessage(msg_id, msg_rx_handle)
        sys.exit("Aborting due to chunk key missing from tile index table")

    # Sort the tile keys
    print("Tile Keys: {}".format(tile_index_result["tile_uploaded_map"]))
    tile_key_list = [x.rsplit("&", 2) for x in tile_index_result["tile_uploaded_map"].keys()]
    tile_key_list = sorted(tile_key_list, key=lambda x: int(x[1]))
Example #14
class IntegrationTestCacheMissDaemon(unittest.TestCase):
    layer = AWSSetupLayer

    def setUp(self):

        # Get data from nose2 layer based setup

        # Setup Data
        self.data = self.layer.setup_helper.get_image8_dict()
        self.resource = BossResourceBasic(self.data)

        # Setup config
        self.kvio_config = self.layer.kvio_config
        self.state_config = self.layer.state_config
        self.object_store_config = self.layer.object_store_config

        client = redis.StrictRedis(host=self.kvio_config['cache_host'],
                                   port=6379, db=1, decode_responses=False)
        client.flushdb()
        client = redis.StrictRedis(host=self.state_config['cache_state_host'],
                                   port=6379, db=1, decode_responses=False)
        client.flushdb()

        self.cache_miss = CacheMissDaemon('foo')
        self.sp = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
        self.cache_miss.set_spatialdb(self.sp)

    def tearDown(self):
        """Clean kv store in between tests"""
        client = redis.StrictRedis(host=self.kvio_config['cache_host'],
                                   port=6379, db=1, decode_responses=False)
        client.flushdb()
        client = redis.StrictRedis(host=self.state_config['cache_state_host'],
                                   port=6379, db=1, decode_responses=False)
        client.flushdb()

    def test_add_to_prefetch(self):
        cuboid_dims = CUBOIDSIZE[0]
        # Cuboid dimensions.
        x_dim = cuboid_dims[0]
        y_dim = cuboid_dims[1]
        z_dim = cuboid_dims[2]

        cube = Cube.create_cube(self.resource, [x_dim, y_dim, z_dim])
        cube.random()
        cube_above = Cube.create_cube(self.resource, [x_dim, y_dim, z_dim])
        cube_above.random()
        cube_below = Cube.create_cube(self.resource, [x_dim, y_dim, z_dim])
        cube_below.random()

        # Write 3 cuboids that are stacked vertically.
        self.sp.write_cuboid(self.resource, (0, 0, 0), 0, cube_below.data)
        self.sp.write_cuboid(self.resource, (0, 0, z_dim), 0, cube.data)
        self.sp.write_cuboid(self.resource, (0, 0, z_dim * 2), 0, cube_above.data)

        cube.morton_id = ndlib.XYZMorton([0, 0, z_dim // z_dim])
        cube_below.morton_id = ndlib.XYZMorton([0, 0, 0])
        cube_above.morton_id = ndlib.XYZMorton([0, 0, z_dim * 2 // z_dim])
        print('mortons: {}, {}, {}'.format(
            cube_below.morton_id, cube.morton_id, cube_above.morton_id))

        cube_below_cache_key, cube_cache_key, cube_above_cache_key = self.sp.kvio.generate_cached_cuboid_keys(
            self.resource, 0, [0],
            [cube_below.morton_id, cube.morton_id, cube_above.morton_id])

        # Make sure cuboids saved.
        cube_act = self.sp.cutout(self.resource, (0, 0, 0), (x_dim, y_dim, z_dim), 0)
        np.testing.assert_array_equal(cube_below.data, cube_act.data)
        cube_act = self.sp.cutout(self.resource, (0, 0, z_dim), (x_dim, y_dim, z_dim), 0)
        np.testing.assert_array_equal(cube.data, cube_act.data)
        cube_act = self.sp.cutout(self.resource, (0, 0, z_dim * 2), (x_dim, y_dim, z_dim), 0)
        np.testing.assert_array_equal(cube_above.data, cube_act.data)

        # Clear cache so we can get a cache miss.
        self.sp.kvio.cache_client.flushdb()

        # Also clear CACHE-MISS before running the test.
        self.sp.cache_state.status_client.flushdb()

        # Get middle cube again.  This should trigger a cache miss.
        cube_act = self.sp.cutout(self.resource, (0, 0, z_dim), (x_dim, y_dim, z_dim), 0)

        # Confirm there is a cache miss.
        misses = self.sp.cache_state.status_client.lrange('CACHE-MISS', 0, 10)
        print('misses:')
        print(misses)
        miss_actual = self.sp.cache_state.status_client.lindex('CACHE-MISS', 0)
        self.assertEqual(cube_cache_key, str(miss_actual, 'utf-8'))

        # This is the system under test.
        self.cache_miss.process()

        # Confirm PRE-FETCH has the object keys for the cube above and below.
        fetch_actual1 = self.sp.cache_state.status_client.lindex('PRE-FETCH', 0)
        fetch_actual2 = self.sp.cache_state.status_client.lindex('PRE-FETCH', 1)
        obj_keys = self.sp.objectio.cached_cuboid_to_object_keys(
            [cube_above_cache_key, cube_below_cache_key])
        self.assertEqual(obj_keys[0], str(fetch_actual1, 'utf-8'))
        self.assertEqual(obj_keys[1], str(fetch_actual2, 'utf-8'))
Example #15
class SimpleCatmaid:
    """ Prefetch CATMAID tiles into MndcheDB """
    def __init__(self):
        """ Bind the mndche """

        self.proj = None
        self.channel = None
        self.tilesz = 512
        # make the memcache connection
        self.mc = pylibmc.Client(["127.0.0.1"],
                                 binary=True,
                                 behaviors={
                                     "tcp_nodelay": True,
                                     "ketama": True
                                 })

    def __del__(self):
        pass

    def buildKey(self, res, slice_type, xtile, ytile, ztile, timetile,
                 filterlist):
        return 'simple/{}/{}/{}/{}/{}/{}/{}/{}/{}'.format(
            self.token, self.channel, slice_type, res, xtile, ytile, ztile,
            timetile, filterlist)

    def cacheMissXY(self, res, xtile, ytile, ztile, timetile, filterlist):
        """On a miss. Cutout, return the image and load the cache in a background thread"""

        # make sure that the tile size is aligned with the cubedim
        if self.tilesz % self.proj.datasetcfg.cubedim[res][
                0] != 0 or self.tilesz % self.proj.datasetcfg.cubedim[res][1]:
            logger.error("Illegal tile size. Not aligned")
            raise NDWSError("Illegal tile size. Not aligned")

        # figure out the cutout (limit to max image size)
        xstart = xtile * self.tilesz
        ystart = ytile * self.tilesz
        xend = min((xtile + 1) * self.tilesz,
                   self.proj.datasetcfg.get_imagesize(res)[0])
        yend = min((ytile + 1) * self.tilesz,
                   self.proj.datasetcfg.get_imagesize(res)[1])

        # get an xy image slice
        if timetile is None:
            imageargs = '{}/{}/{}/{},{}/{},{}/{}/'.format(
                self.channel, 'xy', res, xstart, xend, ystart, yend, ztile)
        else:
            imageargs = '{}/{}/{}/{},{}/{},{}/{}/{}/'.format(
                self.channel, 'xy', res, xstart, xend, ystart, yend, ztile,
                timetile)

        # if filter list exists then add on for downstream processing
        if filterlist:
            imageargs = imageargs + 'filter/{}/'.format(filterlist)

        cb = ndwsrest.imgSlice(imageargs, self.proj, self.db)
        if cb.data.shape != (1, self.tilesz,
                             self.tilesz) and cb.data.shape != (
                                 1, 1, self.tilesz, self.tilesz):
            if timetile is None:
                tiledata = np.zeros((1, self.tilesz, self.tilesz),
                                    cb.data.dtype)
                tiledata[0, 0:((yend - 1) % self.tilesz + 1),
                         0:((xend - 1) % self.tilesz + 1)] = cb.data[0, :, :]
            else:
                tiledata = np.zeros((1, 1, self.tilesz, self.tilesz),
                                    cb.data.dtype)
                tiledata[0, 0, 0:((yend - 1) % self.tilesz + 1),
                         0:((xend - 1) % self.tilesz + 1)] = cb.data[0,
                                                                     0, :, :]
            cb.data = tiledata

        return cb.xyImage()

    def cacheMissXZ(self, res, xtile, ytile, ztile, timetile, filterlist):
        """On a miss. Cutout, return the image and load the cache in a background thread"""

        # make sure that the tile size is aligned with the cubedim
        if self.tilesz % self.proj.datasetcfg.cubedim[res][
                0] != 0 or self.tilesz % self.proj.datasetcfg.get_cubedim(
                    res)[2]:
            raise ("Illegal tile size. Not aligned")

        # figure out the cutout (limit to max image size)
        xstart = xtile * self.tilesz
        xend = min((xtile + 1) * self.tilesz,
                   self.proj.datasetcfg.get_imagesize(res)[0])

        # OK, this is weird, but we have to choose a convention: xtile, ytile, ztile
        # refer to the URL request, while xstart, zstart, etc. refer to ndstore
        # coordinates for the cutout.
        #
        # z cutouts need to get rescaled
        # we'll map to the closest pixel range and tolerate one pixel error at the boundary
        # scalefactor = zvoxel / yvoxel
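        # e.g. with z voxels 10x the y voxel size (hypothetical numbers), the
        # scalefactor is 10 and a 512-row tile maps to ceil(512 / 10) = 52 z slices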
        scalefactor = self.proj.datasetcfg.get_voxelres(
            res)[2] / self.proj.datasetcfg.get_voxelres(res)[1]
        zoffset = self.proj.datasetcfg.get_offset(res)[2]
        ztilestart = int((ytile * self.tilesz) / scalefactor) + zoffset
        zstart = max(ztilestart, zoffset)
        ztileend = int(math.ceil(
            (ytile + 1) * self.tilesz / scalefactor)) + zoffset
        zend = min(ztileend, self.proj.datasetcfg.get_imagesize(res)[2] + 1)

        # get an xz image slice
        if timetile is None:
            imageargs = '{}/{}/{}/{},{}/{}/{},{}/'.format(
                self.channel, 'xz', res, xstart, xend, ztile, zstart, zend)
        else:
            imageargs = '{}/{}/{}/{},{}/{}/{},{}/{}/'.format(
                self.channel, 'xz', res, xstart, xend, ztile, zstart, zend,
                timetile)

        if filterlist:
            imageargs = imageargs + 'filter/{}/'.format(filterlist)

        cb = ndwsrest.imgSlice(imageargs, self.proj, self.db)

        # scale by the appropriate amount
        if cb.data.shape != (ztileend - ztilestart, 1,
                             self.tilesz) and cb.data.shape != (
                                 1, ztileend - ztilestart, 1, self.tilesz):
            if timetile is None:
                tiledata = np.zeros((ztileend - ztilestart, 1, self.tilesz),
                                    cb.data.dtype)
                tiledata[0:zend - zstart, 0,
                         0:((xend - 1) % self.tilesz + 1)] = cb.data[:, 0, :]
            else:
                tiledata = np.zeros((1, ztileend - ztilestart, 1, self.tilesz),
                                    cb.data.dtype)
                tiledata[0, 0:zend - zstart, 0,
                         0:((xend - 1) % self.tilesz + 1)] = cb.data[0, :,
                                                                     0, :]
            cb.data = tiledata

        return cb.xzImage(scalefactor)

    def cacheMissYZ(self, res, xtile, ytile, ztile, timetile, filterlist):
        """ On a miss. Cutout, return the image and load the cache in a background thread """

        # make sure that the tile size is aligned with the cubedim
        if self.tilesz % self.proj.datasetcfg.get_cubedim(
                res)[1] != 0 or self.tilesz % self.proj.datasetcfg.get_cubedim(
                    res)[2]:
            raise ("Illegal tile size.  Not aligned")

        # figure out the cutout (limit to max image size)
        ystart = ytile * self.tilesz
        yend = min((ytile + 1) * self.tilesz,
                   self.proj.datasetcfg.get_imagesize(res)[1])

        # z cutouts need to get rescaled
        # we'll map to the closest pixel range and tolerate one pixel error at the boundary
        # Scalefactor = zvoxel / xvoxel
        scalefactor = self.proj.datasetcfg.get_voxelres(
            res)[2] / self.proj.datasetcfg.get_voxelres(res)[0]
        zoffset = self.proj.datasetcfg.get_offset(res)[2]
        ztilestart = int((ztile * self.tilesz) / scalefactor) + zoffset
        zstart = max(ztilestart, zoffset)
        ztileend = int(math.ceil(
            (ztile + 1) * self.tilesz / scalefactor)) + zoffset
        zend = min(ztileend, self.proj.datasetcfg.get_imagesize(res)[2] + 1)

        # get an yz image slice
        if timetile is None:
            imageargs = '{}/{}/{}/{}/{},{}/{},{}/'.format(
                self.channel, 'yz', res, xtile, ystart, yend, zstart, zend)
        else:
            imageargs = '{}/{}/{}/{}/{},{}/{},{}/{}/'.format(
                self.channel, 'yz', res, xtile, ystart, yend, zstart, zend,
                timetile)

        if filterlist:
            imageargs = imageargs + 'filter/{}/'.format(filterlist)

        cb = ndwsrest.imgSlice(imageargs, self.proj, self.db)

        # scale by the appropriate amount
        if cb.data.shape != (ztileend - ztilestart, self.tilesz,
                             1) and cb.data.shape != (1, ztileend - ztilestart,
                                                      self.tilesz, 1):
            if timetile is None:
                tiledata = np.zeros((ztileend - ztilestart, self.tilesz, 1),
                                    cb.data.dtype)
                tiledata[0:zend - zstart, 0:((yend - 1) % self.tilesz + 1),
                         0] = cb.data[:, :, 0]
            else:
                tiledata = np.zeros((1, ztileend - ztilestart, self.tilesz, 1),
                                    cb.data.dtype)
                tiledata[0, 0:zend - zstart, 0:((yend - 1) % self.tilesz + 1),
                         0] = cb.data[0, :, :, 0]
            cb.data = tiledata

        return cb.yzImage(scalefactor)

    def getTile(self, webargs):
        """Fetch the file from mndche or get a cutout from the database"""

        try:
            # argument of format token/channel/slice_type/z/y_x_res.png
            #      p = re.compile("(\w+)/([\w+,]*?)/(xy|yz|xz|)/(\d+/)?(\d+)/(\d+)_(\d+)_(\d+).png")
            p = re.compile(
                "(\w+)/([\w+,]*?)/(xy|yz|xz|)/(?:filter/([\d,]+)/)?(?:(\d+)/)?(\d+)/(\d+)_(\d+)_(\d+).png"
            )
            m = p.match(webargs)
            [self.token, self.channel, slice_type,
             filterlist] = [i for i in m.groups()[:4]]
            [timetile, ztile, ytile, xtile, res] = [
                int(i.strip('/')) if i is not None else None
                for i in m.groups()[4:]
            ]
        except Exception as e:
            logger.error("Incorrect arguments given for getTile {}. {}".format(
                webargs, e))
            raise NDWSError(
                "Incorrect arguments given for getTile {}. {}".format(
                    webargs, e))

        self.proj = NDProject.fromTokenName(self.token)

        with closing(SpatialDB(self.proj)) as self.db:

            # memcache key
            mckey = self.buildKey(res, slice_type, xtile, ytile, ztile,
                                  timetile, filterlist)

            # if tile is in memcache, return it
            tile = self.mc.get(mckey)

            if tile is None:
                if slice_type == 'xy':
                    img = self.cacheMissXY(res, xtile, ytile, ztile, timetile,
                                           filterlist)
                elif slice_type == 'xz':
                    img = self.cacheMissXZ(res, xtile, ytile, ztile, timetile,
                                           filterlist)
                elif slice_type == 'yz':
                    img = self.cacheMissYZ(res, ztile, xtile, ytile, timetile,
                                           filterlist)
                else:
                    logger.error(
                        "Requested illegal image plane {}. Should be xy, xz, yz."
                        .format(slice_type))
                    raise NDWSError(
                        "Requested illegal image plane {}. Should be xy, xz, yz."
                        .format(slice_type))

                fobj = cStringIO.StringIO()
                img.save(fobj, "PNG")
                self.mc.set(mckey, fobj.getvalue())

            else:
                print "Hit"
                fobj = cStringIO.StringIO(tile)

            fobj.seek(0)
            return fobj
Example #16
# Expects these keys from the events dictionary:
# {
#   'kv_config': {...},
#   'state_config': {...},
#   'object_store_config': {...},
#   'object_key': '...',
#   'page_in_channel': '...'
# }

print("in s3_to_cache lambda")
import json
import sys
from spdb.spatialdb import SpatialDB

# Parse input args passed as a JSON string from the lambda loader
json_event = sys.argv[1]
event = json.loads(json_event)

# Setup SPDB instance
sp = SpatialDB(event['kv_config'], event['state_config'],
               event['object_store_config'])

object_key = event['object_key']
page_in_channel = event['page_in_channel']

cube_bytes = sp.objectio.get_single_object(object_key)
cache_keys = sp.objectio.object_to_cached_cuboid_keys([object_key])
sp.kvio.put_cubes(cache_keys[0], [cube_bytes])
if page_in_channel is not None:
    sp.cache_state.notify_page_in_complete(page_in_channel, object_key)
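
Note that this script receives its event as a JSON string in sys.argv[1] (passed by a lambda loader) rather than through the usual handler(event, context) signature. A hypothetical local invocation, with the config dicts and keys elided, would look like:

python s3_to_cache.py '{"kv_config": {...}, "state_config": {...}, "object_store_config": {...}, "object_key": "...", "page_in_channel": null}'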
Example #17
    def test_sqs_watcher_send_message(self):
        """Inject message into queue and test that SqsWatcher kicks off a lambda and writes cuboid to s3."""
        # Generate random data
        cube1 = Cube.create_cube(self.resource, [512, 512, 16])
        cube1.random()
        cube1.morton_id = 0

        sp = SpatialDB(self.kvio_config, self.state_config,
                       self.object_store_config)

        base_write_cuboid_key = "WRITE-CUBOID&{}&{}".format(
            self.resource.get_lookup_key(), 0)
        morton_idx = ndlib.XYZMorton([0, 0, 0])
        t = 0
        write_cuboid_key = sp.kvio.insert_cube_in_write_buffer(
            base_write_cuboid_key, t, morton_idx,
            cube1.to_blosc_by_time_index(t))

        # Put page out job on the queue
        sqs = boto3.client('sqs', region_name=get_region())

        msg_data = {
            "config": self.config_data,
            "write_cuboid_key": write_cuboid_key,
            "lambda-name": "s3_flush",
            "resource": self.resource.to_dict()
        }

        response = sqs.send_message(
            QueueUrl=self.object_store_config["s3_flush_queue"],
            MessageBody=json.dumps(msg_data))
        assert response['ResponseMetadata']['HTTPStatusCode'] == 200

        watcher = SqsWatcher(self.lambda_data)
        # verify_queue() needs to be run multiple times to verify that the queue
        # is not changing; only then does it send off a lambda message.
        time.sleep(5)
        watcher.verify_queue()
        time.sleep(5)
        lambdas_invoked = watcher.verify_queue()
        if lambdas_invoked < 1:
            time.sleep(5)
            watcher.verify_queue()
        time.sleep(15)

        client = boto3.client('sqs', region_name=get_region())
        response = client.get_queue_attributes(
            QueueUrl=self.object_store_config["s3_flush_queue"],
            AttributeNames=[
                'ApproximateNumberOfMessages',
                'ApproximateNumberOfMessagesNotVisible'
            ])
        http_status_code = response['ResponseMetadata']['HTTPStatusCode']
        queue_count = int(
            response['Attributes']['ApproximateNumberOfMessages'])
        # test that the queue count is now 0
        assert queue_count == 0

        s3 = boto3.client('s3', region_name=get_region())
        objects_list = s3.list_objects(
            Bucket=self.object_store_config['cuboid_bucket'])
        # tests that bucket has some Contents.
        assert "Contents" in objects_list.keys()
Example #18
    def ingestImageStack(self):
        """Ingest a TIF image stack"""

        # Load a database
        with closing(NDProjectsDB()) as projdb:
            proj = projdb.loadToken(self.token)

        with closing(SpatialDB(proj)) as db:

            s3_io = S3IO(db)
            # cuboidindex_db = CuboidIndexDB(proj.project_name, endpoint_url=ndsettings.DYNAMO_ENDPOINT)

            ch = proj.getChannelObj(self.channel)
            # get the dataset configuration
            [ximagesz, yimagesz,
             zimagesz] = proj.datasetcfg.dataset_dim(self.resolution)
            [starttime, endtime] = ch.time_range
            [xcubedim, ycubedim,
             zcubedim] = cubedim = proj.datasetcfg.get_cubedim(self.resolution)
            [xsupercubedim, ysupercubedim,
             zsupercubedim] = supercubedim = proj.datasetcfg.get_supercubedim(
                 self.resolution)
            [xoffset, yoffset,
             zoffset] = proj.datasetcfg.get_offset(self.resolution)

            if ch.channel_type in TIMESERIES_CHANNELS and (starttime == 0
                                                           and endtime == 0):
                logger.error("Timeseries Data cannot have timerange (0,0)")
                raise NDWSError("Timeseries Data cannot have timerange (0,0)")

            # Get a list of the files in the directories
            for timestamp in range(starttime, endtime):
                for slice_number in range(zoffset, zimagesz, zsupercubedim):
                    slab = np.zeros([1, zsupercubedim, yimagesz, ximagesz],
                                    dtype=ND_dtypetonp.get(
                                        ch.channel_datatype))
                    # fetch zsupercubedim slices at a time
                    if ch.channel_type in TIMESERIES_CHANNELS:
                        time_value = timestamp
                    else:
                        time_value = None
                    self.fetchData(
                        range(slice_number, slice_number + zsupercubedim)
                        if slice_number + zsupercubedim <= zimagesz else range(
                            slice_number, zimagesz),
                        time_value=time_value)
                    for b in range(zsupercubedim):
                        if (slice_number + b < zimagesz):
                            try:
                                # reading the raw data
                                file_name = "{}{}".format(
                                    self.path,
                                    self.generateFileName(slice_number + b))
                                # print "Open filename {}".format(file_name)
                                logger.info(
                                    "Open filename {}".format(file_name))

                                if ch.channel_datatype in [UINT8, UINT16]:
                                    try:
                                        image_data = np.asarray(
                                            Image.open(file_name, 'r'))
                                        slab[0, b, :, :] = image_data
                                    except Exception as e:
                                        slab[0, b, :, :] = np.zeros(
                                            (yimagesz, ximagesz),
                                            dtype=ND_dtypetonp.get(
                                                ch.channel_datatype))
                                        logger.warning(
                                            "File corrupted. Cannot open file. {}"
                                            .format(e))
                                elif ch.channel_datatype in [UINT32]:
                                    image_data = np.asarray(
                                        Image.open(file_name,
                                                   'r').convert('RGBA'))
                                    slab[0, b, :, :] = np.left_shift(
                                        image_data[:, :, 3],
                                        24,
                                        dtype=np.uint32) | np.left_shift(
                                            image_data[:, :, 2],
                                            16,
                                            dtype=np.uint32) | np.left_shift(
                                                image_data[:, :, 1],
                                                8,
                                                dtype=np.uint32) | np.uint32(
                                                    image_data[:, :, 0])
                                elif ch.channel_type in ANNOTATION_CHANNELS:
                                    image_data = np.asarray(
                                        Image.open(file_name, 'r'))
                                    slab[0, b, :, :] = image_data
                                else:
                                    logger.error("Cannot ingest this data yet")
                                    raise NDWSError(
                                        "Cannot ingest this data yet")
                            except IOError as e:
                                logger.warning("IOError {}.".format(e))
                                slab[0, b, :, :] = np.zeros(
                                    (yimagesz, ximagesz),
                                    dtype=ND_dtypetonp.get(
                                        ch.channel_datatype))

                    for y in range(0, yimagesz + 1, ysupercubedim):
                        for x in range(0, ximagesz + 1, xsupercubedim):

                            # Getting a Cube id and ingesting the data one cube at a time
                            zidx = XYZMorton([
                                x // xsupercubedim, y // ysupercubedim,
                                (slice_number - zoffset) // zsupercubedim
                            ])
                            cube = Cube.CubeFactory(supercubedim,
                                                    ch.channel_type,
                                                    ch.channel_datatype)
                            cube.zeros()

                            xmin, ymin = x, y
                            xmax = min(ximagesz, x + xsupercubedim)
                            ymax = min(yimagesz, y + ysupercubedim)
                            zmin = 0
                            zmax = min(slice_number + zsupercubedim,
                                       zimagesz + 1)

                            cube.data[0, 0:zmax - zmin, 0:ymax - ymin,
                                      0:xmax - xmin] = slab[0, zmin:zmax,
                                                            ymin:ymax,
                                                            xmin:xmax]
                            if cube.isNotZeros():
                                # cuboidindex_db.putItem(ch.channel_name, self.resolution, x, y, slice_number, ch.time_range[0])
                                # s3_io.putCube(ch, self.resolution, zidx, blosc.pack_array(cube.data))
                                s3_io.putCube(ch,
                                              timestamp,
                                              zidx,
                                              self.resolution,
                                              blosc.pack_array(cube.data),
                                              neariso=False)

                    # clean up the slices fetched
                    self.cleanData(
                        range(slice_number, slice_number +
                              zsupercubedim) if slice_number + zsupercubedim <=
                        zimagesz else range(slice_number, zimagesz))
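
The UINT32 branch above packs each pixel's RGBA bytes into a single 32-bit word, alpha in the high byte. A compact equivalent of that left_shift chain, for illustration only:

import numpy as np

rgba = np.zeros((2, 2, 4), dtype=np.uint8)  # stand-in for the decoded RGBA image
packed = (rgba[:, :, 3].astype(np.uint32) << 24 |
          rgba[:, :, 2].astype(np.uint32) << 16 |
          rgba[:, :, 1].astype(np.uint32) << 8 |
          rgba[:, :, 0].astype(np.uint32))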