Example #1
    def test_sfn_error(self, mSleep, mCreateName):
        mCreateName.return_value = 'ZZZ'
        iSession = MockSession()
        client = iSession.client('stepfunctions')
        client.list_state_machines.return_value = {
            'stateMachines': [{
                'stateMachineArn': 'XXX'
            }]
        }
        client.start_execution.side_effect = [{
            'executionArn': 'YYY'
        }, {
            'executionArn': 'YYYY'
        }]
        client.describe_execution.side_effect = [{
            'status': 'FAILED'
        }, {
            'status': 'RUNNING'
        }]
        client.get_execution_history.return_value = {
            'events': [{
                'executionFailedEventDetails': {
                    'error': 'error',
                    'cause': 'cause'
                }
            }]
        }

        try:
            fanout(iSession, 'XXX', list(range(2)))
            self.fail("fanout should result in an ActivityError")
        except ActivityError as e:
            self.assertEqual(e.error, 'error')
            self.assertEqual(e.cause, 'cause')

        calls = [
            mock.call.list_state_machines(),
            mock.call.start_execution(stateMachineArn='XXX',
                                      name='ZZZ',
                                      input='0'),
            mock.call.start_execution(stateMachineArn='XXX',
                                      name='ZZZ',
                                      input='1'),
            mock.call.list_state_machines(),
            mock.call.describe_execution(executionArn='YYY'),
            mock.call.get_execution_history(executionArn='YYY',
                                            reverseOrder=True),
            mock.call.describe_execution(executionArn='YYYY'),
            mock.call.stop_execution(executionArn='YYYY',
                                     error="Heaviside.Fanout",
                                     cause="Sub-process error detected")
        ]

        self.assertEqual(client.mock_calls, calls)
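The mSleep and mCreateName arguments suggest these test methods run under stacked mock.patch decorators. A minimal sketch of the assumed scaffolding follows; the patch targets are hypothetical, since the real module paths are not shown in the example:

import unittest
from unittest import mock

# Hypothetical patch targets; mock.patch decorators apply bottom-up, which
# matches the (self, mSleep, mCreateName) argument order of the test methods.
@mock.patch('heaviside.activities.create_name')   # injected as mCreateName
@mock.patch('heaviside.activities.time.sleep')    # injected as mSleep
class TestFanout(unittest.TestCase):

    def test_sfn_error(self, mSleep, mCreateName):
        ...  # body as shown in the example above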
Example #2
    def test_args_list(self, mSleep, mCreateName):
        mCreateName.return_value = 'ZZZ'
        iSession = MockSession()
        client = iSession.client('stepfunctions')
        client.list_state_machines.return_value = {
            'stateMachines': [{
                'stateMachineArn': 'XXX'
            }]
        }
        client.start_execution.return_value = {'executionArn': 'YYY'}
        client.describe_execution.return_value = {
            'status': 'SUCCEEDED',
            'output': 'null'
        }

        expected = [None]
        actual = fanout(iSession, 'XXX', list(range(1)))

        self.assertEqual(actual, expected)

        calls = [
            mock.call.list_state_machines(),
            mock.call.start_execution(stateMachineArn='XXX',
                                      name='ZZZ',
                                      input='0'),
            mock.call.list_state_machines(),
            mock.call.describe_execution(executionArn='YYY')
        ]

        self.assertEqual(client.mock_calls, calls)
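The expected value of [None] follows from fanout apparently JSON-decoding each execution's 'output' field before returning it: the string 'null' decodes to Python None. A small illustration of that assumption:

import json

# 'output' holds the JSON-encoded result of a SUCCEEDED execution; decoding
# maps 'null' to None, which is why the test expects [None].
outputs = ['null', '0', '"done"']
print([json.loads(o) for o in outputs])  # [None, 0, 'done']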
Example #3
def ingest_populate(args):
    """Populate the ingest upload SQS Queue with tile information

    Note: This activity will clear the upload queue of any existing
          messages

    Args:
        args: {
            'upload_sfn': ARN,

            'job_id': '',
            'upload_queue': ARN,
            'ingest_queue': ARN,

            'resolution': 0,
            'project_info': [col_id, exp_id, ch_id],

            't_start': 0,
            't_stop': 0,
            't_tile_size': 0,

            'x_start': 0,
            'x_stop': 0,
            'x_tile_size': 0,

            'y_start': 0,
            'y_stop': 0,
            'y_tile_size': 0,

            'z_start': 0,
            'z_stop': 0,
            'z_tile_size': 16,
        }

    Returns:
        {'arn': Upload queue ARN,
         'count': Number of messages put into the queue}
    """
    log.debug("Starting to populate upload queue")

    clear_queue(args['upload_queue'])

    results = fanout(aws.get_session(),
                     args['upload_sfn'],
                     split_args(args),
                     max_concurrent=MAX_NUM_PROCESSES,
                     rampup_delay=RAMPUP_DELAY,
                     rampup_backoff=RAMPUP_BACKOFF,
                     poll_delay=POLL_DELAY,
                     status_delay=STATUS_DELAY)

    total_sent = sum(results)

    return {
        'arn': args['upload_queue'],
        'count': total_sent,
    }
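clear_queue is not shown in these examples. A minimal sketch of what it might do with boto3 follows; note the docstring lists upload_queue as an ARN, so a real implementation would first resolve it to a queue URL:

import boto3

def clear_queue(queue_url):
    """Hypothetical helper: drop all pending messages from an SQS queue.

    Assumes a queue URL is given; AWS rate-limits purge_queue to one call
    per queue every 60 seconds.
    """
    sqs = boto3.client('sqs')
    sqs.purge_queue(QueueUrl=queue_url)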
Example #4
def downsample_channel(args):
    """
    Slice the given channel into chunks of 2x2x2 or 2x2x1 cubes that are then
    sent to the downsample_volume lambda for downsampling into a 1x1x1 cube at
    resolution + 1.

    Makes use of the bossutils.multidimensional library for simplified vector
    math.

    Args:
        args {
            downsample_volume_sfn (ARN)

            collection_id (int)
            experiment_id (int)
            channel_id (int)
            annotation_channel (bool)
            data_type (str) 'uint8' | 'uint16' | 'uint64'

            s3_bucket (URL)
            s3_index (URL)
            id_index (URL)

            x_start (int)
            y_start (int)
            z_start (int)

            x_stop (int)
            y_stop (int)
            z_stop (int)

            resolution (int) The resolution to downsample. Creates resolution + 1
            resolution_max (int) The maximum resolution to generate
            res_lt_max (bool) = args['resolution'] < (args['resolution_max'] - 1)

            annotation_index_max (int) The maximum resolution to index annotation channel cubes at
                                       When annotation_index_max = N, indices will exist for res 0 - (N - 1)

            type (str) 'isotropic' | 'anisotropic'
            iso_resolution (int) if resolution >= iso_resolution and type == 'anisotropic', downsample both
        }
    """

    #log.debug("Downsampling resolution " + str(args['resolution']))

    resolution = args['resolution']

    dim = XYZ(*CUBOIDSIZE[resolution])
    #log.debug("Cube dimensions: {}".format(dim))

    def frame(key):
        return XYZ(args[key.format('x')], args[key.format('y')], args[key.format('z')])

    # Figure out variables for isotropic, anisotropic, or isotropic and anisotropic
    # downsampling. If both are happening, fanout one and then the other in series.
    configs = []
    if args['type'] == 'isotropic':
        configs.append({
            'name': 'isotropic',
            'step': XYZ(2,2,2),
            'iso_flag': False,
            'frame_start_key': '{}_start',
            'frame_stop_key': '{}_stop',
        })
    else:
        configs.append({
            'name': 'anisotropic',
            'step': XYZ(2,2,1),
            'iso_flag': False,
            'frame_start_key': '{}_start',
            'frame_stop_key': '{}_stop',
        })

        if resolution >= args['iso_resolution']: # DP TODO: Figure out how to launch aniso iso version with mutating arguments
            configs.append({
                'name': 'isotropic',
                'step': XYZ(2,2,2),
                'iso_flag': True,
                'frame_start_key': 'iso_{}_start',
                'frame_stop_key': 'iso_{}_stop',
            })

    for config in configs:
        frame_start = frame(config['frame_start_key'])
        frame_stop = frame(config['frame_stop_key'])
        step = config['step']
        use_iso_flag = config['iso_flag'] # If the resulting cube should be marked with the ISO flag
        index_annotations = args['resolution'] < (args['annotation_index_max'] - 1)

        # Round to the furthest full cube from the center of the data
        cubes_start = frame_start // dim
        cubes_stop = ceildiv(frame_stop, dim)

        log.debug('Downsampling {} resolution {}'.format(config['name'], resolution))
        log.debug("Frame corner: {}".format(frame_start))
        log.debug("Frame extent: {}".format(frame_stop))
        log.debug("Cubes corner: {}".format(cubes_start))
        log.debug("Cubes extent: {}".format(cubes_stop))
        log.debug("Downsample step: {}".format(step))
        log.debug("Indexing Annotations: {}".format(index_annotations))

        # Call the downsample_volume lambda to process the data
        fanout(aws.get_session(),
               args['downsample_volume_sfn'],
               make_args(args, cubes_start, cubes_stop, step, dim, use_iso_flag, index_annotations),
               max_concurrent = MAX_NUM_PROCESSES,
               rampup_delay = RAMPUP_DELAY,
               rampup_backoff = RAMPUP_BACKOFF,
               poll_delay = POLL_DELAY,
               status_delay = STATUS_DELAY)

        # Resize the coordinate frame extents as the data shrinks
        # DP NOTE: doesn't currently work correctly with non-zero frame starts
        def resize(var, size):
            start = config['frame_start_key'].format(var)
            stop = config['frame_stop_key'].format(var)
            args[start] //= size
            args[stop] = ceildiv(args[stop], size)
        resize('x', step.x)
        resize('y', step.y)
        resize('z', step.z)

    # if next iteration will split into aniso and iso downsampling, copy the coordinate frame
    if args['type'] != 'isotropic' and (resolution + 1) == args['iso_resolution']:
        def copy(var):
            args['iso_{}_start'.format(var)] = args['{}_start'.format(var)]
            args['iso_{}_stop'.format(var)] = args['{}_stop'.format(var)]
        copy('x')
        copy('y')
        copy('z')

    # Advance the loop and recalculate the conditional
    # Using max - 1 because resolution_max should not be a valid resolution
    # and res < res_max will end with res = res_max - 1, which generates res_max resolution
    args['resolution'] = resolution + 1
    args['res_lt_max'] = args['resolution'] < (args['resolution_max'] - 1)
    return args
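XYZ and ceildiv come from bossutils.multidimensional, which is not shown here. A rough sketch of the elementwise behavior the code above relies on, inferred from usage rather than taken from the library:

from collections import namedtuple

XYZ = namedtuple('XYZ', ['x', 'y', 'z'])  # the real type also overloads //, +, etc.

def ceildiv(a, b):
    # Elementwise ceiling division of two XYZ vectors, so the cube extent
    # covers any partial cube at the far edge of the frame.
    return XYZ(*(-(-p // q) for p, q in zip(a, b)))

# Example: a 1000x1000x100 frame in 512x512x16 cuboids
print(ceildiv(XYZ(1000, 1000, 100), XYZ(512, 512, 16)))  # XYZ(x=2, y=2, z=7)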
Example #5
def ingest_populate(args):
    """Populate the ingest upload SQS Queue with tile information

    Note: This activity will clear the upload queue of any existing
          messages

    Args:
        args: {
            'upload_sfn': ARN,

            'job_id': '',
            'upload_queue': ARN,
            'ingest_queue': ARN,
            'ingest_type': int (0 == TILE, 1 == VOLUMETRIC),
            'resolution': 0,
            'project_info': [col_id, exp_id, ch_id],

            't_start': 0,
            't_stop': 0,
            't_tile_size': 0,

            'x_start': 0,
            'x_stop': 0,
            'x_tile_size': 0,

            'y_start': 0,
            'y_stop': 0,
            'y_tile_size': 0,

            'z_start': 0,
            'z_stop': 0,
            'z_tile_size': 1,
            'z_chunk_size': 16,  (16 for tile ingest, typically 64 for volumetric)
        }

    Returns:
        {'arn': Upload queue ARN,
         'count': Number of messages put into the queue}
    """
    log.debug("Starting to populate upload queue")

    args['MAX_NUM_ITEMS_PER_LAMBDA'] = MAX_NUM_ITEMS_PER_LAMBDA

    if (args["ingest_type"] != 0) and (args["ingest_type"] != 1):
        raise ValueError("{}".format("Unknown ingest_type: {}".format(args["ingest_type"])))

    clear_queue(args['upload_queue'])

    results = fanout(aws.get_session(),
                     args['upload_sfn'],
                     split_args(args),
                     max_concurrent=MAX_NUM_PROCESSES,
                     rampup_delay=RAMPUP_DELAY,
                     rampup_backoff=RAMPUP_BACKOFF,
                     poll_delay=POLL_DELAY,
                     status_delay=STATUS_DELAY)

    # fanout may return None, or a list containing None entries; guard against
    # both to avoid a TypeError when summing.
    if results is None:
        messages_uploaded = 0
    else:
        messages_uploaded = sum(filter(None, results))

    if args["ingest_type"] == 0:
        tile_count = get_tile_count(args)
        if tile_count != messages_uploaded:
            log.warning("Messages uploaded do not match tile count.  tile count: {} messages uploaded: {}"
                      .format(tile_count, messages_uploaded))
        else:
            log.debug("tile count and messages uploaded match: {}".format(tile_count))

        return {
            'arn': args['upload_queue'],
            'count': tile_count,
        }
    elif args["ingest_type"] == 1:
        vol_count = get_volumetric_count(args)
        if vol_count != messages_uploaded:
            log.warning("Messages uploaded do not match volumetric count.  volumetric count: {} messages uploaded: {}"
                        .format(vol_count, messages_uploaded))
        else:
            log.debug("volumetric count and messages uploaded match: {}".format(vol_count))

        return {
            'arn': args['upload_queue'],
            'count': vol_count,
        }
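get_tile_count and get_volumetric_count are not shown in these examples. A hypothetical reconstruction of the tile count, assuming one queue message per tile along each axis of the ingest extent:

from math import ceil

def get_tile_count(args):
    # Hypothetical sketch only; the real helper is defined elsewhere.
    def tiles(axis):
        extent = args[axis + '_stop'] - args[axis + '_start']
        return ceil(extent / args[axis + '_tile_size'])
    return tiles('x') * tiles('y') * tiles('z') * tiles('t')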