def update_item_old(table_name: str,
                    key: str,
                    attr_name: str,
                    attr_value,
                    correlation_id=None):
    # None default: a new_correlation_id() call in the signature would be
    # evaluated only once, at function definition time
    if correlation_id is None:
        correlation_id = new_correlation_id()
    try:
        logger = get_logger()
        table = get_table(table_name)
        key_json = {'id': key}
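        # note: attr_name is spliced straight into the expression, so DynamoDB
        # reserved words such as 'status' will be rejected here; update_item
        # below works around this with ExpressionAttributeNames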
        update = 'SET ' + attr_name + ' = :new_value, modified = :m'
        expression_json = {':new_value': attr_value, ':m': str(now_with_tz())}

        logger.info('dynamodb update',
                    extra={
                        'table_name': table_name,
                        'key': key,
                        'attr_name': attr_name,
                        'attr_value': attr_value,
                        'correlation_id': correlation_id
                    })
        response = table.update_item(Key=key_json,
                                     UpdateExpression=update,
                                     ExpressionAttributeValues=expression_json)
    except Exception:
        raise
def process_task_signup(notification):
    logger = get_logger()
    correlation_id = new_correlation_id()

    try:
        # get basic data out of notification
        signup_details = notification['details']
        user_task_id = signup_details['id']

        # get additional data that hubspot needs from database
        extra_data = get_task_signup_data_for_crm(user_task_id, correlation_id)

        # put it all together for dispatch to HubSpot
        signup_details.update(extra_data)
        signup_details['signup_event_type'] = 'Sign-up'

        # check here that we have a hubspot id
        if signup_details.get('crm_id') is None:
            errorjson = {'user_task_id': user_task_id, 'correlation_id': str(correlation_id)}
            raise DetailedValueError('user does not have crm_id', errorjson)
        else:
            post_task_signup_to_crm(signup_details, correlation_id)
            mark_notification_processed(notification, correlation_id)
    except Exception as ex:
        error_message = str(ex)
        mark_notification_failure(notification, error_message, correlation_id)
def scan(table_name: str,
         filter_attr_name: str = None,
         filter_attr_values=None,
         correlation_id=None):
    if correlation_id is None:
        correlation_id = new_correlation_id()
    try:
        logger = get_logger()
        table = get_table(table_name)

        # accept string but make it into a list for later processing
        if isinstance(filter_attr_values, str):
            filter_attr_values = [filter_attr_values]
        logger.info('dynamodb scan',
                    extra={
                        'table_name': table_name,
                        'filter_attr_name': filter_attr_name,
                        'filter_attr_value': str(filter_attr_values),
                        'correlation_id': correlation_id
                    })
        if filter_attr_name is None:
            response = table.scan()
        else:
            filter_expr = Attr(filter_attr_name).eq(filter_attr_values[0])
            for value in filter_attr_values[1:]:
                filter_expr = filter_expr | Attr(filter_attr_name).eq(value)
            response = table.scan(FilterExpression=filter_expr)
        items = response['Items']
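        # note: a single table.scan call returns at most 1MB of data; to read
        # larger tables, feed response['LastEvaluatedKey'] back in as
        # ExclusiveStartKey until it is absent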
        logger.info('dynamodb scan result',
                    extra={
                        'count': str(len(items)),
                        'correlation_id': correlation_id
                    })
        return items
    except Exception:
        raise
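
# Example (hypothetical attribute and table names):
# scan('notifications', 'processing_status', ['new', 'retrying']) builds
# Attr('processing_status').eq('new') | Attr('processing_status').eq('retrying'),
# i.e. a logical OR of equality conditions across the supplied values.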
def process_user_registration(notification):
    logger = get_logger()
    correlation_id = new_correlation_id()
    try:
        notification_id = notification['id']
        details = notification['details']
        user_id = details['id']
        logger.info('process_user_registration: post to hubspot',
                    extra={'notification_id': str(notification_id), 'user_id': str(user_id), 'email': details['email'], 'correlation_id': str(correlation_id)})
        hubspot_id, is_new = post_new_user_to_crm(details, correlation_id)
        logger.info('process_user_registration: hubspot details',
                    extra={'notification_id': str(notification_id), 'hubspot_id': str(hubspot_id), 'isNew': str(is_new), 'correlation_id': str(correlation_id)})

        if hubspot_id == -1:
            errorjson = {'user_id': user_id, 'correlation_id': str(correlation_id)}
            raise DetailedValueError('could not find user in HubSpot', errorjson)

        user_jsonpatch = [
            {'op': 'replace', 'path': '/crm_id', 'value': str(hubspot_id)},
        ]

        patch_user(user_id, user_jsonpatch, now_with_tz(), correlation_id)

        mark_notification_processed(notification, correlation_id)
    except Exception as ex:
        error_message = str(ex)
        mark_notification_failure(notification, error_message, correlation_id)
Example #5
def main():
    # Argument parser
    parser = argparse.ArgumentParser()
    parser.add_argument("product", help="Venus product (ZIP)")
    parser.add_argument("coordinates", help="coordinate file")
    parser.add_argument("-e", "--extent", type=int, \
                        help="extent of the square ROI (meters), defaults to 100", default=100)
    parser.add_argument("-v",
                        "--verbose",
                        help="Set verbosity to DEBUG level",
                        action="store_true",
                        default=False)
    args = parser.parse_args()

    # Create the logger
    logger = utl.get_logger('roistats', args.verbose)

    # Create a Venus product object
    vns_product = common.Product.Product_zip_venus(args.product, logger)

    # Create an roi collection
    roi_collection = common.Roi.Roi_collection(args.coordinates, args.extent,
                                               logger)

    # Compute statistics for a given product
    list_stats = roi_collection.compute_stats_all_bands(vns_product,
                                                        logger,
                                                        stdout=True,
                                                        withAOT=True,
                                                        withVAP=True)

    sys.exit(0)
def process_notifications(event, context):
    logger = get_logger()
    notifications = get_notifications(NotificationAttributes.STATUS.value, [NotificationStatus.NEW.value, NotificationStatus.RETRYING.value])

    logger.info('process_notifications', extra={'count': str(len(notifications))})

    # note that we need to process all registrations first, then do task signups (otherwise we might try to process a signup for someone not yet registered)
    signup_notifications = []
    login_notifications = []
    for notification in notifications:
        notification_type = notification['type']
        if notification_type == NotificationType.USER_REGISTRATION.value:
            process_user_registration(notification)
        elif notification_type == NotificationType.TASK_SIGNUP.value:
            # add to list for later processing
            signup_notifications.append(notification)
        elif notification_type == NotificationType.USER_LOGIN.value:
            # add to list for later processing
            login_notifications.append(notification)
        else:
            # todo: raise a DetailedValueError with structured details
            raise Exception('unknown notification type: ' + str(notification_type))

    for signup_notification in signup_notifications:
        process_task_signup(signup_notification)

    for login_notification in login_notifications:
        process_user_login(login_notification)
def scan_old(table_name: str,
             filter_attr_name: str = None,
             filter_attr_value=None,
             correlation_id=None):
    if correlation_id is None:
        correlation_id = new_correlation_id()
    try:
        logger = get_logger()
        table = get_table(table_name)
        logger.info('dynamodb scan',
                    extra={
                        'table_name': table_name,
                        'filter_attr_name': filter_attr_name,
                        'filter_attr_value': filter_attr_value,
                        'correlation_id': correlation_id
                    })
        if filter_attr_name is None:
            response = table.scan()
        else:
            response = table.scan(
                FilterExpression=Attr(filter_attr_name).eq(filter_attr_value))
        items = response['Items']
        logger.info('dynamodb scan result',
                    extra={
                        'count': str(len(items)),
                        'correlation_id': correlation_id
                    })
        return items
    except Exception:
        raise
def process_user_login(notification):
    logger = get_logger()
    correlation_id = new_correlation_id()

    try:
        # get basic data out of notification
        login_details = notification['details']
        posting_result = post_user_login_to_crm(login_details, correlation_id)
        marking_result = mark_notification_processed(notification, correlation_id)
        return posting_result, marking_result

    except Exception as ex:
        error_message = str(ex)
        mark_notification_failure(notification, error_message, correlation_id)
def update_item(table_name: str,
                key: str,
                name_value_pairs: dict,
                correlation_id=None):
    if correlation_id is None:
        correlation_id = new_correlation_id()
    try:
        logger = get_logger()
        table = get_table(table_name)
        key_json = {'id': key}
        update_expr = 'SET #modified = :m '
        values_expr = {':m': str(now_with_tz())}
        attr_names_expr = {
            '#modified': 'modified'
        }  # not strictly necessary, but allows easy addition of names later
        param_count = 1
        for name, value in name_value_pairs.items():
            param_name = ':p' + str(param_count)
            map_name = None
            if name == 'status' or name.endswith('.status'):  # todo generalise this to other reserved words, and ensure it only catches whole words
                if '.' in name:
                    map_name = name.split('.')[0]
                attr_name = '#a' + str(param_count)
                attr_names_expr[attr_name] = 'status'
            else:
                attr_name = name
            if map_name is not None:
                attr_name = map_name + '.' + attr_name
            update_expr += ', ' + attr_name + ' = ' + param_name

            values_expr[param_name] = str(value)

            param_count += 1

        logger.info('dynamodb update',
                    extra={
                        'table_name': table_name,
                        'key': key,
                        'update_expr': update_expr,
                        'values_expr': values_expr,
                        'correlation_id': correlation_id
                    })
        response = table.update_item(
            Key=key_json,
            UpdateExpression=update_expr,
            ExpressionAttributeValues=values_expr,
            ExpressionAttributeNames=attr_names_expr,
        )
        return response
    except Exception:
        raise
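
# Example (hypothetical table and key, assuming the helpers above): calling
# update_item('tasks', 'task-123', {'status': 'complete', 'progress': '0.5'})
# builds roughly:
#   UpdateExpression:          'SET #modified = :m , #a1 = :p1, progress = :p2'
#   ExpressionAttributeNames:  {'#modified': 'modified', '#a1': 'status'}
#   ExpressionAttributeValues: {':m': '<timestamp>', ':p1': 'complete', ':p2': '0.5'}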
def delete_item(table_name: str, key: str,
                correlation_id=None):
    if correlation_id is None:
        correlation_id = new_correlation_id()
    try:
        logger = get_logger()
        table = get_table(table_name)
        key_json = {'id': key}
        logger.info('dynamodb delete',
                    extra={
                        'table_name': table_name,
                        'key': key,
                        'correlation_id': correlation_id
                    })
        response = table.delete_item(Key=key_json)
    except Exception:
        raise
def delete_all(table_name: str, correlation_id=None):
    if correlation_id is None:
        correlation_id = new_correlation_id()
    try:
        logger = get_logger()
        table = get_table(table_name)
        items = scan(table_name)
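        # note: scan() returns at most one page (~1MB) of results, so a very
        # large table may need repeated passes; table.batch_writer() would
        # also batch the deletes instead of sending one request per item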
        for item in items:
            key = item['id']
            key_json = {'id': key}
            logger.info('dynamodb delete_all',
                        extra={
                            'table_name': table_name,
                            'key': key,
                            'correlation_id': correlation_id
                        })
            table.delete_item(Key=key_json)
    except Exception:
        raise
def dateformattest(event, context):
    logger = get_logger()
    logger.info('dateformattest', extra=event)
    try:
        test_json = json.loads(event['body'])
        logger.info('body:', extra=test_json)
        date_string = test_json['date']
        date_string = date_string[:19]   # strip timezone and milliseconds
        format_string = test_json['format']
        logger.info('dateformattest', extra={'date_string': date_string, 'format_string': format_string})
        datetime_obj = datetime.strptime(date_string, format_string)
        created_timestamp = int(datetime_obj.timestamp() * 1000)

        response = {"statusCode": 200, "body": json.dumps({"created_timestamp": str(created_timestamp)})}
        return response
    except Exception as ex:
        logger.error(str(ex))
        return {"statusCode": 400, "body": json.dumps({"error": str(ex)})}
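
# Worked example (hypothetical request body): given
# {"date": "2019-06-10T14:15:16.171+00:00", "format": "%Y-%m-%dT%H:%M:%S"},
# the [:19] slice leaves "2019-06-10T14:15:16", which strptime parses once the
# milliseconds and timezone offset have been stripped.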
def get_item(table_name: str, key: str, correlation_id=None):
    if correlation_id is None:
        correlation_id = new_correlation_id()
    try:
        logger = get_logger()
        table = get_table(table_name)
        key_json = {'id': key}
        logger.info('dynamodb get',
                    extra={
                        'table_name': table_name,
                        'key': key,
                        'correlation_id': correlation_id
                    })
        response = table.get_item(Key=key_json)
        if 'Item' in response:
            return response['Item']
        else:
            # not found
            return None
    except Exception:
        raise
Example #14
def run_tasks(calling_func, val, lock):
    from common.utilities import get_logger
    logger = get_logger(fname='/backup/rabbit.log.out')
    try:
        parameters = pika.ConnectionParameters(**CONNECTION_PARAMETERS)
        factory = PikaFactory(parameters, 0, calling_func, logger)

        for i in range(total_tasks):
            reactor.connectTCP(parameters.host, parameters.port, factory)

        logger.info(' [*] Waiting for messages. To exit press CTRL+C')
        looping_call = task.LoopingCall(factory.stop_tasks, val, lock)
        looping_call.start(5)
        reactor.run()
    except Exception:
        logger.exception("Error")
        if reactor.running:
            reactor.stop()

    logger.info("Process Finished - %s" % os.getpid())
def put_item(table_name: str,
             key,
             item_type: str,
             item_details,
             item: dict,
             update_allowed=False,
             correlation_id=None):
    if correlation_id is None:
        correlation_id = new_correlation_id()
    try:
        logger = get_logger()
        table = get_table(table_name)

        item['id'] = str(key)
        item['type'] = item_type
        item['details'] = item_details
        now = str(now_with_tz())
        item['created'] = now
        item['modified'] = now

        logger.info('dynamodb put',
                    extra={
                        'table_name': table_name,
                        'item': item,
                        'correlation_id': correlation_id
                    })
        if update_allowed:
            response = table.put_item(Item=item)
        else:
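            # 'attribute_not_exists(id)' makes DynamoDB reject the put with a
            # ConditionalCheckFailedException when an item with this id
            # already exists, so the insert is create-only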
            response = table.put_item(
                Item=item, ConditionExpression='attribute_not_exists(id)')
    except ClientError as ex:
        error_code = ex.response['Error']['Code']
        errorjson = {
            'error_code': error_code,
            'table_name': table_name,
            'item_type': item_type,
            'id': str(key),
            'correlation_id': correlation_id
        }
        if error_code == 'ConditionalCheckFailedException':
            raise DuplicateInsertError('item already exists', errorjson)
        raise  # any other client error is unexpected; re-raise it unchanged
import os
import sys
import time
import multiprocessing
from common.SharedFibonacci import Manager
from common.utilities import get_basic_logger, get_logger

workers = 4
total_process = workers + 1
# logger = get_basic_logger()

logger = get_logger(fname='/backup/rabbit.log.out')


def launch_process(calling_func, val, lock):
    logger.info("Inside Launch Process with PId - %s" % os.getpid())
    from AsyncPika.task_factory import run_tasks
    run_tasks(calling_func, val, lock)


def handle_error(e):
    logger.exception(e)


def terminate_handler(val):
    logger.info("PID - %s Checking for stop.txt" % os.getpid())
    while val.value == 0:
        # logger.info("stop instruction -> "+ str(os.path.isfile("stop.txt")))
        if os.path.isfile("stop.txt"):
            val.value = 1
            break
        time.sleep(1)  # poll once a second instead of busy-waiting
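

# One plausible wiring for the pieces above (hypothetical; the original
# launcher is not shown and 'some_task_func' stands in for a real callback):
#
#   if __name__ == '__main__':
#       manager = multiprocessing.Manager()
#       val = manager.Value('i', 0)
#       lock = manager.Lock()
#       pool = multiprocessing.Pool(total_process)
#       pool.apply_async(terminate_handler, (val,), error_callback=handle_error)
#       for _ in range(workers):
#           pool.apply_async(launch_process, (some_task_func, val, lock),
#                            error_callback=handle_error)
#       pool.close()
#       pool.join()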
Example #17
def main():
    # Argument parser
    parser = argparse.ArgumentParser()
    parser.add_argument("list", help="List of paths of collection")
    parser.add_argument("--band",
                        help="Specific band in acix band definition",
                        type=int,
                        required=True)
    parser.add_argument(
        "--samples",
        help="Reflectance sampling, defaults to 100 (ie. 0.01)",
        type=int,
        default=100)
    parser.add_argument(
        "-s",
        "--save",
        help="Write location results as npy instead of stacking in memory",
        action="store_true",
        default=False)
    parser.add_argument("-v",
                        "--verbose",
                        help="Set verbosity to DEBUG level",
                        action="store_true",
                        default=False)
    parser.add_argument("--negative",
                        help="Save sr lt 0",
                        action="store_true",
                        default=False)
    parser.add_argument("--keepall",
                        help="Keep sr <= 0 but cloudfree",
                        action="store_true",
                        default=False)
    parser.add_argument("--stack",
                        help="Stack all sites in one file",
                        action="store_true",
                        default=False)

    args = parser.parse_args()

    # each entry: <acix_band>, <maja_band_prefix>, <resolution>, <samples (unused)>
    bdef_acix = (["band02", "SRE_B2.", "R1",
                  []], ["band03", "SRE_B3.", "R1",
                        []], ["band04", "SRE_B4.", "R1",
                              []], ["band05", "SRE_B5.", "R2",
                                    []], ["band06", "SRE_B6.", "R2",
                                          []], ["band07", "SRE_B7.", "R2", []],
                 ["band08", "SRE_B8.", "R1",
                  []], ["band8a", "SRE_B8A.", "R2",
                        []], ["band11", "SRE_B11.", "R2",
                              []], ["band12", "SRE_B12.", "R2", []])

    band_id = args.band

    # validate the band index before it is used to index bdef_acix
    if band_id < 0 or band_id >= len(bdef_acix):
        sys.stderr.write("Band ID out of range with value %i\n" % band_id)
        sys.exit(3)

    # Create the logger
    logger = utl.get_logger('acix_validate_' + bdef_acix[band_id][0],
                            args.verbose)

    # vector containers for stacked data
    v_stacked_valid_ref = np.zeros((0))
    v_stacked_valid_maja = np.zeros((0))
    v_stacked_lt0_ref = np.zeros((0))
    v_stacked_lt0_maja = np.zeros((0))
    v_stacked_keep_all_ref = np.zeros((0))
    v_stacked_keep_all_maja = np.zeros((0))

    match_count = 0
    len_check = 0

    with open(args.list, 'r') as f:
        paths_list = f.read().splitlines()

    for p in paths_list:
        paths = p.split(',')
        location_name = paths[0].split('/')[-1]

        # vector containers for location specific data
        v_local_valid_ref = np.zeros((0))
        v_local_valid_maja = np.zeros((0))
        v_local_lt0_ref = np.zeros((0))
        v_local_lt0_maja = np.zeros((0))
        v_local_keep_all_ref = np.zeros((0))
        v_local_keep_all_maja = np.zeros((0))

        acix_vermote_collection = clc.Collection(paths[0], logger)
        acix_maja_collection = clc.Collection(paths[1], logger)
        compare = cmp.Comparison(acix_vermote_collection, acix_maja_collection,
                                 logger)

        for match in compare.matching_products:
            logger.info("One-by-one for %s between %s and %s" %
                        (match[0], match[1], match[2]))
            p_ref = prd.Product_hdf_acix(match[1], logger)
            p_maja = prd.Product_dir_maja(match[2], logger)

            try:
                b_ref = p_ref.get_band(p_ref.find_band(bdef_acix[band_id][0]),
                                       scalef=p_ref.sre_scalef)
                m_ref_qa = p_ref.get_band(p_ref.find_band("refqa"))
                b_maja = p_maja.get_band(p_maja.find_band(
                    bdef_acix[band_id][1]),
                                         scalef=p_maja.sre_scalef)
                clm = p_maja.get_band(
                    p_maja.find_band("CLM_" + bdef_acix[band_id][2]))
                edg = p_maja.get_band(
                    p_maja.find_band("EDG_" + bdef_acix[band_id][2]))
                m_maja_qa, ratio = p_maja.get_mask(clm, edg, stats=True)
                del clm
                del edg

                # default filter : any cloudfree flaged both by ref and maja and sr >= 0
                is_valid = np.where((b_ref > 0)
                                    & (b_maja > 0)
                                    & (m_ref_qa == 1)
                                    & (m_maja_qa == 1))

                if args.negative:
                    # get sr < 0 though flaged cloudfree
                    is_cloudfree_but_negative = np.where((m_maja_qa == 1)
                                                         & (m_ref_qa == 1)
                                                         & ((b_maja < 0)
                                                            | (b_ref < 0)))

                if args.keepall:
                    # get sr < 0 though flaged cloudfree
                    is_cloudfree_keep_all = np.where((m_maja_qa == 1)
                                                     & (m_ref_qa == 1))

                # stack local valid values for all timestamp matches
                v_local_valid_ref = np.append(v_local_valid_ref,
                                              (b_ref[is_valid]))
                v_local_valid_maja = np.append(v_local_valid_maja,
                                               (b_maja[is_valid]))

                if args.negative:
                    # stack local cloudfree negative sr for all timestamp matches
                    v_local_lt0_ref = np.append(
                        v_local_lt0_ref, (b_ref[is_cloudfree_but_negative]))
                    v_local_lt0_maja = np.append(
                        v_local_lt0_maja, (b_maja[is_cloudfree_but_negative]))

                if args.keepall:
                    v_local_keep_all_ref = np.append(
                        v_local_keep_all_ref, (b_ref[is_cloudfree_keep_all]))
                    v_local_keep_all_maja = np.append(
                        v_local_keep_all_maja, (b_maja[is_cloudfree_keep_all]))

                match_count += 1
                len_check += len(b_ref[is_valid])

                if args.stack:
                    # if all locations have to be stacked in one single vector
                    v_stacked_valid_ref = np.append(v_stacked_valid_ref,
                                                    v_local_valid_ref)
                    v_stacked_valid_maja = np.append(v_stacked_valid_maja,
                                                     v_local_valid_maja)

                    if args.negative:
                        v_stacked_lt0_ref = np.append(v_stacked_lt0_ref,
                                                      v_local_lt0_ref)
                        v_stacked_lt0_maja = np.append(v_stacked_lt0_maja,
                                                       v_local_lt0_maja)

                    if args.keepall:
                        v_stacked_keep_all_ref = np.append(
                            v_stacked_keep_all_ref, v_local_keep_all_ref)
                        v_stacked_keep_all_maja = np.append(
                            v_stacked_keep_all_maja, v_local_keep_all_maja)

                else:
                    # save local vectors in one compressed file
                    if not args.keepall:
                        np.savez_compressed(
                            location_name + "_valid_" + bdef_acix[band_id][0],
                            [
                                v_local_valid_ref.astype('float32'),
                                v_local_valid_maja.astype('float32')
                            ])
                    if args.negative:
                        np.savez_compressed(
                            location_name + "_sr_lt_0_" +
                            bdef_acix[band_id][0], [
                                v_local_lt0_ref.astype('float32'),
                                v_local_lt0_maja.astype('float32')
                            ])

                    if args.keepall:
                        np.savez_compressed(
                            location_name + "_keep_all_" +
                            bdef_acix[band_id][0], [
                                v_local_keep_all_ref.astype('float32'),
                                v_local_keep_all_maja.astype('float32')
                            ])

            except TypeError:
                logger.warning(
                    "Had to skip comparison for %s because of unexpected product dimension (see previous error)"
                    % (match[0]))

    if args.stack:
        # if --stack, save stacked vector in one single compressed file
        np.savez_compressed("Stacked_valid_" + bdef_acix[band_id][0], [
            v_stacked_valid_ref.astype('float32'),
            v_stacked_valid_maja.astype('float32')
        ])
        if args.negative:
            np.savez_compressed("Stacked_sr_lt_0_" + bdef_acix[band_id][0], [
                v_stacked_lt0_ref.astype('float32'),
                v_stacked_lt0_maja.astype('float32')
            ])

        if args.keepall:
            np.savez_compressed("Stacked_sr_keep_all_" + bdef_acix[band_id][0],
                                [
                                    v_stacked_keep_all_ref.astype('float32'),
                                    v_stacked_keep_all_maja.astype('float32')
                                ])

        if len_check == len(v_stacked_valid_ref) and len_check == len(
                v_stacked_valid_maja):
            logger.info("Saved %i samples to Stacked_valid_%s.npz" %
                        (len_check, bdef_acix[band_id][0]))
        else:
            logger.error(
                "Inconsistent sample len between len_check=%i and len(v_stacked_valid_ref)=%i"
                % (len_check, len(v_stacked_valid_ref)))

    sys.exit(0)
Example #18
def main():
    # Argument parser
    parser = argparse.ArgumentParser()
    parser.add_argument("list", help="List of paths of collection")
    parser.add_argument("--saveto",
                        help="subdirectory to save figs to",
                        type=str)
    parser.add_argument("--hist",
                        help="Display quicklooks with histograms",
                        action="store_true",
                        default=False)
    parser.add_argument(
        "--keepall",
        help="Display quicklooks with histograms with keep_all",
        action="store_true",
        default=False)
    parser.add_argument("-v",
                        "--verbose",
                        help="Set verbosity to DEBUG level",
                        action="store_true",
                        default=False)

    args = parser.parse_args()
    gain_true = 3.
    gain_false = 2.5

    # always initialise: the summary loop at the end iterates over l_stats
    # even when --keepall was not given
    l_stats = []

    # Create the logger
    logger = utl.get_logger('Diag', args.verbose)

    bdef_acix = (["band02", "SRE_B2.", "R1",
                  []], ["band03", "SRE_B3.", "R1",
                        []], ["band04", "SRE_B4.", "R1",
                              []], ["band05", "SRE_B5.", "R2",
                                    []], ["band06", "SRE_B6.", "R2",
                                          []], ["band07", "SRE_B7.", "R2", []],
                 ["band08", "SRE_B8.", "R1",
                  []], ["band8a", "SRE_B8A.", "R2",
                        []], ["band11", "SRE_B11.", "R2",
                              []], ["band12", "SRE_B12.", "R2", []])

    with open(args.list, 'r') as f:
        paths_list = f.read().splitlines()

    for p in paths_list:
        paths = p.split(',')
        location_name = paths[0].split('/')[-1]

        acix_vermote_collection = clc.Collection(paths[0], logger)
        acix_maja_collection = clc.Collection(paths[1], logger)
        compare = cmp.Comparison(acix_vermote_collection, acix_maja_collection,
                                 logger)

        for match in compare.matching_products:
            logger.info("One-by-one for %s between %s and %s" %
                        (match[0], match[1], match[2]))
            p_ref = prd.Product_hdf_acix(match[1], logger)
            p_maja = prd.Product_dir_maja(match[2], logger)
            timestamp = match[0]

            try:
                b_ref_b2 = p_ref.get_band(p_ref.find_band(bdef_acix[0][0]),
                                          scalef=p_ref.sre_scalef)
                b_ref_b3 = p_ref.get_band(p_ref.find_band(bdef_acix[1][0]),
                                          scalef=p_ref.sre_scalef)
                b_ref_b4 = p_ref.get_band(p_ref.find_band(bdef_acix[2][0]),
                                          scalef=p_ref.sre_scalef)
                b_ref_b8 = p_ref.get_band(p_ref.find_band(bdef_acix[6][0]),
                                          scalef=p_ref.sre_scalef)
                m_ref_qa = p_ref.get_band(p_ref.find_band("refqa"))
                b_maja_b2 = p_maja.get_band(p_maja.find_band(bdef_acix[0][1]),
                                            scalef=p_maja.sre_scalef)
                b_maja_b3 = p_maja.get_band(p_maja.find_band(bdef_acix[1][1]),
                                            scalef=p_maja.sre_scalef)
                b_maja_b4 = p_maja.get_band(p_maja.find_band(bdef_acix[2][1]),
                                            scalef=p_maja.sre_scalef)
                b_maja_b8 = p_maja.get_band(p_maja.find_band(bdef_acix[6][1]),
                                            scalef=p_maja.sre_scalef)
                b_maja_aot = p_maja.get_band(p_maja.find_band("ATB_R1"),
                                             layer=1,
                                             scalef=p_maja.aot_scalef)
                b_maja_vap = p_maja.get_band(p_maja.find_band("ATB_R1"),
                                             layer=0,
                                             scalef=p_maja.vap_scalef)
                clm = p_maja.get_band(
                    p_maja.find_band("CLM_" + bdef_acix[0][2]))
                edg = p_maja.get_band(
                    p_maja.find_band("EDG_" + bdef_acix[0][2]))
                m_maja_qa, ratio = p_maja.get_mask(clm, edg, stats=True)

                if args.hist:
                    fig, axs = pl.subplots(nrows=3, ncols=3, figsize=[12, 12])
                    fig.suptitle(location_name + ' ' + timestamp[0:4] + '-' +
                                 timestamp[4:6] + '-' + timestamp[6:8],
                                 fontsize=16)

                    cset_true = axs[0, 0].imshow(np.dstack(
                        (b_maja_b4 * gain_true, b_maja_b3 * gain_true,
                         b_maja_b2 * gain_true)),
                                                 interpolation='none',
                                                 aspect='equal')
                    axs[0, 0].set_title("Maja quicklook (B4, B3, B2)")
                    cset_maja_cloud_contour = axs[0, 0].contour(m_maja_qa)
                    # axs[0, 0].clabel(cset_maja_cloud_contour, inline=1, fontsize=10)

                    cset_maja_qa = axs[1, 0].imshow(m_maja_qa,
                                                    interpolation='none',
                                                    aspect='equal',
                                                    vmin=0,
                                                    vmax=1,
                                                    cmap='gray')
                    axs[1, 0].set_title("Maja CLM & EDG (valid=1)")
                    divider = make_axes_locatable(axs[1, 0])
                    cax = divider.append_axes("right", size="5%", pad=0.05)
                    pl.colorbar(cset_maja_qa,
                                cax=cax)  # , orientation='horizontal')

                    # cset_ref_qa = axs[2, 0].imshow(m_ref_qa, interpolation='none', aspect='equal', vmin=0, vmax=1, cmap='gray')
                    # axs[2, 0].set_title("Reference QA (valid=1)")
                    # divider = make_axes_locatable(axs[2, 0])
                    # cax = divider.append_axes("right", size="5%", pad=0.05)
                    # pl.colorbar(cset_ref_qa, cax=cax)  # , orientation='horizontal')

                    cset_true = axs[2, 0].imshow(np.dstack(
                        (b_ref_b4 * gain_true, b_ref_b3 * gain_true,
                         b_ref_b2 * gain_true)),
                                                 interpolation='none',
                                                 aspect='equal')
                    axs[2, 0].set_title("Ref quicklook (B4, B3, B2)")
                    axs[2, 0].contour(m_ref_qa)

                    # cset_false = axs[0, 1].imshow(np.dstack((b_maja_b8*gain_false, b_maja_b3*gain_false, b_maja_b2*gain_false)), interpolation='none', aspect='equal')
                    # axs[0, 1].set_title("%s %s (B8,B3,B2)" % (location_name, timestamp))

                    cset_maja_vap = axs[0, 1].imshow(b_maja_vap,
                                                     interpolation='none',
                                                     aspect='equal',
                                                     cmap='RdBu')
                    axs[0, 1].set_title("Maja VAP $(g.cm^{-2})$")
                    divider = make_axes_locatable(axs[0, 1])
                    cax = divider.append_axes("right", size="5%", pad=0.05)
                    pl.colorbar(cset_maja_vap, cax=cax,
                                format='%4.2f')  # , orientation='horizontal')

                    # cset_maja_vap = axs[0, 1].imshow(np.dstack((b_maja_b4*gain_true, b_maja_b3*gain_true, b_maja_b2*gain_true)), interpolation='none', aspect='equal')
                    # axs[0, 1].set_title("Maja VAP")
                    # divider = make_axes_locatable(axs[0, 1])
                    # cax = divider.append_axes("right", size="5%", pad=0.05)
                    # pl.colorbar(cset_maja_vap, cax=cax)  # , orientation='horizontal')
                    # cset_maja_vap_contour = axs[0, 1].contour(b_maja_vap)
                    # axs[0, 1].clabel(cset_maja_vap_contour, inline=1, fontsize=10)

                    cset_maja_aot = axs[0, 2].imshow(b_maja_aot, cmap='Wistia')
                    axs[0, 2].imshow(np.dstack(
                        (b_maja_b4 * gain_true, b_maja_b3 * gain_true,
                         b_maja_b2 * gain_true)),
                                     interpolation='none',
                                     aspect='equal')
                    axs[0, 2].set_title("Maja AOT $(-)$")
                    divider = make_axes_locatable(axs[0, 2])
                    cax = divider.append_axes("right", size="5%", pad=0.05)
                    pl.colorbar(cset_maja_aot, cax=cax,
                                format='%4.2f')  # , orientation='horizontal')
                    cset_maja_aot_contour = axs[0, 2].contour(b_maja_aot,
                                                              cmap='Wistia')
                    axs[0, 2].clabel(cset_maja_aot_contour,
                                     inline=1,
                                     fontsize=10)

                    # B2
                    if args.keepall:
                        is_valid = np.where((m_ref_qa == 1) & (m_maja_qa == 1))
                        min_sr = -0.1
                        max_sr = 0.7
                        is_log = False
                        filter_label = "(QA=1)"
                        b_ref_b2_is_valid_count = len(
                            b_ref_b2[is_valid].flatten())
                        search = np.where(b_ref_b2[is_valid] < 0)
                        b_ref_b2_is_valid_and_lt0_count = len(
                            b_ref_b2[is_valid][search].flatten())
                        search = np.where(b_maja_b2[is_valid] < 0)
                        b_maja_b2_is_valid_and_lt0_count = len(
                            b_maja_b2[is_valid][search].flatten())

                    else:
                        is_valid = np.where((b_ref_b2 > 0)
                                            & (b_maja_b2 > 0)
                                            & (m_ref_qa == 1)
                                            & (m_maja_qa == 1))
                        min_sr = 0
                        max_sr = 1
                        is_log = False
                        filter_label = "(QA=1 & sr>0)"

                    axs[1, 1].hist(b_ref_b2[is_valid].flatten(),
                                   bins=200,
                                   histtype='step',
                                   log=is_log,
                                   label='Ref',
                                   range=(min_sr, max_sr),
                                   density=False)
                    axs[1, 1].hist(b_maja_b2[is_valid].flatten(),
                                   bins=200,
                                   histtype='step',
                                   log=is_log,
                                   label='Maja',
                                   range=(min_sr, max_sr),
                                   density=False)
                    axs[1, 1].set_title(
                        "B2 " + filter_label + " RMSE=%8.6f" %
                        utl.rmse(b_ref_b2[is_valid].flatten(),
                                 b_maja_b2[is_valid].flatten()))
                    axs[1, 1].legend()

                    # B3
                    if args.keepall:
                        is_valid = np.where((m_ref_qa == 1) & (m_maja_qa == 1))
                        min_sr = -0.1
                        max_sr = 0.7
                        is_log = False
                        filter_label = "(QA=1)"
                        b_ref_b3_is_valid_count = len(
                            b_ref_b3[is_valid].flatten())
                        search = np.where(b_ref_b3[is_valid] < 0)
                        b_ref_b3_is_valid_and_lt0_count = len(
                            b_ref_b3[is_valid][search].flatten())
                        search = np.where(b_maja_b3[is_valid] < 0)
                        b_maja_b3_is_valid_and_lt0_count = len(
                            b_maja_b3[is_valid][search].flatten())

                    else:
                        is_valid = np.where((b_ref_b3 > 0)
                                            & (b_maja_b3 > 0)
                                            & (m_ref_qa == 1)
                                            & (m_maja_qa == 1))
                        min_sr = 0
                        max_sr = 1
                        is_log = False
                        filter_label = "(QA=1 & sr>0)"

                    axs[1, 2].hist(b_ref_b3[is_valid].flatten(),
                                   bins=200,
                                   histtype='step',
                                   log=is_log,
                                   label='Ref',
                                   range=(min_sr, max_sr))
                    axs[1, 2].hist(b_maja_b3[is_valid].flatten(),
                                   bins=200,
                                   histtype='step',
                                   log=is_log,
                                   label='Maja',
                                   range=(min_sr, max_sr))
                    axs[1, 2].set_title(
                        "B3 " + filter_label + " RMSE=%8.6f" %
                        utl.rmse(b_ref_b3[is_valid].flatten(),
                                 b_maja_b3[is_valid].flatten()))
                    axs[1, 2].legend()

                    # B4
                    if args.keepall:
                        is_valid = np.where((m_ref_qa == 1) & (m_maja_qa == 1))
                        min_sr = -0.1
                        max_sr = 0.7
                        is_log = False
                        filter_label = "(QA=1)"
                        b_ref_b4_is_valid_count = len(
                            b_ref_b4[is_valid].flatten())
                        search = np.where(b_ref_b4[is_valid] < 0)
                        b_ref_b4_is_valid_and_lt0_count = len(
                            b_ref_b4[is_valid][search].flatten())
                        search = np.where(b_maja_b4[is_valid] < 0)
                        b_maja_b4_is_valid_and_lt0_count = len(
                            b_maja_b4[is_valid][search].flatten())

                    else:
                        is_valid = np.where((b_ref_b4 > 0)
                                            & (b_maja_b4 > 0)
                                            & (m_ref_qa == 1)
                                            & (m_maja_qa == 1))
                        min_sr = 0
                        max_sr = 1
                        is_log = False
                        filter_label = "(QA=1 & sr>0)"

                    axs[2, 1].hist(b_ref_b4[is_valid].flatten(),
                                   bins=200,
                                   histtype='step',
                                   log=is_log,
                                   label='Ref',
                                   range=(min_sr, max_sr))
                    axs[2, 1].hist(b_maja_b4[is_valid].flatten(),
                                   bins=200,
                                   histtype='step',
                                   log=is_log,
                                   label='Maja',
                                   range=(min_sr, max_sr))
                    axs[2, 1].set_title(
                        "B4 " + filter_label + " RMSE=%8.6f" %
                        utl.rmse(b_ref_b4[is_valid].flatten(),
                                 b_maja_b4[is_valid].flatten()))
                    axs[2, 1].legend()

                    # B8
                    if args.keepall:
                        is_valid = np.where((m_ref_qa == 1) & (m_maja_qa == 1))
                        min_sr = -0.1
                        max_sr = 0.7
                        is_log = False
                        filter_label = "(QA=1)"
                        b_ref_b8_is_valid_count = len(
                            b_ref_b8[is_valid].flatten())
                        search = np.where(b_ref_b8[is_valid] < 0)
                        b_ref_b8_is_valid_and_lt0_count = len(
                            b_ref_b8[is_valid][search].flatten())
                        search = np.where(b_maja_b8[is_valid] < 0)
                        b_maja_b8_is_valid_and_lt0_count = len(
                            b_maja_b8[is_valid][search].flatten())

                    else:
                        is_valid = np.where((b_ref_b8 > 0)
                                            & (b_maja_b8 > 0)
                                            & (m_ref_qa == 1)
                                            & (m_maja_qa == 1))
                        min_sr = 0
                        max_sr = 1
                        is_log = False
                        filter_label = "(QA=1 & sr>0)"

                    axs[2, 2].hist(b_ref_b8[is_valid].flatten(),
                                   bins=200,
                                   histtype='step',
                                   log=is_log,
                                   label='Ref',
                                   range=(min_sr, max_sr))
                    axs[2, 2].hist(b_maja_b8[is_valid].flatten(),
                                   bins=200,
                                   histtype='step',
                                   log=is_log,
                                   label='Maja',
                                   range=(min_sr, max_sr))
                    axs[2, 2].set_title(
                        "B8 " + filter_label + " RMSE=%8.6f" %
                        utl.rmse(b_ref_b8[is_valid].flatten(),
                                 b_maja_b8[is_valid].flatten()))
                    axs[2, 2].legend()

                    fig.tight_layout()
                    fig.subplots_adjust(top=0.88)
                    pl.savefig(location_name + '_' + timestamp +
                               '_All_quicklooks.png')
                    pl.close('all')

                    if args.keepall:
                        l_stats = l_stats + [[
                            location_name + timestamp, b_ref_b2_is_valid_count,
                            b_ref_b3_is_valid_count, b_ref_b4_is_valid_count,
                            b_ref_b8_is_valid_count,
                            b_ref_b2_is_valid_and_lt0_count,
                            b_ref_b3_is_valid_and_lt0_count,
                            b_ref_b4_is_valid_and_lt0_count,
                            b_ref_b8_is_valid_and_lt0_count,
                            b_maja_b2_is_valid_and_lt0_count,
                            b_maja_b3_is_valid_and_lt0_count,
                            b_maja_b4_is_valid_and_lt0_count,
                            b_maja_b8_is_valid_and_lt0_count
                        ]]

                else:
                    try:
                        fig, axs = pl.subplots(figsize=[12, 12])
                        pl.title(location_name + ' ' + timestamp[0:4] + '-' +
                                 timestamp[4:6] + '-' + timestamp[6:8],
                                 fontsize=16)

                        axs.imshow(np.dstack(
                            (b_maja_b4 * gain_true, b_maja_b3 * gain_true,
                             b_maja_b2 * gain_true)),
                                   interpolation='none',
                                   aspect='equal')
                        axs.set_title("MAJA Quicklook (B4, B3, B2)")
                        axs.contour(m_maja_qa)

                        pl.savefig(location_name + '_' + timestamp +
                                   '_Maja_quicklooks.png')
                        pl.close('all')

                        fig, axs = pl.subplots(figsize=[12, 12])
                        pl.title(location_name + ' ' + timestamp[0:4] + '-' +
                                 timestamp[4:6] + '-' + timestamp[6:8],
                                 fontsize=16)

                        axs.imshow(np.dstack(
                            (b_ref_b4 * gain_true, b_ref_b3 * gain_true,
                             b_ref_b2 * gain_true)),
                                   interpolation='none',
                                   aspect='equal')
                        axs.set_title("REF Quicklook (B4, B3, B2)")
                        axs.contour(m_ref_qa)

                        pl.savefig(location_name + '_' + timestamp +
                                   '_Ref_quicklooks.png')
                        pl.close('all')
                    except Exception:
                        logger.error(sys.exc_info())

            except Exception:
                logger.error(sys.exc_info())

    b_common_stats_dataset_count = 0

    b_ref_stats_dataset_with_any_sr_lt0_count = 0
    b_ref_stats_dataset_with_atmost_025prc_sr_lt0_count = 0
    b_ref_stats_dataset_with_atmost_05prc_sr_lt0_count = 0
    b_ref_stats_dataset_with_atmost_10prc_sr_lt0_count = 0
    b_ref_stats_dataset_with_morethan_10prc_sr_lt0_count = 0

    b_maja_stats_dataset_with_any_sr_lt0_count = 0
    b_maja_stats_dataset_with_atmost_025prc_sr_lt0_count = 0
    b_maja_stats_dataset_with_atmost_05prc_sr_lt0_count = 0
    b_maja_stats_dataset_with_atmost_10prc_sr_lt0_count = 0
    b_maja_stats_dataset_with_morethan_10prc_sr_lt0_count = 0

    for l in range(len(l_stats)):
        b_common_stats_dataset_count += 1
        if (l_stats[l][5] > 0) or (l_stats[l][6] > 0) or (
                l_stats[l][7] > 0) or (l_stats[l][8] > 0):
            b_ref_stats_dataset_with_any_sr_lt0_count += 1

            if max(l_stats[l][5], l_stats[l][6], l_stats[l][7],
                   l_stats[l][8]) <= 20250:
                logger.info(
                    "STATS REF_LT025: %s: b2_ratio=%i/%i, b3_ratio=%i/%i, b4_ratio=%i/%i, b8_ratio=%i/%i"
                    % (l_stats[l][0], l_stats[l][5], l_stats[l][1],
                       l_stats[l][6], l_stats[l][2], l_stats[l][7],
                       l_stats[l][3], l_stats[l][8], l_stats[l][4]))
                b_ref_stats_dataset_with_atmost_025prc_sr_lt0_count += 1

            elif max(l_stats[l][5], l_stats[l][6], l_stats[l][7],
                     l_stats[l][8]) <= 40500:
                logger.info(
                    "STATS REF_LT05: %s: b2_ratio=%i/%i, b3_ratio=%i/%i, b4_ratio=%i/%i, b8_ratio=%i/%i"
                    % (l_stats[l][0], l_stats[l][5], l_stats[l][1],
                       l_stats[l][6], l_stats[l][2], l_stats[l][7],
                       l_stats[l][3], l_stats[l][8], l_stats[l][4]))
                b_ref_stats_dataset_with_atmost_05prc_sr_lt0_count += 1

            elif max(l_stats[l][5], l_stats[l][6], l_stats[l][7],
                     l_stats[l][8]) <= 81000:
                b_ref_stats_dataset_with_atmost_10prc_sr_lt0_count += 1
                logger.info(
                    "STATS REF_LT10: %s: b2_ratio=%i/%i, b3_ratio=%i/%i, b4_ratio=%i/%i, b8_ratio=%i/%i"
                    % (l_stats[l][0], l_stats[l][5], l_stats[l][1],
                       l_stats[l][6], l_stats[l][2], l_stats[l][7],
                       l_stats[l][3], l_stats[l][8], l_stats[l][4]))

            else:
                b_ref_stats_dataset_with_morethan_10prc_sr_lt0_count += 1
                logger.info(
                    "STATS REF_GT10: %s: b2_ratio=%i/%i, b3_ratio=%i/%i, b4_ratio=%i/%i, b8_ratio=%i/%i"
                    % (l_stats[l][0], l_stats[l][5], l_stats[l][1],
                       l_stats[l][6], l_stats[l][2], l_stats[l][7],
                       l_stats[l][3], l_stats[l][8], l_stats[l][4]))

        if (l_stats[l][9] > 0) or (l_stats[l][10] > 0) or (
                l_stats[l][11] > 0) or (l_stats[l][12] > 0):
            b_maja_stats_dataset_with_any_sr_lt0_count += 1

            if max(l_stats[l][9], l_stats[l][10], l_stats[l][11],
                   l_stats[l][12]) <= 20250:
                logger.info(
                    "STATS MAJA_LT025: %s: b2_ratio=%i/%i, b3_ratio=%i/%i, b4_ratio=%i/%i, b8_ratio=%i/%i"
                    % (l_stats[l][0], l_stats[l][9], l_stats[l][1],
                       l_stats[l][10], l_stats[l][2], l_stats[l][11],
                       l_stats[l][3], l_stats[l][12], l_stats[l][4]))
                b_maja_stats_dataset_with_atmost_025prc_sr_lt0_count += 1

            elif max(l_stats[l][9], l_stats[l][10], l_stats[l][11],
                     l_stats[l][12]) <= 40500:
                logger.info(
                    "STATS LT05: %s: b2_ratio=%i/%i, b3_ratio=%i/%i, b4_ratio=%i/%i, b8_ratio=%i/%i"
                    % (l_stats[l][0], l_stats[l][9], l_stats[l][1],
                       l_stats[l][10], l_stats[l][2], l_stats[l][11],
                       l_stats[l][3], l_stats[l][12], l_stats[l][4]))
                b_maja_stats_dataset_with_atmost_05prc_sr_lt0_count += 1

            elif max(l_stats[l][9], l_stats[l][10], l_stats[l][11],
                     l_stats[l][12]) <= 81000:
                b_maja_stats_dataset_with_atmost_10prc_sr_lt0_count += 1
                logger.info(
                    "STATS LT10: %s: b2_ratio=%i/%i, b3_ratio=%i/%i, b4_ratio=%i/%i, b8_ratio=%i/%i"
                    % (l_stats[l][0], l_stats[l][9], l_stats[l][1],
                       l_stats[l][10], l_stats[l][2], l_stats[l][11],
                       l_stats[l][3], l_stats[l][12], l_stats[l][4]))

            else:
                b_maja_stats_dataset_with_morethan_10prc_sr_lt0_count += 1
                logger.info(
                    "STATS GT10: %s: b2_ratio=%i/%i, b3_ratio=%i/%i, b4_ratio=%i/%i, b8_ratio=%i/%i"
                    % (l_stats[l][0], l_stats[l][9], l_stats[l][1],
                       l_stats[l][10], l_stats[l][2], l_stats[l][11],
                       l_stats[l][3], l_stats[l][12], l_stats[l][4]))

    logger.info("STATS: Tested %i location and timestamps" %
                b_common_stats_dataset_count)
    logger.info("STATS: Found %i datasets with sr_ref < 0" %
                b_ref_stats_dataset_with_any_sr_lt0_count)
    logger.info("STATS:     %i datasets with at most 2.5%% of sr_ref < 0" %
                b_ref_stats_dataset_with_atmost_025prc_sr_lt0_count)
    logger.info("STATS:     %i datasets with 2.5%% to 5%% of sr_ref < 0" %
                b_ref_stats_dataset_with_atmost_05prc_sr_lt0_count)
    logger.info("STATS:     %i datasets with 5%% to 10%% of sr_ref < 0" %
                b_ref_stats_dataset_with_atmost_10prc_sr_lt0_count)
    logger.info("STATS:     %i datasets with more than 10%% of sr_ref < 0" %
                b_ref_stats_dataset_with_morethan_10prc_sr_lt0_count)

    logger.info("STATS: Found %i datasets with sr_maja < 0" %
                b_maja_stats_dataset_with_any_sr_lt0_count)
    logger.info("STATS:     %i datasets with at most 2.5%% of sr_maja < 0" %
                b_maja_stats_dataset_with_atmost_025prc_sr_lt0_count)
    logger.info("STATS:     %i datasets with 2.5%% to 5%% of sr_maja < 0" %
                b_maja_stats_dataset_with_atmost_05prc_sr_lt0_count)
    logger.info("STATS:     %i datasets with 5%% to 10%% of sr_maja < 0" %
                b_maja_stats_dataset_with_atmost_10prc_sr_lt0_count)
    logger.info("STATS:     %i datasets with more than 10%% of sr_maja < 0" %
                b_maja_stats_dataset_with_morethan_10prc_sr_lt0_count)

    print(b_common_stats_dataset_count,
          b_ref_stats_dataset_with_any_sr_lt0_count,
          b_ref_stats_dataset_with_atmost_10prc_sr_lt0_count)
    print(l_stats)

    sys.exit(0)