Example #1
def parse_base64_feed(base64_feed):
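    # Decode a base64-encoded JSON feed, validate feed['feed'] against
    # config.FEED_SCHEMA, and enrich it with data from the latest broadcast
    # made by the feed's source address before returning it.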
    decoded_feed = base64.b64decode(base64_feed)
    feed = json.loads(decoded_feed)
    if isinstance(feed, dict) and 'feed' in feed:
        errors = util.is_valid_json(feed['feed'], config.FEED_SCHEMA)
        if len(errors) > 0:
            raise Exception("Invalid json: {}".format(", ".join(errors)))
        # get broadcast infos
        params = {
            'filters': {
                'field': 'source',
                'op': '=',
                'value': feed['feed']['address']
            },
            'order_by': 'tx_index',
            'order_dir': 'DESC',
            'limit': 1
        }
        broadcasts = util.call_jsonrpc_api('get_broadcasts', params)['result']
        if len(broadcasts) == 0:
            raise Exception("invalid feed address")

        complete_feed = {}
        complete_feed['fee_fraction_int'] = broadcasts[0]['fee_fraction_int']
        complete_feed['source'] = broadcasts[0]['source']
        complete_feed['locked'] = broadcasts[0]['locked']
        complete_feed['counters'] = get_feed_counters(broadcasts[0]['source'])
        complete_feed['info_data'] = sanitize_json_data(feed['feed'])
        
        feed['feed'] = complete_feed
        return feed
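
A minimal sketch of the input parse_base64_feed expects; the feed address and title below are hypothetical, and the enrichment step relies on the module-level util, config, get_feed_counters and sanitize_json_data helpers shown above.

import base64
import json

# A feed payload has the form {"feed": {...}}, JSON-serialized and base64-encoded.
raw_feed = {"feed": {"address": "1ExampleFeedAddress", "title": "Example feed"}}
base64_feed = base64.b64encode(json.dumps(raw_feed).encode("utf-8"))

# parse_base64_feed(base64_feed) would decode this, validate feed["feed"] against
# config.FEED_SCHEMA, look up the latest broadcast from the address, and return
# the feed with feed["feed"] replaced by the enriched record.
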
Example #2
def parse_base64_feed(base64_feed):
    decoded_feed = base64.b64decode(base64_feed)
    feed = json.loads(decoded_feed)
    if isinstance(feed, dict) and "feed" in feed:
        errors = util.is_valid_json(feed["feed"], config.FEED_SCHEMA)
        if len(errors) > 0:
            raise Exception("Invalid json: {}".format(", ".join(errors)))
        # get broadcast infos
        params = {
            "filters": {"field": "source", "op": "=", "value": feed["feed"]["address"]},
            "order_by": "tx_index",
            "order_dir": "DESC",
            "limit": 1,
        }
        broadcasts = util.call_jsonrpc_api("get_broadcasts", params)["result"]
        if len(broadcasts) == 0:
            raise Exception("invalid feed address")

        complete_feed = {}
        complete_feed["fee_fraction_int"] = broadcasts[0]["fee_fraction_int"]
        complete_feed["source"] = broadcasts[0]["source"]
        complete_feed["locked"] = broadcasts[0]["locked"]
        complete_feed["counters"] = get_feed_counters(broadcasts[0]["source"])
        complete_feed["info_data"] = sanitize_json_data(feed["feed"])

        feed["feed"] = complete_feed
        return feed
Example #3
def process_asset_info(db, asset, info_data):
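    # Validate fetched extended asset info against config.ASSET_SCHEMA, fetch any
    # referenced asset image, and persist the sanitized record for the asset.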
    # sanity check
    assert asset['info_status'] == 'needfetch'
    assert 'info_url' in asset
    assert util.is_valid_url(
        asset['info_url'],
        allow_no_protocol=True)  #already validated in the fetch

    errors = util.is_valid_json(info_data, config.ASSET_SCHEMA)

    if not isinstance(info_data, dict) or 'asset' not in info_data:
        errors.append('Invalid data format')
    elif asset['asset'] != info_data['asset']:
        errors.append('asset field does not match asset name')

    if len(errors) > 0:
        inc_fetch_retry(db, asset, new_status='invalid', errors=errors)
        return (False, errors)

    asset['info_status'] = 'valid'

    #fetch any associated images...
    #TODO: parallelize this 2nd level asset image fetching ... (e.g. just compose a list here, and process it in later on)
    if 'image' in info_data:
        info_data['valid_image'] = util.fetch_image(info_data['image'],
                                                    config.SUBDIR_ASSET_IMAGES,
                                                    asset['asset'],
                                                    fetch_timeout=5)

    asset['info_data'] = sanitize_json_data(info_data)
    db.asset_extended_info.save(asset)
    return (True, None)
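
A hypothetical call sketch illustrating the record shapes process_asset_info expects; the asset name, info_url, image URL, and the db handle are illustrative assumptions, and util/config refer to the module-level helpers used above.

# An asset record queued for fetching, plus the JSON document retrieved from its info_url.
asset = {
    'asset': 'MYTOKEN',
    'info_status': 'needfetch',
    'info_url': 'example.com/mytoken.json',
}
info_data = {
    'asset': 'MYTOKEN',  # must match asset['asset'], otherwise an error is recorded
    'description': 'Example enhanced asset info',
    'image': 'http://example.com/mytoken.png',  # optional; fetched into config.SUBDIR_ASSET_IMAGES
}

# success, errors = process_asset_info(db, asset, info_data)
# On success the record is marked 'valid', sanitized, and saved via
# db.asset_extended_info.save(asset); on failure the fetch retry counter is bumped.
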
Example #4
def process_asset_info(db, asset, info_data):
    # sanity check
    assert asset['info_status'] == 'needfetch'
    assert 'info_url' in asset
    assert util.is_valid_url(asset['info_url'], allow_no_protocol=True) #already validated in the fetch

    errors = util.is_valid_json(info_data, config.ASSET_SCHEMA)
    
    if not isinstance(info_data, dict) or 'asset' not in info_data:
        errors.append('Invalid data format')
    elif asset['asset'] != info_data['asset']:
        errors.append('asset field does not match asset name')
   
    if len(errors) > 0:
        inc_fetch_retry(db, asset, new_status='invalid', errors=errors)
        return (False, errors) 

    asset['info_status'] = 'valid'

    #fetch any associated images...
    #TODO: parallelize this 2nd level asset image fetching ... (e.g. just compose a list here, and process it in later on)
    if 'image' in info_data:
        info_data['valid_image'] = util.fetch_image(info_data['image'], config.SUBDIR_ASSET_IMAGES, asset['asset'])
        
    asset['info_data'] = sanitize_json_data(info_data)
    db.asset_extended_info.save(asset)
    return (True, None)
Example #5
def parse_base64_feed(base64_feed):
    decoded_feed = base64.b64decode(base64_feed)
    feed = json.loads(decoded_feed)
    if isinstance(feed, dict) and 'feed' in feed:
        errors = util.is_valid_json(feed['feed'], config.FEED_SCHEMA)
        if len(errors) > 0:
            raise Exception("Invalid json: {}".format(", ".join(errors)))
        # get broadcast infos
        params = {
            'filters': {
                'field': 'source',
                'op': '=',
                'value': feed['feed']['address']
            },
            'order_by': 'tx_index',
            'order_dir': 'DESC',
            'limit': 1
        }
        broadcasts = util.call_jsonrpc_api('get_broadcasts', params)['result']
        if len(broadcasts) == 0:
            raise Exception("invalid feed address")

        complete_feed = {}
        complete_feed['fee_fraction_int'] = broadcasts[0]['fee_fraction_int']
        complete_feed['source'] = broadcasts[0]['source']
        complete_feed['locked'] = broadcasts[0]['locked']
        complete_feed['counters'] = get_feed_counters(broadcasts[0]['source'])
        complete_feed['info_data'] = sanitize_json_data(feed['feed'])
        
        feed['feed'] = complete_feed
        return feed
Example #6
def process_feed_info(db, feed, info_data):
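    # Validate fetched feed metadata against config.FEED_SCHEMA, fetch the topic,
    # operator, and per-target images, and persist the sanitized feed record.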
    # sanity check
    assert feed['info_status'] == 'needfetch'
    assert 'info_url' in feed
    assert util.is_valid_url(
        feed['info_url'],
        allow_no_protocol=True)  #already validated in the fetch

    errors = util.is_valid_json(info_data, config.FEED_SCHEMA)

    if not isinstance(info_data, dict) or 'address' not in info_data:
        errors.append('Invalid data format')
    elif feed['source'] != info_data['address']:
        errors.append('Invalid address')

    if len(errors) > 0:
        inc_fetch_retry(db, feed, new_status='invalid', errors=errors)
        return (False, errors)

    feed['info_status'] = 'valid'

    #fetch any associated images...
    #TODO: parallelize this 2nd level feed image fetching ... (e.g. just compose a list here, and process it in later on)
    if 'image' in info_data:
        info_data['valid_image'] = util.fetch_image(info_data['image'],
                                                    config.SUBDIR_FEED_IMAGES,
                                                    feed['source'] + '_topic',
                                                    fetch_timeout=5)
    if 'operator' in info_data and 'image' in info_data['operator']:
        info_data['operator']['valid_image'] = util.fetch_image(
            info_data['operator']['image'],
            config.SUBDIR_FEED_IMAGES,
            feed['source'] + '_owner',
            fetch_timeout=5)
    if 'targets' in info_data:
        for i in range(len(info_data['targets'])):
            if 'image' in info_data['targets'][i]:
                image_name = feed['source'] + '_tv_' + str(
                    info_data['targets'][i]['value'])
                info_data['targets'][i]['valid_image'] = util.fetch_image(
                    info_data['targets'][i]['image'],
                    config.SUBDIR_FEED_IMAGES,
                    image_name,
                    fetch_timeout=5)

    feed['info_data'] = sanitize_json_data(info_data)
    db.feeds.save(feed)
    return (True, None)
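
An illustrative sketch of the inputs process_feed_info validates, assuming the same module context (db.feeds, util, config); every address, title, and image URL below is hypothetical.

feed = {
    'source': '1ExampleFeedAddress',
    'info_status': 'needfetch',
    'info_url': 'example.com/feed.json',
}
info_data = {
    'address': '1ExampleFeedAddress',  # must equal feed['source']
    'title': 'Example feed',
    'operator': {'name': 'Example operator', 'image': 'http://example.com/op.png'},
    'targets': [
        {'text': 'Outcome A', 'value': 1, 'image': 'http://example.com/a.png'},
    ],
}

# success, errors = process_feed_info(db, feed, info_data)
# On success the topic, operator, and per-target images are fetched into
# config.SUBDIR_FEED_IMAGES and the sanitized record is saved via db.feeds.save(feed).
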
Example #7
def process_feed_info(db, feed, info_data):
    # sanity check
    assert feed["info_status"] == "needfetch"
    assert "info_url" in feed
    assert util.is_valid_url(feed["info_url"], allow_no_protocol=True)  # already validated in the fetch

    errors = util.is_valid_json(info_data, config.FEED_SCHEMA)

    if not isinstance(info_data, dict) or "address" not in info_data:
        errors.append("Invalid data format")
    elif feed["source"] != info_data["address"]:
        errors.append("Invalid address")

    if len(errors) > 0:
        inc_fetch_retry(db, feed, new_status="invalid", errors=errors)
        return (False, errors)

    feed["info_status"] = "valid"

    # fetch any associated images...
    # TODO: parallelize this 2nd level feed image fetching ... (e.g. just compose a list here, and process it in later on)
    if "image" in info_data:
        info_data["valid_image"] = util.fetch_image(
            info_data["image"], config.SUBDIR_FEED_IMAGES, feed["source"] + "_topic", fetch_timeout=5
        )
    if "operator" in info_data and "image" in info_data["operator"]:
        info_data["operator"]["valid_image"] = util.fetch_image(
            info_data["operator"]["image"], config.SUBDIR_FEED_IMAGES, feed["source"] + "_owner", fetch_timeout=5
        )
    if "targets" in info_data:
        for i in range(len(info_data["targets"])):
            if "image" in info_data["targets"][i]:
                image_name = feed["source"] + "_tv_" + str(info_data["targets"][i]["value"])
                info_data["targets"][i]["valid_image"] = util.fetch_image(
                    info_data["targets"][i]["image"], config.SUBDIR_FEED_IMAGES, image_name, fetch_timeout=5
                )

    feed["info_data"] = sanitize_json_data(info_data)
    db.feeds.save(feed)
    return (True, None)
Example #8
def process_feed_info(db, feed, info_data):
    # sanity check
    assert feed['info_status'] == 'needfetch'
    assert 'info_url' in feed
    assert util.is_valid_url(feed['info_url'], allow_no_protocol=True) #already validated in the fetch

    errors = util.is_valid_json(info_data, config.FEED_SCHEMA)
    
    if not isinstance(info_data, dict) or 'address' not in info_data:
        errors.append('Invalid data format')
    elif feed['source'] != info_data['address']:
        errors.append('Invalid address')
   
    if len(errors) > 0:
        inc_fetch_retry(db, feed, new_status='invalid', errors=errors)
        return (False, errors) 

    feed['info_status'] = 'valid'

    #fetch any associated images...
    #TODO: parallelize this 2nd level feed image fetching ... (e.g. just compose a list here, and process it in later on)
    if 'image' in info_data:
        info_data['valid_image'] = util.fetch_image(info_data['image'],
            config.SUBDIR_FEED_IMAGES, feed['source'] + '_topic', fetch_timeout=5)
    if 'operator' in info_data and 'image' in info_data['operator']:
        info_data['operator']['valid_image'] = util.fetch_image(info_data['operator']['image'],
            config.SUBDIR_FEED_IMAGES, feed['source'] + '_owner', fetch_timeout=5)
    if 'targets' in info_data:
        for i in range(len(info_data['targets'])):
            if 'image' in info_data['targets'][i]:
                image_name = feed['source'] + '_tv_' + str(info_data['targets'][i]['value'])
                info_data['targets'][i]['valid_image'] = util.fetch_image(
                    info_data['targets'][i]['image'], config.SUBDIR_FEED_IMAGES, image_name, fetch_timeout=5)

    feed['info_data'] = sanitize_json_data(info_data)
    db.feeds.save(feed)
    return (True, None)