Example #1
    def post(self, request, *args, **kwargs):
        # Merchant order number
        # out_trade_no = request.data.get('out_trade_no')
        out_trade_no = '20200815143500475713'
        # Get the mini-program user's unique openid
        openid = models.UserInfo.objects.get(pk=request.user.id).openid
        # WeChat API URL to call
        order_url = "https://api.mch.weixin.qq.com/pay/orderquery"
        # Get the pre-built XML request body
        body_data = wxpay.get_bodyData(orderType='queryset',
                                       out_trade_no=out_trade_no)

        # Call the WeChat API to query the order
        response = requests.post(order_url,
                                 body_data.encode("utf-8"),
                                 headers={'Content-Type': 'application/xml'})
        # The response is XML; convert it to a dict
        x = XML2Dict()
        content = x.parse(response.content)['xml']
        print(content)

        if content['return_code'].decode('UTF-8') == 'SUCCESS':
            trade_state = content['trade_state'].decode('UTF-8')
            time_end = content['time_end'].decode('UTF-8')
            # Package the data returned to the front end
            data = {'trade_state': trade_state, 'time_end': time_end}
            print(data)
            return Response(data, status=HTTP_200_OK)

        else:
            return HttpResponse("查询支付失败")
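
The wxpay.get_bodyData helper is not shown in this example. As a point of reference only, here is a minimal sketch of what such a helper could produce for the orderquery call, assuming the WeChat Pay v2 conventions (MD5 sign over the sorted key=value pairs plus the API key); APPID, MCH_ID and API_KEY are placeholder values, not taken from the example:

import hashlib
import random
import string

APPID = 'wx_your_appid'    # placeholder
MCH_ID = 'your_mch_id'     # placeholder
API_KEY = 'your_api_key'   # placeholder


def make_sign(params, api_key=API_KEY):
    # WeChat Pay v2 sign: sort the non-empty key=value pairs, append &key=API_KEY,
    # MD5 the string and upper-case the hex digest
    raw = '&'.join('%s=%s' % (k, params[k]) for k in sorted(params) if params[k])
    return hashlib.md5((raw + '&key=' + api_key).encode('utf-8')).hexdigest().upper()


def build_orderquery_xml(out_trade_no):
    params = {
        'appid': APPID,
        'mch_id': MCH_ID,
        'out_trade_no': out_trade_no,
        'nonce_str': ''.join(random.choices(string.ascii_letters + string.digits, k=32)),
    }
    params['sign'] = make_sign(params)
    fields = ''.join('<%s>%s</%s>' % (k, v, k) for k, v in params.items())
    return '<xml>%s</xml>' % fields
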
Example #2
def xml2dict(xml):
    """
    将xml转换成dict
    :param xml: xml格式字符串
    :return:
    """
    return XML2Dict().parse(xml).get('xml')
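
A small usage sketch of the helper above (not part of the original example). The sample XML is illustrative; with the XML2Dict parsers used in these examples, values typically come back as bytes, which is why the other examples call .decode():

sample = ('<xml>'
          '<return_code><![CDATA[SUCCESS]]></return_code>'
          '<trade_state><![CDATA[SUCCESS]]></trade_state>'
          '<time_end><![CDATA[20200815143712]]></time_end>'
          '</xml>')

content = xml2dict(sample)
# e.g. content['trade_state'] -> b'SUCCESS' (decode before comparing)
trade_state = content['trade_state'].decode('utf-8')
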
Example #3
    def post(self, request, *args, **kwargs):
        user_id = self.request.user.id
        if models.Order.objects.filter(user=user_id,
                                       order_status__in=[1, 2]).count() > 1:
            return Response(status=HTTP_204_NO_CONTENT)
        # Get the price (WeChat expects the amount in fen, i.e. cents)
        price = int(request.data.get("price")) * 100

        # Get the mini-program user's unique openid
        openid = models.UserInfo.objects.get(pk=request.user.id).openid
        out_trade_no = request.data['out_trade_no']
        print(out_trade_no)

        if out_trade_no is None:  # if no out_trade_no was passed in, this is a new order
            out_trade_no = getWxPayOrderID()  # merchant order number

        # WeChat API URL to call
        order_url = "https://api.mch.weixin.qq.com/pay/unifiedorder"

        # Get the pre-built XML request body
        body_data = wxpay.get_bodyData(orderType='prepay',
                                       openid=openid,
                                       price=price,
                                       out_trade_no=out_trade_no)
        # Get the timestamp
        timestamp = str(int(time.time()))

        # Call the WeChat unified-order API
        response = requests.post(order_url,
                                 body_data.encode("utf-8"),
                                 headers={'Content-Type': 'application/xml'})
        # The response is XML; convert it to a dict
        x = XML2Dict()
        content = x.parse(response.content)['xml']
        for key in content:
            content[key] = content[key].decode('UTF-8')
        print(content)

        if content['return_code'] == 'SUCCESS':
            # Get the prepay session identifier (prepay_id)
            prepay_id = content['prepay_id']
            # Get the nonce string
            nonceStr = content['nonce_str']

            # Compute the paySign signature from the prepay_id, timestamp and nonceStr
            paySign = wxpay.get_paysign(prepay_id, timestamp, nonceStr)
            # Package the data returned to the front end
            data = {
                "prepay_id": prepay_id,
                "nonceStr": nonceStr,
                "paySign": paySign,
                "timestamp": timestamp,
                "out_trade_no": out_trade_no
            }

            return Response(data, status=HTTP_200_OK)

        else:
            return HttpResponse("请求支付失败")
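
The wxpay.get_paysign helper is likewise not included in the example. A minimal sketch of what it might look like, assuming the WeChat mini-program wx.requestPayment signature (MD5 over appId, timeStamp, nonceStr, package and signType plus the API key); the credentials below are placeholders:

import hashlib

APPID = 'wx_your_appid'    # placeholder
API_KEY = 'your_api_key'   # placeholder


def get_paysign(prepay_id, timestamp, nonce_str, appid=APPID, api_key=API_KEY):
    # Fields signed for wx.requestPayment (WeChat Pay v2, signType=MD5)
    params = {
        'appId': appid,
        'timeStamp': timestamp,
        'nonceStr': nonce_str,
        'package': 'prepay_id=%s' % prepay_id,
        'signType': 'MD5',
    }
    raw = '&'.join('%s=%s' % (k, params[k]) for k in sorted(params))
    return hashlib.md5((raw + '&key=' + api_key).encode('utf-8')).hexdigest().upper()
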
Example #4
    def parse_config_file(self):
        logging.debug('Into function parse_config_file')
        with open(self._config_file, 'r') as f:
            _xml_str = f.read()

        try:
            _obj = XML2Dict(coding='utf-8')
            self.config_dict = _obj.parse(_xml_str)
            logging.debug('config_dict : %s' % pformat(self.config_dict))
            logging.info('Parse config file done')
            return self.config_dict
        except Exception as e:
            logging.error("Can't parse as XML!")
            logging.exception(e)
            sys.exit(1)
Example #5
def trans_xml_to_dict(xml):
    """
    将微信支付交互返回的 XML 格式数据转化为 Python Dict 对象

    :param xml: 原始 XML 格式数据
    :return: dict 对象
    """

    # soup = BeautifulSoup(xml, features='xml')
    # xml = soup.find('xml')
    # if not xml:
    #     return {}

    # Convert the XML data to a dict
    # msg = {}
    # root_elem = ET.fromstring(xml)
    # print(root_elem)
    # for ch in root_elem:
    #     msg[ch.tag] = ch.text
    # return msg

    x = XML2Dict()
    d = x.parse(xml)
    return d
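
Code that consumes this helper usually needs the opposite direction as well when posting to the WeChat Pay API; a minimal sketch of such a counterpart (not part of the source):

def trans_dict_to_xml(data):
    """
    Convert a Python dict into the <xml> body format expected by WeChat Pay.
    """
    parts = ['<xml>']
    for key, value in data.items():
        parts.append('<%s><![CDATA[%s]]></%s>' % (key, value, key))
    parts.append('</xml>')
    return ''.join(parts)
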
Example #6
def weixinrec():
    if request.method == 'GET':
        return request.args.get('echostr')
    xml2dict = XML2Dict(coding='utf-8')
    xml = xml2dict.parse(request.data)
    vo = xml['xml']

    if 'MsgType' in vo:
        if vo['MsgType'] == 'text':
            if vo['Content'] in testCommand:

                retUrl = '没有对应的返回值'
                v = str(testCommand[vo['Content']])
                if (v.find('%s') != -1):
                    retUrl = str(
                        testCommand[vo['Content']]) % vo['FromUserName']
                else:
                    retUrl = str(testCommand[vo['Content']])

                retXML = ''
                retXML += '<xml>'
                retXML += '<ToUserName><![CDATA[' + vo[
                    'FromUserName'] + ']]></ToUserName>'
                retXML += '<FromUserName><![CDATA[' + vo[
                    'ToUserName'] + ']]></FromUserName>'
                retXML += '<CreateTime>' + str(int(
                    sysTime.time() * 1000)) + '</CreateTime>'
                retXML += '<MsgType><![CDATA[text]]></MsgType>'
                retXML += '<Content><![CDATA[' + retUrl + ']]></Content>'
                retXML += '</xml>'
                retXML = retXML.replace('&lt;', '<').replace('&gt;', '>')
                return retXML

    if 'Event' in vo and 'EventKey' in vo:
        if vo['Event'] == 'subscribe':
            retXML = ''
            retXML += '<xml>'
            retXML += '<ToUserName><![CDATA[' + vo[
                'FromUserName'] + ']]></ToUserName>'
            retXML += '<FromUserName><![CDATA[' + vo[
                'ToUserName'] + ']]></FromUserName>'
            retXML += '<CreateTime>' + str(int(
                sysTime.time() * 1000)) + '</CreateTime>'
            retXML += '<MsgType><![CDATA[text]]></MsgType>'
            retXML += '<Content><![CDATA[欢迎关注厦门国贸!]]></Content>'
            retXML += '</xml>'
            retXML = retXML.replace('&lt;', '<').replace('&gt;', '>')
            return retXML
        if vo['Event'] == 'CLICK':
            doc = minidom.Document()
            root = doc.createElement('xml')
            doc.appendChild(root)
            toUserName = doc.createElement('ToUserName')
            toUserName.appendChild(
                doc.createTextNode('<![CDATA[' + vo['FromUserName'] + ']]>'))
            root.appendChild(toUserName)

            fromUserName = doc.createElement('FromUserName')
            fromUserName.appendChild(
                doc.createTextNode('<![CDATA[' + vo['ToUserName'] + ']]>'))
            root.appendChild(fromUserName)

            createTime = doc.createElement('CreateTime')
            createTime.appendChild(
                doc.createTextNode(str(int(sysTime.time() * 1000))))
            root.appendChild(createTime)

            msgType = doc.createElement('MsgType')
            msgType.appendChild(doc.createTextNode('<![CDATA[news]]>'))
            root.appendChild(msgType)

            articles = doc.createElement('Articles')

            for k, group in groups.items():
                if group['name'] == vo['EventKey']:
                    menuHeader = doc.createElement('item')
                    headerTitle = doc.createElement('Title')
                    headerTitle.appendChild(
                        doc.createTextNode('<![CDATA[' + group['title'] +
                                           ']]>'))
                    menuHeader.appendChild(headerTitle)

                    headerPicUrl = doc.createElement('PicUrl')
                    headerPicUrl.appendChild(
                        doc.createTextNode('<![CDATA[' + group['picurl'] +
                                           ']]>'))
                    menuHeader.appendChild(headerPicUrl)

                    articles.appendChild(menuHeader)
                    articleNum = 1
                    for k, menu in menus.items():
                        if menu['group'] == group:
                            menuitem = doc.createElement('item')

                            itemTitle = doc.createElement('Title')
                            itemTitle.appendChild(
                                doc.createTextNode('<![CDATA[' +
                                                   menu['title'] + ']]>'))
                            menuitem.appendChild(itemTitle)

                            itemUrl = doc.createElement('Url')
                            itemUrl.appendChild(
                                doc.createTextNode('<![CDATA[' +
                                                   (menu['url'] %
                                                    vo['FromUserName']) +
                                                   ']]>'))
                            menuitem.appendChild(itemUrl)

                            itemPicUrl = doc.createElement('PicUrl')
                            itemPicUrl.appendChild(
                                doc.createTextNode('<![CDATA[' +
                                                   menu['picurl'] + ']]>'))
                            menuitem.appendChild(itemPicUrl)

                            articles.appendChild(menuitem)
                            articleNum = articleNum + 1

                    articleCount = doc.createElement('ArticleCount')
                    articleCount.appendChild(
                        doc.createTextNode(str(articleNum)))
                    root.appendChild(articleCount)
                    root.appendChild(articles)
                    xml = str(doc.toxml())
                    # xml = pickle.loads(xml)
                    xml = xml.replace('&lt;', '<').replace('&gt;', '>')
                    return xml
    return 'error'
Example #7
    def handle(self, *args, **options):

        guids = [
            e.auto_generated_source for e in Event.objects.filter(
                auto_generated_source__startswith='www.who.int')
        ]

        logger.info('Querying WHO RSS feed for new emergency data')
        # get latest
        nspace = '{https://www.who.int}'
        url = 'https://www.who.int/feeds/entity/csr/don/en/rss.xml'

        response = requests.get(url)
        if response.status_code != 200:
            logger.error('Error querying WHO xml feed at ' + url)
            logger.error(response.content)
            raise Exception('Error querying WHO')

        # get as XML
        xml2dict = XML2Dict()
        results = xml2dict.parse(response.content)
        added = 0
        # import pdb; pdb.set_trace()
        lastBuildDate = results['rss']['channel']['lastBuildDate']
        managingEditor = results['rss']['channel']['managingEditor']
        for row in results['rss']['channel']['item']:
            data = {
                'title': row.pop('title'),
                'link': row.pop('link'),
                'description': row.pop('description'),
                'guid': row.pop('guid'),
                #               '@guid': row.pop('@guid'),  # cannot be popped twice
                'isPermaLink': row.pop('@guid').pop('isPermaLink'),
                'category': row.pop('category'),
                'pubDate': row.pop('pubDate'),
            }
            if data['guid'].decode("utf-8") in guids:
                continue
            if data['guid'].decode("utf-8") in [
                    'WeDontWantThis', 'NeitherThis'
            ]:
                continue

#            alert_level = alert['%salertlevel' % nspace].decode('utf-8')
#            if alert_level in levels.keys():
#                latlon = alert['{http://www.georss.org/georss}point'].decode('utf-8').split()
#                eid = alert.pop(nspace + 'eventid')
#                alert_score = alert[nspace + 'alertscore'] if (nspace + 'alertscore') in alert else None
#                data = {
#                    'title': alert.pop('title'),
#                    'description': alert.pop('description'),
#                    'image': alert.pop('enclosure'),
#                    'report': alert.pop('link'),
#                    'publication_date': parse(alert.pop('pubDate')),
#                    'year': alert.pop(nspace + 'year'),
#                    'lat': latlon[0],
#                    'lon': latlon[1],
#                    'event_type': alert.pop(nspace + 'eventtype'),
#                    'alert_level': levels[alert_level],
#                    'alert_score': alert_score,
#                    'severity': alert.pop(nspace + 'severity'),
#                    'severity_unit': alert['@' + nspace + 'severity']['unit'],
#                    'severity_value': alert['@' + nspace + 'severity']['value'],
#                    'population_unit': alert['@' + nspace + 'population']['unit'],
#                    'population_value': alert['@' + nspace + 'population']['value'],
#                    'vulnerability': alert['@' + nspace + 'vulnerability']['value'],
#                    'country_text': alert.pop(nspace + 'country'),
#                }
#
#                # do some length checking
#                for key in ['event_type', 'alert_score', 'severity_unit', 'severity_value', 'population_unit', 'population_value']:
#                    if len(data[key]) > 16:
#                        data[key] = data[key][:16]
#                data = {k: v.decode('utf-8') if isinstance(v, bytes) else v for k, v in data.items()}
#                gdacsevent, created = GDACSEvent.objects.get_or_create(eventid=eid, defaults=data)
#                if created:
#                    added += 1
#                    for c in data['country_text'].split(','):
#                        country = Country.objects.filter(name=c.strip())
#                        if country.count() == 1:
#                            gdacsevent.countries.add(country[0])
#
#                    title_elements = ['GDACS %s:' % alert_level]
#                    for field in ['country_text', 'event_type', 'severity']:
#                        if data[field] is not None:
#                            title_elements.append(str(data[field]))
#                    title = (' ').join(title_elements)
#
            title = data['title'].decode("utf-8")
            pos = title.find(' – ')
            if pos == -1:
                pos = title.find(' - ')
            if pos > 0:
                country = title[pos +
                                3:]  # cutting the part after " – " or " - "
            else:
                country = 'DashNotFoundInTitle'
            if country == 'Democratic Republic of the Congo':  #replacement
                country = 'Congo, Dem. Rep.'
            elif country == 'Argentine Republic':
                country = 'Argentina'
            elif country == 'Republic of Panama':
                country = 'Panama'
            elif country == 'Islamic Republic of Pakistan':
                country = 'Pakistan'
            # make sure we don't exceed the 100 character limit
            if len(title) > 99:
                title = '%s...' % title[:99]
            date = parse(data['pubDate'].decode("utf-8"))
            if data['category'].decode("utf-8") == 'news':
                alert_level = 1
            else:
                alert_level = 2
            if "Ebola" in title:
                alert_level = 2

            fields = {
                'name': title,
                'summary': data['description'].decode("utf-8"),
                'disaster_start_date': date,
                'auto_generated': True,
                'auto_generated_source': data['guid'].decode("utf-8"),
                'ifrc_severity_level': alert_level,
            }
            event = Event.objects.create(**fields)
            added += 1

            # add country
            country_found = Country.objects.filter(name=country.strip())
            if country_found.count() >= 1:
                event.countries.add(country_found[0])
            else:
                country_word_list = country.split()  # list of country words
                country_found = Country.objects.filter(
                    name=country_word_list[-1].strip()
                )  # Search only the last word, like "Republic of Panama" > "Panama"
                if country_found.count() >= 1:
                    event.countries.add(country_found[0])

        logger.info('%s WHO messages added' % added)
Example #8
    def handle(self, *args, **options):
        logger.info('Starting GDACs ingest')
        # get latest
        nspace = '{http://www.gdacs.org}'
        url = 'http://www.gdacs.org/xml/rss_7d.xml'

        response = requests.get(url)
        if response.status_code != 200:
            logger.error(
                'Error querying GDACS xml feed at http://www.gdacs.org/xml/rss_7d.xml'
            )
            logger.error(response.content)
            raise Exception('Error querying GDACS')

        # get as XML
        xml2dict = XML2Dict()
        results = xml2dict.parse(response.content)
        levels = {'Orange': 1, 'Red': 2}
        added = 0
        for alert in results['rss']['channel']['item']:
            alert_level = alert['%salertlevel' % nspace].decode('utf-8')
            if alert_level in levels.keys():
                latlon = alert['{http://www.georss.org/georss}point'].decode(
                    'utf-8').split()
                eid = alert.pop(nspace + 'eventid')
                alert_score = alert[nspace + 'alertscore'] if (
                    nspace + 'alertscore') in alert else None
                data = {
                    'title': alert.pop('title'),
                    'description': alert.pop('description'),
                    'image': alert.pop('enclosure'),
                    'report': alert.pop('link'),
                    'publication_date': parse(alert.pop('pubDate')),
                    'year': alert.pop(nspace + 'year'),
                    'lat': latlon[0],
                    'lon': latlon[1],
                    'event_type': alert.pop(nspace + 'eventtype'),
                    'alert_level': levels[alert_level],
                    'alert_score': alert_score,
                    'severity': alert.pop(nspace + 'severity'),
                    'severity_unit': alert['@' + nspace + 'severity']['unit'],
                    'severity_value':
                    alert['@' + nspace + 'severity']['value'],
                    'population_unit':
                    alert['@' + nspace + 'population']['unit'],
                    'population_value':
                    alert['@' + nspace + 'population']['value'],
                    'vulnerability':
                    alert['@' + nspace + 'vulnerability']['value'],
                    'country_text': alert.pop(nspace + 'country'),
                }

                # do some length checking
                for key in [
                        'event_type', 'alert_score', 'severity_unit',
                        'severity_value', 'population_unit', 'population_value'
                ]:
                    if len(data[key]) > 16:
                        data[key] = data[key][:16]
                data = {
                    k: v.decode('utf-8') if isinstance(v, bytes) else v
                    for k, v in data.items()
                }
                gdacsevent, created = GDACSEvent.objects.get_or_create(
                    eventid=eid, defaults=data)
                if created:
                    added += 1
                    for c in data['country_text'].split(','):
                        country = Country.objects.filter(name=c.strip())
                        if country.count() == 1:
                            gdacsevent.countries.add(country[0])

                    title_elements = ['GDACS %s:' % alert_level]
                    for field in ['country_text', 'event_type', 'severity']:
                        if data[field] is not None:
                            title_elements.append(str(data[field]))
                    title = (' ').join(title_elements)

                    # make sure we don't exceed the 100 character limit
                    if len(title) > 97:
                        title = '%s...' % title[:97]

                    fields = {
                        'name': title,
                        'summary': data['description'],
                        'disaster_start_date': data['publication_date'],
                        'auto_generated': True,
                        'auto_generated_source': SOURCES['gdacs'],
                        'ifrc_severity_level': data['alert_level'],
                    }
                    event = Event.objects.create(**fields)
                    # add countries
                    for c in gdacsevent.countries.all():
                        event.countries.add(c)

        logger.info('%s GDACs events added' % added)
Example #9
File: draw4.py Project: sdoom/neural
import numpy
# scipy.special for the sigmoid function expit()
import scipy.special
# neural network class definition
from encoder import XML2Dict
#coding=utf-8

xml = XML2Dict()


class xmlInit:
    def __init__(self, file, the_dict):
        self.file = file
        self.the_dict = the_dict

    def handle(self):
        with open(self.file, 'r', encoding='utf-8') as bar:
            foo = bar.read()
            self.the_dict = xml.parse(foo)


#the_dict =
#YX = xmlInit('YXDM.xml',[])
#YX.handle()
#for x in YX.the_dict['root']['item']:
#print(x)


class neuralNetwork:
Example #10
def upd_bing_links():
    # Fallback: pre-seed the list so requests arriving while it is being rebuilt do not get blank content

    from logger.log import Logger
    debug_logger = Logger(logger_name="bing_refresh")

    print("[Bing-Image] Start Bing-Image URL Fetching...")

    from encoder import XML2Dict
    xml = XML2Dict()

    def fetch():

        bing_image.all_image_meta = [
            BingImage(
                "https://cn.bing.com/az/hprichbg/rb/WindmillLighthouse_JA-JP3858962830_1920x1080.jpg",
                "暂无图片说明", "https://bing.com"),
            BingImage(
                "https://cn.bing.com/az/hprichbg/rb/WinterLynx_ZH-CN7158207296_1920x1080.jpg",
                "波希米亚摩拉维亚高地的猞猁,捷克 (© sduben/Getty Images Plus)",
                "https://www.bing.com/search?q=%E7%8C%9E%E7%8C%81&form=hpcapt&mkt=zh-cn"
            )
        ]

        request_url_1 = "https://cn.bing.com/HPImageArchive.aspx?idx=-1&n=9"
        request_url_2 = "https://cn.bing.com/HPImageArchive.aspx?idx=7&n=9"

        result1 = xml.parse(requests.get(request_url_1).text)
        result2 = xml.parse(requests.get(request_url_2).text)

        all_img_json = result1["images"]["image"]
        for item in result2["images"]["image"]:
            all_img_json.append(item)

        index = 0

        for now_img in all_img_json:
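            # The parsed values come back as bytes here; str() on bytes yields "b'...'",
            # so the wrapper is stripped off manually below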
            description = str(now_img["copyright"]).replace("b'", "").replace(
                "'", "")
            copyright_link = str(now_img["copyrightlink"]).replace("b'",
                                                                   "").replace(
                                                                       "'", "")
            url_base = str(now_img["urlBase"]).replace("b'",
                                                       "").replace("'", "")
            full_image_url = "https://cn.bing.com" + url_base + "_1920x1080.jpg"

            img = BingImage(full_image_url, description, copyright_link)
            bing_image.all_image_meta.append(img)

            print("    [0" + str(index) + "] " + full_image_url)
            index += 1

        # remove the two placeholder images inserted at the start
        bing_image.all_image_meta.pop(0)
        bing_image.all_image_meta.pop(0)

    try:
        fetch()
    except requests.exceptions.ConnectionError as e:
        try:
            # On error, re-fetch.
            fetch()
        except requests.exceptions.ConnectionError as e2:
            print(
                "[Bing-Image] Fetching BingImage failed due to ConnectionError."
            )

    print("[Bing-Image] Refreshed. Picture Count: " +
          str(len(bing_image.all_image_meta)))
Example #11
    def handle(self, *args, **options):

        guids = [
            e.auto_generated_source for e in Event.objects.filter(
                auto_generated_source__startswith='www.who.int')
        ]

        logger.info('Querying WHO RSS feed for new emergency data')
        # get latest
        nspace = '{https://www.who.int}'
        ur2 = []
        ur2.append('https://www.who.int/feeds/entity/csr/don/en/rss.xml')
        ur2.append('https://www.who.int/feeds/entity/hac/en/rss.xml')

        for index, url in enumerate(ur2):
            response = requests.get(url)
            if response.status_code != 200:
                text_to_log = 'Error querying WHO xml feed at ' + url
                logger.error(text_to_log)
                logger.error(response.content)
                body = {
                    "name": "ingest_who",
                    "message": text_to_log,
                    "status": CronJobStatus.ERRONEOUS
                }  # not every case is caught here, e.g. if the base URL is wrong...
                CronJob.sync_cron(body)
                raise Exception('Error querying WHO')

            # get as XML
            xml2dict = XML2Dict()
            results = xml2dict.parse(response.content)
            added = 0
            lastBuildDate = results['rss']['channel']['lastBuildDate']
            managingEditor = results['rss']['channel']['managingEditor']

            for row in results['rss']['channel']['item']:
                data = {
                    'title': row.pop('title'),
                    'link': row.pop('link'),
                    'description': row.pop('description'),
                    'guid': row.pop('guid'),
                    #                   '@guid': row.pop('@guid'),  # cannot be popped twice
                    'isPermaLink': row.pop('@guid').pop('isPermaLink'),
                    'category': row.pop('category'),
                    'pubDate': row.pop('pubDate'),
                }
                if data['guid'].decode("utf-8") in guids:
                    continue
                if data['guid'].decode("utf-8") in [
                        'WeDontWantThis', 'NeitherThis'
                ]:
                    continue

#                alert_level = alert['%salertlevel' % nspace].decode('utf-8')
#                if alert_level in levels.keys():
#                    latlon = alert['{http://www.georss.org/georss}point'].decode('utf-8').split()
#                    eid = alert.pop(nspace + 'eventid')
#                    alert_score = alert[nspace + 'alertscore'] if (nspace + 'alertscore') in alert else None
#                    data = {
#                        'title': alert.pop('title'),
#                        'description': alert.pop('description'),
#                        'image': alert.pop('enclosure'),
#                        'report': alert.pop('link'),
#                        'publication_date': parse(alert.pop('pubDate')),
#                        'year': alert.pop(nspace + 'year'),
#                        'lat': latlon[0],
#                        'lon': latlon[1],
#                        'event_type': alert.pop(nspace + 'eventtype'),
#                        'alert_level': levels[alert_level],
#                        'alert_score': alert_score,
#                        'severity': alert.pop(nspace + 'severity'),
#                        'severity_unit': alert['@' + nspace + 'severity']['unit'],
#                        'severity_value': alert['@' + nspace + 'severity']['value'],
#                        'population_unit': alert['@' + nspace + 'population']['unit'],
#                        'population_value': alert['@' + nspace + 'population']['value'],
#                        'vulnerability': alert['@' + nspace + 'vulnerability']['value'],
#                        'country_text': alert.pop(nspace + 'country'),
#                    }
#
#                    # do some length checking
#                    for key in ['event_type', 'alert_score', 'severity_unit', 'severity_value', 'population_unit', 'population_value']:
#                        if len(data[key]) > 16:
#                            data[key] = data[key][:16]
#                    data = {k: v.decode('utf-8') if isinstance(v, bytes) else v for k, v in data.items()}
#                    gdacsevent, created = GDACSEvent.objects.get_or_create(eventid=eid, defaults=data)
#                    if created:
#                        added += 1
#                        for c in data['country_text'].split(','):
#                            country = Country.objects.filter(name=c.strip())
#                            if country.count() == 1:
#                                gdacsevent.countries.add(country[0])
#
#                        title_elements = ['GDACS %s:' % alert_level]
#                        for field in ['country_text', 'event_type', 'severity']:
#                            if data[field] is not None:
#                                title_elements.append(str(data[field]))
#                        title = (' ').join(title_elements)
#
                title = data['title'].decode("utf-8")  # for csr link
                short = title.replace(' (ex-China)', '')
                pos = short.find(' – ')
                region = None
                country = None
                if pos == -1:
                    pos = short.find(' - ')
                if pos > 0:
                    country = short[
                        pos + 3:]  # cutting the part after " – " or " - "
                else:
                    country = 'DashNotFoundInTitle'
                if country == 'Democratic Republic of the Congo':  #replacement
                    country = 'Congo, Dem. Rep.'
                elif country == 'Argentine Republic':
                    country = 'Argentina'
                elif country == 'Republic of Panama':
                    country = 'Panama'
                elif country == 'Islamic Republic of Pakistan':
                    country = 'Pakistan'
                elif index == 1:  # for 'hac' category. See link for 'hac' above
                    hac_category = data['category'].decode("utf-8")

                    # Searching for the given country
                    end = hac_category.find('[country]')
                    if end > 0:
                        start = hac_category[:end - 1].rfind(
                            ',', 0)  # search backwards for the comma
                        country = hac_category[
                            start + 2:end -
                            1]  # take the part after the comma as the country
                    else:
                        country = 'CountryNotFoundInCategory'  # Will not be found via filtering
                    # Searching for the given region
                    end = hac_category.find('[region]')
                    if end > 0:
                        start = hac_category[:end - 1].rfind(
                            ',', 0)  # search backwards for the comma
                        region_name = hac_category[
                            start + 2:end -
                            1]  # take the part after the comma as the region name
                        if 'Afr' in region_name:  # Keep synchronised with https://github.com/IFRCGo/go-api/blob/master/api/models.py#L38-L42
                            region = 0
                        elif 'Ame' in region_name:
                            region = 1
                        elif 'As' in region_name:
                            region = 2
                        elif 'Eu' in region_name:
                            region = 3
                        elif 'MENA' in region_name:
                            region = 4
                        else:  # search for region that is joined to country (later)...
                            region = None

                # make sure we don't exceed the 100 character limit
                if len(title) > 99:
                    title = '%s...' % title[:99]
                date = parse(data['pubDate'].decode("utf-8"))
                if data['category'].decode("utf-8") == 'news':
                    alert_level = 1
                else:
                    alert_level = 2
                if "Ebola" in title or "virus" in title or "fever" in title:
                    alert_level = 2
                elif index == 1:
                    alert_level = 0

                if data['category'].decode("utf-8") == 'news':
                    summary = data['description'].decode("utf-8")
                else:
                    summary = data['description'].decode(
                        "utf-8") + ' (' + data['category'].decode(
                            "utf-8") + ')'

                fields = {
                    'name': title,
                    'summary': summary,
                    'disaster_start_date': date,
                    'auto_generated': True,
                    'auto_generated_source': data['guid'].decode("utf-8"),
                    'ifrc_severity_level': alert_level,
                }
                # TODO: fields['name'] sometimes exceeds 100 maxlength, so will need some altering if this will be used
                event = Event.objects.create(**fields)
                added += 1

                # add country
                country_found = Country.objects.filter(name=country.strip())
                if country_found.count() >= 1:
                    event.countries.add(country_found[0])
                else:
                    country_word_list = country.split(
                    )  # list of country words
                    country_found = Country.objects.filter(
                        name=country_word_list[-1].strip()
                    )  # Search only the last word, like "Republic of Panama" > "Panama"
                    if country_found.count() >= 1:
                        event.countries.add(country_found[0])

                # add region
                # print(country)
                if (region is None) and (country_found.count() > 0) and (
                        country != 'CountryNotFoundInCategory'):
                    region = country_found[0].region_id
                if region is not None:
                    event.regions.add(region)

            text_to_log = "{} WHO messages added, URL-{}".format(
                added, index + 1)
            logger.info(text_to_log)

            # Database CronJob logging
            body = {
                "name": "ingest_who",
                "message": text_to_log,
                "num_result": added,
                "storing_days": 6,
                "status": CronJobStatus.SUCCESSFUL
            }

            # ... via the API - not used from here, but it can be useful from the front end:
            #resp = requests.post(api_url + '/api/v2/add_cronjob_log/', body, headers={'CONTENT_TYPE': 'application/json'})

            # ... via a direct write-in:
            CronJob.sync_cron(body)