def retrieve_bokelai_detail(self, bokelai_id, log, result_queue, timeout):
        """Fetch a books.com.tw (博客來) detail page for *bokelai_id*, parse its
        ld+json metadata block into a calibre Metadata object, and put the
        result on *result_queue*.

        Side effects: caches the cover URL against the bokelai identifier via
        self.cache_identifier_to_cover_url. Returns None (results are passed
        through the queue); returns early if the page cannot be fetched.
        """
        detail_url = self.BOKELAI_DETAIL_URL % bokelai_id
        log.info(detail_url)

        try:
            br = self.browser
            _raw = br.open_novisit(detail_url, timeout=timeout)
            raw = _raw.read()
        except Exception:
            log.exception('Failed to load detail page: %s' % detail_url)
            return

        # The page embeds its structured metadata in a JSON-LD script tag.
        root = etree.HTML(raw)
        info_json_text = root.xpath(
            "//script[@type='application/ld+json']")[0].text
        log.info(info_json_text)
        info_json = json.loads(info_json_text)

        title = info_json['name']
        # Multiple authors arrive as a single comma-separated name string.
        authors = info_json['author'][0]['name'].split(",")
        publisher = info_json['publisher'][0]['name']
        isbn = info_json['workExample']['workExample']['isbn']
        pubdate = info_json['datePublished']

        # Book description: all text nodes of the first div.content block.
        comments_ele = root.xpath("(//div[@class='content'])[1]//text()")
        comments = "\n".join(comments_ele)

        tags = []
        for ele in root.xpath("//li[contains(text(),'本書分類:')]/a"):
            log.info(ele.text)
            # A category like "文學/小說" is split into separate tags.
            # (The original code ran the "/" branch twice, duplicating
            # every slash-containing tag.)
            if "/" in ele.text:
                tags.extend(ele.text.split("/"))
            else:
                tags.append(ele.text)

        # Extract a clean cover URL (no query string) containing the book id.
        # re.search returns None on no match — guard so cover_url falls back
        # to None instead of raising AttributeError on .group().
        cover_match = re.search(
            r'https[^\?\=\&]*' + bokelai_id + r'[^\?\=\&]*',
            info_json['image'])
        cover_url = cover_match.group(0) if cover_match else None

        if not authors:
            authors = [_('Unknown')]

        log.info(title, authors, publisher, isbn, pubdate, comments, tags,
                 cover_url)

        mi = Metadata(title, authors)
        mi.identifiers = {'bokelai': bokelai_id, 'isbn': isbn}
        mi.publisher = publisher
        mi.comments = comments
        mi.isbn = isbn
        mi.tags = tags
        if pubdate:
            try:
                from calibre.utils.date import parse_date, utcnow
                # Mid-month default avoids timezone-related month drift when
                # the source date omits the day.
                default = utcnow().replace(day=15)
                mi.pubdate = parse_date(pubdate,
                                        assume_utc=True,
                                        default=default)
            except Exception:
                log.error('Failed to parse pubdate %r' % pubdate)

        if cover_url is not None:
            mi.has_bokelai_cover = cover_url
            self.cache_identifier_to_cover_url(mi.identifiers['bokelai'],
                                               mi.has_bokelai_cover)
        else:
            mi.has_bokelai_cover = None

        result_queue.put(mi)