Example 1
 def test_pprint(self):
     frame = CTOC(element_id=u"foo", flags=3,
                  child_element_ids=[u"ch0"],
                  sub_frames=[TPE2(encoding=3, text=[u"foo"])])
     self.assertEqual(
         frame.pprint(),
         "CTOC=foo flags=3 child_element_ids=ch0\n    TPE2=foo")
Example 2
 def test_write(self):
     frame = CTOC(element_id=u"foo", flags=3,
                  child_element_ids=[u"ch0"],
                  sub_frames=[TPE2(encoding=3, text=[u"f", u"b"])])
     config = ID3SaveConfig(3, "/")
     data = (b"foo\x00\x03\x01ch0\x00TPE2\x00\x00\x00\x0b\x00\x00\x01"
             b"\xff\xfef\x00/\x00b\x00\x00\x00")
     self.assertEqual(frame._writeData(config), data)
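A byte-by-byte reading of the expected payload makes the test easier to follow. The breakdown below annotates the same bytes (my reading of the v2.3 CTOC layout mutagen writes: element id, flags, child count, child ids, then the embedded sub-frame; the "/" joining "f" and "b" comes from the ID3SaveConfig separator, and encoding=3 is downgraded to UTF-16 because ID3v2.3 has no UTF-8 encoding):

data = (
    b"foo\x00"                          # element_id "foo", latin-1, NUL-terminated
    b"\x03"                             # flags: TOP_LEVEL | ORDERED
    b"\x01"                             # number of child element ids
    b"ch0\x00"                          # child element id "ch0", NUL-terminated
    b"TPE2\x00\x00\x00\x0b\x00\x00"     # embedded TPE2 header: id, size=11, flags
    b"\x01"                             # text encoding: UTF-16 with BOM
    b"\xff\xfef\x00/\x00b\x00\x00\x00"  # BOM + "f/b" in UTF-16-LE + terminator
)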
Example 3
def generate_ctoc(num_chaps):
    """Generates a CTOC frame for `num_chaps` chapters."""
    return CTOC(element_id=u"toc",
                flags=CTOCFlags.TOP_LEVEL | CTOCFlags.ORDERED,
                child_element_ids=id_list_gen(num_chaps),
                sub_frames=[
                    TIT2(text=u"Primary Chapter List"),
                ])
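generate_ctoc relies on an id_list_gen helper that is not part of the excerpt. A minimal sketch of what such a helper might look like, assuming the chapter ids are simply numbered (hypothetical; the real project may generate them differently):

def id_list_gen(num_chaps):
    """Return child element ids u"chp01", u"chp02", ... for `num_chaps` chapters."""
    return [u"chp{:02d}".format(i + 1) for i in range(num_chaps)]

Whatever scheme is used, each id returned here has to match the element_id of a corresponding CHAP frame, otherwise players cannot resolve the table of contents.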
Example 4
 def test_ctoc_subframes(self):
     id3 = ID3()
     id3.version = (2, 3)
     id3.add(CTOC(sub_frames=[TYER(encoding=0, text="2006")]))
     id3.update_to_v24()
     ctoc = id3.getall("CTOC")[0]
     self.assertEqual(ctoc.sub_frames.getall("TDRC")[0], u"2006")
     self.assertFalse(ctoc.sub_frames.getall("TYER"))
     id3.update_to_v23()
     self.assertEqual(ctoc.sub_frames.getall("TYER")[0], u"2006")
Example 5
    def test_save_v23_recurse_restore(self):
        self.realid3.add(CHAP(sub_frames=[TDRC(text="2006")]))
        self.realid3.add(CTOC(sub_frames=[TDRC(text="2006")]))
        self.id3.save(self.filename, v2_version=3)

        for frame_id in ["CHAP", "CTOC"]:
            chap = self.realid3.getall(frame_id)[0]
            assert chap.sub_frames.getall("TDRC")[0] == "2006"
            new = ID3(self.filename, translate=False)
            assert new.version == (2, 3, 0)
            chap = new.getall(frame_id)[0]
            assert not chap.sub_frames.getall("TDRC")
            assert chap.sub_frames.getall("TYER")[0] == "2006"
Example 6
def add_chapters(filepath, chapters):
    """Adds chapters to the given audio file"""
    audio = ID3(filepath)
    # Adding table of contents
    audio.add(
        CTOC(element_id=u"toc",
             flags=CTOCFlags.TOP_LEVEL | CTOCFlags.ORDERED,
             child_element_ids=[
                 u"chp" + str(i + 1) for i in range(len(chapters))
             ],
             sub_frames=[TIT2(text=[u"Table of Contents"])]))
    # Adding chapters
    for i in range(len(chapters) - 1):
        add_chapter(audio, chapters[i][0], chapters[i + 1][0], chapters[i][1],
                    i + 1)
    length = MutagenFile(filepath).info.length * 1000
    add_chapter(audio, chapters[-1][0], length, chapters[-1][1], len(chapters))
    audio.save()
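add_chapters delegates the per-chapter work to an add_chapter helper that is not shown. A minimal sketch of such a helper, assuming the call signature used above (audio, start, end, title, chapter number), start/end values already in milliseconds, and ids matching the u"chp<N>" entries listed in the CTOC frame:

def add_chapter(audio, start_ms, end_ms, title, num):
    """Add one CHAP frame whose TIT2 sub-frame carries the chapter title."""
    audio.add(
        CHAP(element_id=u"chp" + str(num),
             start_time=int(start_ms),
             end_time=int(end_ms),
             sub_frames=[TIT2(text=[title])]))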
Example 7
    def test_unknown_chap(self):
        # add ctoc
        id3 = ID3(self.filename)
        id3.add(CTOC(element_id="foo", flags=3, child_element_ids=["ch0"],
                     sub_frames=[TIT2(encoding=3, text=["bla"])]))
        id3.save()

        # reload with only CTOC known, so its sub frames are unknown, then save
        id3 = ID3(self.filename, known_frames={"CTOC": CTOC})
        ctoc = id3.getall("CTOC")[0]
        self.assertFalse(ctoc.sub_frames)
        self.assertTrue(ctoc.sub_frames.unknown_frames)
        id3.save()

        # make sure we wrote all sub frames back
        id3 = ID3(self.filename)
        self.assertEqual(
            id3.getall("CTOC")[0].sub_frames.getall("TIT2")[0].text, ["bla"])
Example 8
def run():
    parser = argparse.ArgumentParser(
        prog='odmpy',
        description='Download/return an Overdrive loan audiobook',
        epilog=
        'Version {version}. [Python {py_major}.{py_minor}.{py_micro}-{platform}] '
        'Source at https://github.com/ping/odmpy/'.format(
            version=__version__,
            py_major=sys.version_info.major,
            py_minor=sys.version_info.minor,
            py_micro=sys.version_info.micro,
            platform=sys.platform,
        ))
    parser.add_argument('-v',
                        '--verbose',
                        dest='verbose',
                        action='store_true',
                        help='Enable more verbose messages for debugging')
    parser.add_argument(
        '-t',
        '--timeout',
        dest='timeout',
        type=int,
        default=10,
        help='Timeout (seconds) for network requests. Default 10.')

    subparsers = parser.add_subparsers(
        title='Available commands',
        dest='subparser_name',
        help='To get more help, use the -h option with the command.')
    parser_info = subparsers.add_parser(
        'info',
        description='Get information about a loan file.',
        help='Get information about a loan file')
    parser_info.add_argument('odm_file', type=str, help='ODM file path')

    parser_dl = subparsers.add_parser('dl',
                                      description='Download from a loan file.',
                                      help='Download from a loan file')
    parser_dl.add_argument('-d',
                           '--downloaddir',
                           dest='download_dir',
                           default='.',
                           help='Download folder path')
    parser_dl.add_argument('-c',
                           '--chapters',
                           dest='add_chapters',
                           action='store_true',
                           help='Add chapter marks (experimental)')
    parser_dl.add_argument(
        '-m',
        '--merge',
        dest='merge_output',
        action='store_true',
        help='Merge into 1 file (experimental, requires ffmpeg)')
    parser_dl.add_argument(
        '--mergeformat',
        dest='merge_format',
        choices=['mp3', 'm4b'],
        default='mp3',
        help='Merged file format (m4b is slow, experimental, requires ffmpeg)')
    parser_dl.add_argument(
        '-k',
        '--keepcover',
        dest='always_keep_cover',
        action='store_true',
        help='Always generate the cover image file (cover.jpg)')
    parser_dl.add_argument('-f',
                           '--keepmp3',
                           dest='keep_mp3',
                           action='store_true',
                           help='Keep downloaded mp3 files (after merging)')
    parser_dl.add_argument('-j',
                           '--writejson',
                           dest='write_json',
                           action='store_true',
                           help='Generate a meta json file (for debugging)')
    parser_dl.add_argument('odm_file', type=str, help='ODM file path')

    parser_ret = subparsers.add_parser('ret',
                                       description='Return a loan file.',
                                       help='Return a loan file.')

    parser_ret.add_argument('odm_file', type=str, help='ODM file path')

    args = parser.parse_args()
    if args.verbose:
        logger.setLevel(logging.DEBUG)

    try:
        # test for odm file
        args.odm_file
    except AttributeError:
        parser.print_help()
        exit(0)

    xml_doc = xml.etree.ElementTree.parse(args.odm_file)
    root = xml_doc.getroot()

    # Return Book
    if args.subparser_name == 'ret':
        logger.info('Returning {} ...'.format(args.odm_file))
        early_return_url = root.find('EarlyReturnURL').text
        try:
            early_return_res = requests.get(early_return_url,
                                            headers={'User-Agent': UA_LONG},
                                            timeout=10)
            early_return_res.raise_for_status()
            logger.info('Loan returned successfully: {}'.format(args.odm_file))
        except HTTPError as he:
            if he.response.status_code == 403:
                logger.warning('Loan is probably already returned.')
                sys.exit()
            logger.error(
                'Unexpected HTTPError while trying to return loan {}'.format(
                    args.odm_file))
            logger.error('HTTPError: {}'.format(str(he)))
            logger.debug(he.response.content)
            sys.exit(1)
        except ConnectionError as ce:
            logger.error('ConnectionError: {}'.format(str(ce)))
            sys.exit(1)

        sys.exit()

    metadata = None
    for t in root.itertext():
        if not t.startswith('<Metadata>'):
            continue
        metadata = xml.etree.ElementTree.fromstring(
            # escape bare & chars, which are invalid in XML
            re.sub(r'\s&\s', ' &amp; ', t))
        break

    debug_meta = {}

    title = metadata.find('Title').text
    cover_url = metadata.find(
        'CoverUrl').text if metadata.find('CoverUrl') is not None else ''
    authors = [
        unescape_html(c.text) for c in metadata.find('Creators')
        if 'Author' in c.attrib.get('role', '')
    ]
    if not authors:
        authors = [
            unescape_html(c.text) for c in metadata.find('Creators')
            if 'Editor' in c.attrib.get('role', '')
        ]
    if not authors:
        authors = [unescape_html(c.text) for c in metadata.find('Creators')]
    publisher = metadata.find('Publisher').text
    description = metadata.find('Description').text if metadata.find(
        'Description') is not None else ''

    debug_meta['meta'] = {
        'title': title,
        'coverUrl': cover_url,
        'authors': authors,
        'publisher': publisher,
        'description': description,
    }

    # View Book Info
    if args.subparser_name == 'info':
        logger.info(u'{:10} {}'.format('Title:', colored.blue(title)))
        logger.info(u'{:10} {}'.format(
            'Creators:',
            colored.blue(u', '.join([
                u'{} ({})'.format(c.text, c.attrib['role'])
                for c in metadata.find('Creators')
            ]))))
        logger.info(u'{:10} {}'.format('Publisher:',
                                       metadata.find('Publisher').text))
        logger.info(u'{:10} {}'.format(
            'Subjects:',
            u', '.join([c.text for c in metadata.find('Subjects')])))
        logger.info(u'{:10} {}'.format(
            'Languages:',
            u', '.join([c.text for c in metadata.find('Languages')])))
        logger.info(u'{:10} \n{}'.format('Description:',
                                         metadata.find('Description').text))

        for formats in root.findall('Formats'):
            for f in formats:
                logger.info(u'\n{:10} {}'.format('Format:', f.attrib['name']))
                parts = f.find('Parts')
                for p in parts:
                    logger.info('* {} - {} ({:,.0f}kB)'.format(
                        p.attrib['name'], p.attrib['duration'],
                        math.ceil(1.0 * int(p.attrib['filesize']) / 1024)))
        sys.exit()

    # Download Book
    download_baseurl = ''
    download_parts = []
    for formats in root.findall('Formats'):
        for f in formats:
            protocols = f.find('Protocols')
            for p in protocols:
                if p.attrib.get('method', '') != 'download':
                    continue
                download_baseurl = p.attrib['baseurl']
                break
            parts = f.find('Parts')
            for p in parts:
                download_parts.append(p.attrib)
    debug_meta['download_parts'] = download_parts

    logger.info('Downloading "{}" by "{}" in {} parts...'.format(
        colored.blue(title, bold=True), colored.blue(', '.join(authors)),
        len(download_parts)))

    # declare book folder/file names together up front so we can catch problems caused by overly long names
    book_folder = os.path.join(
        args.download_dir,
        u'{} - {}'.format(title.replace(os.sep, '-'),
                          u', '.join(authors).replace(os.sep, '-')))

    # for merged mp3
    book_filename = os.path.join(
        book_folder,
        u'{} - {}.mp3'.format(title.replace(os.sep, '-'),
                              u', '.join(authors).replace(os.sep, '-')))
    # for merged m4b
    book_m4b_filename = os.path.join(
        book_folder,
        u'{} - {}.m4b'.format(title.replace(os.sep, '-'),
                              u', '.join(authors).replace(os.sep, '-')))

    if not os.path.exists(book_folder):
        try:
            os.makedirs(book_folder)
        except OSError as exc:
            if exc.errno not in (
                    36, 63):  # ref http://www.ioplex.com/~miallen/errcmpp.html
                raise

            # Ref OSError: [Errno 36] File name too long https://github.com/ping/odmpy/issues/5
            # create book folder, file with just the title
            book_folder = os.path.join(
                args.download_dir, u'{}'.format(title.replace(os.sep, '-')))
            os.makedirs(book_folder)

            book_filename = os.path.join(
                book_folder, u'{}.mp3'.format(title.replace(os.sep, '-')))
            book_m4b_filename = os.path.join(
                book_folder, u'{}.m4b'.format(title.replace(os.sep, '-')))

    cover_filename = os.path.join(book_folder, 'cover.jpg')
    debug_filename = os.path.join(book_folder, 'debug.json')
    if not os.path.isfile(cover_filename) and cover_url:
        cover_res = requests.get(cover_url, headers={'User-Agent': UA})
        cover_res.raise_for_status()
        with open(cover_filename, 'wb') as outfile:
            outfile.write(cover_res.content)

    acquisition_url = root.find('License').find('AcquisitionUrl').text
    media_id = root.attrib['id']

    client_id = str(uuid.uuid1()).upper()
    raw_hash = '{client_id}|{omc}|{os}|ELOSNOC*AIDEM*EVIRDREVO'.format(
        client_id=client_id, omc=OMC, os=OS)
    m = hashlib.sha1(raw_hash.encode('utf-16-le'))
    license_hash = base64.b64encode(m.digest())

    # Extract license
    # License file is downloadable only once per odm
    # so we keep it in case downloads fail
    _, odm_filename = os.path.split(args.odm_file)
    license_file = os.path.join(args.download_dir,
                                odm_filename.replace('.odm', '.license'))

    if os.path.isfile(license_file):
        logger.warning(
            'Already downloaded license file: {}'.format(license_file))
    else:
        # download license file
        params = OrderedDict([('MediaID', media_id), ('ClientID', client_id),
                              ('OMC', OMC), ('OS', OS),
                              ('Hash', license_hash)])

        license_res = requests.get(acquisition_url,
                                   params=params,
                                   headers={'User-Agent': UA},
                                   timeout=args.timeout,
                                   stream=True)
        try:
            license_res.raise_for_status()
            with open(license_file, 'wb') as outfile:
                for chunk in license_res.iter_content(1024):
                    outfile.write(chunk)
            logger.debug('Saved license file {}'.format(license_file))

        except HTTPError as he:
            if he.response.status_code == 404:
                # odm file has expired
                logger.error('The loan file "{}" has expired. '
                             'Please download again.'.format(args.odm_file))
            else:
                logger.error(he.response.content)
            sys.exit(1)
        except ConnectionError as ce:
            logger.error('ConnectionError: {}'.format(str(ce)))
            sys.exit(1)

    license_xml_doc = xml.etree.ElementTree.parse(license_file)
    license_root = license_xml_doc.getroot()

    ns = '{http://license.overdrive.com/2008/03/License.xsd}'

    license_client = license_root.find('{}SignedInfo'.format(ns)).find(
        '{}ClientID'.format(ns))
    license_client_id = license_client.text

    lic_file_contents = ''
    with open(license_file, 'r') as lic_file:
        lic_file_contents = lic_file.read()

    cover_bytes = None
    if os.path.isfile(cover_filename):
        with open(cover_filename, 'rb') as f:
            cover_bytes = f.read()

    track_count = 0
    file_tracks = []
    keep_cover = args.always_keep_cover
    audio_lengths_ms = []
    for p in download_parts:
        part_number = int(p['number'])
        part_filename = os.path.join(
            book_folder, u'{}.mp3'.format(
                slugify(u'{} - Part {:02d}'.format(title, part_number),
                        allow_unicode=True)))
        part_tmp_filename = u'{}.part'.format(part_filename)
        part_file_size = int(p['filesize'])
        part_url_filename = p['filename']
        part_download_url = '{}/{}'.format(download_baseurl, part_url_filename)
        part_markers = []

        if os.path.isfile(part_filename):
            logger.warning('Already saved {}'.format(
                colored.magenta(part_filename)))
        else:
            try:
                part_download_res = requests.get(part_download_url,
                                                 headers={
                                                     'User-Agent': UA,
                                                     'ClientID':
                                                     license_client_id,
                                                     'License':
                                                     lic_file_contents
                                                 },
                                                 timeout=args.timeout,
                                                 stream=True)

                part_download_res.raise_for_status()

                chunk_size = 1024 * 1024
                expected_chunk_count = math.ceil(1.0 * part_file_size /
                                                 chunk_size)
                with open(part_tmp_filename, 'wb') as outfile:
                    for chunk in progress.bar(
                            part_download_res.iter_content(
                                chunk_size=chunk_size),
                            label='Part {}'.format(part_number),
                            expected_size=expected_chunk_count):
                        if chunk:
                            outfile.write(chunk)

                # try to remux file to remove mp3 lame tag errors
                cmd = [
                    'ffmpeg', '-y', '-nostdin', '-hide_banner', '-loglevel',
                    'info' if logger.level == logging.DEBUG else 'error', '-i',
                    part_tmp_filename, '-c:a', 'copy', '-c:v', 'copy',
                    part_filename
                ]
                try:
                    exit_code = subprocess.call(cmd)
                    if exit_code:
                        logger.warning(
                            'ffmpeg exited with the code: {0!s}'.format(
                                exit_code))
                        logger.warning('Command: {0!s}'.format(' '.join(cmd)))
                        os.rename(part_tmp_filename, part_filename)
                    else:
                        os.remove(part_tmp_filename)
                except Exception as ffmpeg_ex:
                    logger.warning('Error executing ffmpeg: {}'.format(
                        str(ffmpeg_ex)))
                    os.rename(part_tmp_filename, part_filename)

            except HTTPError as he:
                logger.error('HTTPError: {}'.format(str(he)))
                logger.debug(he.response.content)
                sys.exit(1)

            except ConnectionError as ce:
                logger.error('ConnectionError: {}'.format(str(ce)))
                sys.exit(1)

        try:
            # Fill id3 info for mp3 part
            mutagen_audio = MP3(part_filename, ID3=ID3)
            if not mutagen_audio.tags:
                mutagen_audio.tags = ID3()
            if 'TIT2' not in mutagen_audio.tags:
                mutagen_audio.tags.add(
                    TIT2(encoding=3, text=u'{}'.format(title)))
            if 'TIT3' not in mutagen_audio.tags:
                mutagen_audio.tags.add(
                    TIT3(encoding=3, text=u'{}'.format(description)))
            if 'TALB' not in mutagen_audio.tags:
                mutagen_audio.tags.add(
                    TALB(encoding=3, text=u'{}'.format(title)))
            if 'TPE1' not in mutagen_audio.tags:
                mutagen_audio.tags.add(
                    TPE1(encoding=3, text=u'{}'.format(authors[0])))
            if 'TPE2' not in mutagen_audio.tags:
                mutagen_audio.tags.add(
                    TPE2(encoding=3, text=u'{}'.format(authors[0])))
            if 'TRCK' not in mutagen_audio.tags:
                mutagen_audio.tags.add(
                    TRCK(encoding=3, text=u'{}'.format(part_number)))
            if 'TPUB' not in mutagen_audio.tags:
                mutagen_audio.tags.add(
                    TPUB(encoding=3, text=u'{}'.format(publisher)))
            if 'COMM' not in mutagen_audio.tags:
                mutagen_audio.tags.add(
                    COMM(encoding=3,
                         desc=u'Description',
                         text=u'{}'.format(description)))
            if cover_bytes:
                mutagen_audio.tags.add(
                    APIC(encoding=3,
                         mime=u'image/jpeg',
                         type=3,
                         desc=u'Cover',
                         data=cover_bytes))
            mutagen_audio.save()

            audio_lengths_ms.append(
                int(round(mutagen_audio.info.length * 1000)))

            # Extract OD chapter info from mp3s for use in merged file
            if 'TXXX:OverDrive MediaMarkers' in mutagen_audio.tags \
                    and mutagen_audio.tags['TXXX:OverDrive MediaMarkers'].text:
                marker_text = mutagen_audio.tags[
                    'TXXX:OverDrive MediaMarkers'].text[0]
                try:
                    tree = xml.etree.ElementTree.fromstring(marker_text)
                except UnicodeEncodeError:
                    tree = xml.etree.ElementTree.fromstring(
                        marker_text.encode('ascii', 'ignore').decode('ascii'))

                for m in tree.iter('Marker'):
                    marker_name = m.find('Name').text.strip()
                    marker_timestamp = m.find('Time').text
                    timestamp = None
                    ts_mark = 0
                    # 2 timestamp formats found
                    for r in ('%M:%S.%f', '%H:%M:%S.%f'):
                        try:
                            timestamp = time.strptime(marker_timestamp, r)
                            ts = datetime.timedelta(hours=timestamp.tm_hour,
                                                    minutes=timestamp.tm_min,
                                                    seconds=timestamp.tm_sec)
                            ts_mark = int(1000 * ts.total_seconds())
                            break
                        except ValueError:
                            pass

                    if not timestamp:
                        # some invalid timestamp string, e.g. 60:15.00
                        mobj = re.match(MARKER_TIMESTAMP_HHMMSS,
                                        marker_timestamp)
                        if mobj:
                            ts_mark = int(mobj.group('hr')) * 60 * 60 * 1000 + \
                                      int(mobj.group('min')) * 60 * 1000 + \
                                      int(mobj.group('sec')) * 1000 + \
                                      int(mobj.group('ms'))
                        else:
                            mobj = re.match(MARKER_TIMESTAMP_MMSS,
                                            marker_timestamp)
                            if mobj:
                                ts_mark = int(mobj.group('min')) * 60 * 1000 + \
                                          int(mobj.group('sec')) * 1000 + \
                                          int(mobj.group('ms'))
                            else:
                                raise ValueError(
                                    'Invalid marker timestamp: {}'.format(
                                        marker_timestamp))

                    track_count += 1
                    part_markers.append((u'ch{:02d}'.format(track_count),
                                         marker_name, ts_mark))

            if args.add_chapters and not args.merge_output:
                # set the chapter marks
                generated_markers = []
                for j, file_marker in enumerate(part_markers):
                    generated_markers.append({
                        'id':
                        file_marker[0],
                        'text':
                        file_marker[1],
                        'start_time':
                        int(file_marker[2]),
                        'end_time':
                        int(
                            round(mutagen_audio.info.length *
                                  1000) if j == (len(part_markers) -
                                                 1) else part_markers[j +
                                                                      1][2]),
                    })

                mutagen_audio.tags.add(
                    CTOC(element_id=u'toc',
                         flags=CTOCFlags.TOP_LEVEL | CTOCFlags.ORDERED,
                         child_element_ids=[
                             m['id'].encode('ascii') for m in generated_markers
                         ],
                         sub_frames=[TIT2(text=[u'Table of Contents'])]))
                for i, m in enumerate(generated_markers):
                    mutagen_audio.tags.add(
                        CHAP(element_id=m['id'].encode('ascii'),
                             start_time=m['start_time'],
                             end_time=m['end_time'],
                             sub_frames=[TIT2(text=[u'{}'.format(m['text'])])
                                         ]))
                    start_time = datetime.timedelta(
                        milliseconds=m['start_time'])
                    end_time = datetime.timedelta(milliseconds=m['end_time'])
                    logger.debug(
                        u'Added chap tag => {}: {}-{} "{}" to "{}"'.format(
                            colored.cyan(m['id']), start_time, end_time,
                            colored.cyan(m['text']),
                            colored.blue(part_filename)))

                if len(generated_markers) == 1:
                    # Weird player problem on voice where title is shown instead of chapter title
                    mutagen_audio.tags.add(
                        TIT2(encoding=3, text=u'{}'.format(title)))

                mutagen_audio.save()

        except Exception as e:
            logger.warning('Error saving ID3: {}'.format(
                colored.red(str(e), bold=True)))
            keep_cover = True

        logger.info('Saved "{}"'.format(colored.magenta(part_filename)))

        file_tracks.append({
            'file': part_filename,
            'markers': part_markers,
        })
    # end loop: for p in download_parts:

    debug_meta['audio_lengths_ms'] = audio_lengths_ms
    debug_meta['file_tracks'] = file_tracks

    if args.merge_output:
        if os.path.isfile(book_filename if args.merge_format ==
                          'mp3' else book_m4b_filename):
            logger.warning('Already saved "{}"'.format(
                colored.magenta(book_filename if args.merge_format ==
                                'mp3' else book_m4b_filename)))
            sys.exit(0)

        logger.info('Generating "{}"...'.format(
            colored.magenta(book_filename if args.merge_format ==
                            'mp3' else book_m4b_filename)))

        # We can't generate the m4b directly here even if requested; the merged
        # mp3 is tagged first, then converted to m4b with ffmpeg further below
        temp_book_filename = '{}.part'.format(book_filename)
        cmd = [
            'ffmpeg',
            '-y',
            '-nostdin',
            '-hide_banner',
            '-loglevel',
            'info' if logger.level == logging.DEBUG else 'error',
            '-stats',
            '-i',
            'concat:{}'.format('|'.join([ft['file'] for ft in file_tracks])),
            '-acodec',
            'copy',
            '-b:a',
            '64k',  # explicitly set audio bitrate
            '-f',
            'mp3',
            temp_book_filename
        ]
        exit_code = subprocess.call(cmd)

        if exit_code:
            logger.error(
                'ffmpeg exited with the code: {0!s}'.format(exit_code))
            logger.error('Command: {0!s}'.format(' '.join(cmd)))
            exit(exit_code)
        os.rename(temp_book_filename, book_filename)

        mutagen_audio = MP3(book_filename, ID3=ID3)
        if not mutagen_audio.tags:
            mutagen_audio.tags = ID3()
        # Overwrite title since it gets picked up from the first track by default
        mutagen_audio.tags.add(TIT2(encoding=3, text=u'{}'.format(title)))
        if 'TALB' not in mutagen_audio.tags:
            mutagen_audio.tags.add(TALB(encoding=3, text=u'{}'.format(title)))
        if 'TPE1' not in mutagen_audio.tags:
            mutagen_audio.tags.add(
                TPE1(encoding=3, text=u'{}'.format(authors[0])))
        if 'TPE2' not in mutagen_audio.tags:
            mutagen_audio.tags.add(
                TPE2(encoding=3, text=u'{}'.format(authors[0])))
        if 'TPUB' not in mutagen_audio.tags:
            mutagen_audio.tags.add(
                TPUB(encoding=3, text=u'{}'.format(publisher)))
        if 'COMM' not in mutagen_audio.tags:
            mutagen_audio.tags.add(
                COMM(encoding=3,
                     desc=u'Description',
                     text=u'{}'.format(description)))
        mutagen_audio.save()

        if args.add_chapters:
            merged_markers = []
            for i, f in enumerate(file_tracks):
                prev_tracks_len_ms = 0 if i == 0 else reduce(
                    lambda x, y: x + y, audio_lengths_ms[0:i])
                this_track_endtime_ms = int(
                    reduce(lambda x, y: x + y, audio_lengths_ms[0:i + 1]))
                file_markers = f['markers']
                for j, file_marker in enumerate(file_markers):
                    merged_markers.append({
                        'id':
                        file_marker[0],
                        'text':
                        u'{}'.format(file_marker[1]),
                        'start_time':
                        int(file_marker[2]) + prev_tracks_len_ms,
                        'end_time':
                        int(this_track_endtime_ms if j == (
                            len(file_markers) -
                            1) else file_markers[j + 1][2] +
                            prev_tracks_len_ms),
                    })
            debug_meta['merged_markers'] = merged_markers

            mutagen_audio.tags.add(
                CTOC(element_id=u'toc',
                     flags=CTOCFlags.TOP_LEVEL | CTOCFlags.ORDERED,
                     child_element_ids=[
                         m['id'].encode('ascii') for m in merged_markers
                     ],
                     sub_frames=[TIT2(text=[u"Table of Contents"])]))
            for i, m in enumerate(merged_markers):
                mutagen_audio.tags.add(
                    CHAP(element_id=m['id'].encode('ascii'),
                         start_time=m['start_time'],
                         end_time=m['end_time'],
                         sub_frames=[TIT2(text=[u'{}'.format(m['text'])])]))
                start_time = datetime.timedelta(milliseconds=m['start_time'])
                end_time = datetime.timedelta(milliseconds=m['end_time'])
                logger.debug(
                    u'Added chap tag => {}: {}-{} "{}" to "{}"'.format(
                        colored.cyan(m['id']), start_time, end_time,
                        colored.cyan(m['text']), colored.blue(book_filename)))

        mutagen_audio.save()

        if args.merge_format == 'mp3':
            logger.info('Merged files into "{}"'.format(
                colored.magenta(book_filename if args.merge_format ==
                                'mp3' else book_m4b_filename)))

        if args.merge_format == 'm4b':
            temp_book_m4b_filename = '{}.part'.format(book_m4b_filename)
            cmd = [
                'ffmpeg',
                '-y',
                '-nostdin',
                '-hide_banner',
                '-loglevel',
                'info' if logger.level == logging.DEBUG else 'error',
                '-stats',
                '-i',
                book_filename,
            ]
            if os.path.isfile(cover_filename):
                cmd.extend(['-i', cover_filename])

            cmd.extend([
                '-map',
                '0:a',
                '-c:a',
                'aac',
                '-b:a',
                '64k',  # explicitly set audio bitrate
            ])
            if os.path.isfile(cover_filename):
                cmd.extend([
                    '-map',
                    '1:v',
                    '-c:v',
                    'copy',
                    '-disposition:v:0',
                    'attached_pic',
                ])

            cmd.extend(['-f', 'mp4', temp_book_m4b_filename])
            exit_code = subprocess.call(cmd)

            if exit_code:
                logger.error(
                    'ffmpeg exited with the code: {0!s}'.format(exit_code))
                logger.error('Command: {0!s}'.format(' '.join(cmd)))
                exit(exit_code)

            os.rename(temp_book_m4b_filename, book_m4b_filename)
            logger.info('Merged files into "{}"'.format(
                colored.magenta(book_m4b_filename)))
            try:
                os.remove(book_filename)
            except Exception as e:
                logger.warning('Error deleting "{}": {}'.format(
                    book_filename, str(e)))

        if not args.keep_mp3:
            for f in file_tracks:
                try:
                    os.remove(f['file'])
                except Exception as e:
                    logger.warning('Error deleting "{}": {}'.format(
                        f['file'], str(e)))

    if not keep_cover:
        try:
            os.remove(cover_filename)
        except Exception as e:
            logger.warning('Error deleting "{}": {}'.format(
                cover_filename, str(e)))

    if args.write_json:
        with open(debug_filename, 'w') as outfile:
            json.dump(debug_meta, outfile, indent=2)
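The chapter-tagging logic inside run() is spread across the per-part and merged-output branches. Distilled into a standalone sketch (the (id, title, start_ms) marker layout and a total length in milliseconds mirror the part_markers handling above; the mp3 is assumed to already carry an ID3 tag):

from mutagen.id3 import ID3, CHAP, CTOC, CTOCFlags, TIT2

def tag_chapters(filepath, markers, total_ms):
    """Write one CTOC plus one CHAP per (id, title, start_ms) marker."""
    tags = ID3(filepath)
    tags.add(
        CTOC(element_id=u'toc',
             flags=CTOCFlags.TOP_LEVEL | CTOCFlags.ORDERED,
             child_element_ids=[m[0] for m in markers],
             sub_frames=[TIT2(text=[u'Table of Contents'])]))
    for i, (chap_id, title, start_ms) in enumerate(markers):
        # each chapter ends where the next one starts; the last ends at total_ms
        end_ms = total_ms if i == len(markers) - 1 else markers[i + 1][2]
        tags.add(
            CHAP(element_id=chap_id,
                 start_time=int(start_ms),
                 end_time=int(end_ms),
                 sub_frames=[TIT2(text=[title])]))
    tags.save()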
Example 9
def download(broadcast, targetDir, reliveUrlTemplate):
    broadcastStartDT = parse(broadcast['start'])
    broadcastEndDT = parse(broadcast['end'])

    # build filename from channel, show title and broadcast datetime, while escaping "bad" characters
    filename = os.path.join(
        targetDir,
        re.sub(
            r'[^\w\s\-\.\[\]]', '_',
            broadcast['trackingInfos']['pageVars']['broadcast_service'] + ' ' +
            broadcastStartDT.astimezone(
                pytz.timezone('Europe/Berlin')).strftime("%Y-%m-%d %H:%M") +
            ' ' + broadcast['trackingInfos']['pageVars']['topline']) + ".mp3")

    # skip broadcast if the file already exists
    if os.path.isfile(filename) and os.path.getsize(filename) > 0:
        print("%s already exists, skipping." % filename, flush=True)
        return

    # get links to all audio segments of this broadcast
    segmentUrls = getSegmentUrls(broadcastStartDT, broadcastEndDT,
                                 reliveUrlTemplate)
    if segmentUrls is None:
        # skip broadcast if no segments available
        print("Skipping %s, not yet in relive" % filename)
        return

    # download all ts segments and convert them to mp3
    print("Downloading %s ..." % filename, end=" ", flush=True)

    try:
        sound = AudioSegment.empty()
        for i in segmentUrls:
            sound += AudioSegment.from_file(BytesIO(urlopen(i).read()))
        sound.export(filename, format="mp3")
    except Exception:
        print("failed.", flush=True)
        return
    else:
        print("done.", flush=True)

    # ID3: remove all tags
    try:
        tags = ID3(filename)
        tags.delete()
    except ID3NoHeaderError:
        tags = ID3()

    # ID3: save as much information as possible in the ID3 tags
    tags.add(
        TRSN(
            text=[broadcast['trackingInfos']['pageVars']['broadcast_service']
                  ]))
    tags.add(
        TPE1(
            text=[broadcast['trackingInfos']['pageVars']['broadcast_service']
                  ]))
    tags.add(
        TALB(text=[
            " - ".join(
                list(
                    dict.fromkeys([
                        broadcast['trackingInfos']['pageVars']['topline'],
                        broadcast['trackingInfos']['pageVars']['title']
                    ])))
        ]))
    tags.add(TRCK(text=['1/1']))
    #tags.add(TIT2(text=[broadcastStartDT.astimezone(pytz.timezone('Europe/Berlin')).strftime("%Y-%m-%d %H:%M")]))
    tags.add(TIT2(text=[broadcast['publicationOf']['title']]))
    tags.add(
        COMM(lang="deu",
             desc="desc",
             text=[broadcast['publicationOf']['description']]))
    tags.add(
        TYER(text=[
            broadcastStartDT.astimezone(pytz.timezone(
                'Europe/Berlin')).strftime("%Y")
        ]))
    tags.add(
        TDAT(text=[
            broadcastStartDT.astimezone(pytz.timezone(
                'Europe/Berlin')).strftime("%d%m")
        ]))
    tags.add(
        TIME(text=[
            broadcastStartDT.astimezone(pytz.timezone(
                'Europe/Berlin')).strftime("%H%M")
        ]))
    tags.add(
        TLEN(text=[
            int((broadcastEndDT - broadcastStartDT).total_seconds() * 1000)
        ]))
    tags.add(WOAS(url=broadcast['publicationOf']['canonicalUrl']))
    tags.add(WORS(url="https://www.br.de/radio/"))

    # ID3: chapters
    chapterNr = 0
    for chapter in broadcast['items']:
        chapterStartDT = parse(chapter['start'])

        if 'duration' in chapter and chapter['duration'] is not None:
            chapterEndDT = chapterStartDT + timedelta(
                seconds=chapter['duration'])
        else:
            chapterEndDT = broadcastEndDT

        artists = []
        for i in ['performer', 'author']:
            if i in chapter and chapter[i] is not None and len(chapter[i]) > 0:
                artists.append(chapter[i])

        titles = []
        for i in ['title']:
            if i in chapter and chapter[i] is not None and len(chapter[i]) > 0:
                titles.append(chapter[i])

        tags.add(
            CHAP(element_id=str(chapterNr),
                 start_time=floor(
                     (chapterStartDT - broadcastStartDT).total_seconds() *
                     1000),
                 end_time=ceil(
                     (chapterEndDT - broadcastStartDT).total_seconds() * 1000),
                 sub_frames=[
                     TIT2(text=[
                         " - ".join([" ".join(artists), " ".join(titles)])
                     ])
                 ]))
        chapterNr += 1

    tocList = ",".join([str(i) for i in range(0, chapterNr)])

    tags.add(
        CTOC(element_id="toc",
             flags=CTOCFlags.TOP_LEVEL | CTOCFlags.ORDERED,
             child_element_ids=[tocList],
             sub_frames=[TIT2(text=["Table Of Contents"])]))

    # ID3: cover image
    response = requests.get(
        broadcast['publicationOf']['defaultTeaserImage']['url'], timeout=5)
    if response.status_code == 200:
        tags.add(
            APIC(mime=response.headers['content-type'],
                 desc="Front Cover",
                 data=response.content))

    # save ID3 tags
    tags.save(filename, v2_version=3)
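Since a CTOC is only useful when every id in child_element_ids resolves to a CHAP frame, a quick read-back check along these lines can catch mismatches (a sketch; `filename` stands in for the file tagged above):

from mutagen.id3 import ID3

tags = ID3(filename)
toc = tags.getall("CTOC")[0]
chap_ids = {chap.element_id for chap in tags.getall("CHAP")}
for child_id in toc.child_element_ids:
    assert child_id in chap_ids, "TOC entry %s has no CHAP frame" % child_id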
Example 10
        tags.add(WOAS(url=showInfo['website']))
        tags.add(WORS(url=stationInfo['website']))

        for chapter in showInfo['parts'][partNo]['chapters']:
            tags.add(
                CHAP(element_id=chapter["id"],
                     start_time=chapter["start_ms"],
                     end_time=chapter["end_ms"],
                     sub_frames=[TIT2(text=[chapter["title"]])]))

        tocList = ",".join([
            chapter["id"] for chapter in showInfo['parts'][partNo]['chapters']
        ])
        tags.add(
            CTOC(element_id="toc",
                 flags=CTOCFlags.TOP_LEVEL | CTOCFlags.ORDERED,
                 child_element_ids=[tocList],
                 sub_frames=[TIT2(text=["Table Of Contents"])]))

        if showInfo['image_mime'] is not None and showInfo[
                'image_data'] is not None:
            tags.add(
                APIC(mime=showInfo['image_mime'],
                     desc="Front Cover",
                     data=showInfo['image_data']))

        # save ID3 tags
        tags.save(showInfo['parts'][partNo]['filepath'] + ".part",
                  v2_version=3)

        # done!
        os.rename(showInfo['parts'][partNo]['filepath'] + ".part",
                  showInfo['parts'][partNo]['filepath'])