Example 1
def main():
    try:
        _g.conf = Struct()
        args    = init_args()

        local_import()
        init_config()

        if args.outdir:
            _g.conf._outdir = args.outdir
        else:
            _g.conf._outdir = _g.conf._default_outdir

        if args.auth:
            up = args.auth.split(':', 1)

            if len(up) == 1 or '' in up:
                _out.die('argument -a: bad auth format')

            _g.conf._user, _g.conf._pass = up

        if args.silent or _g.conf._no_output:
            # go ahead and set this so it is globally known.
            # there is no need for distinction at this point.
            _g.conf._no_output = True
            _g.log.addFilter(nullfilter)

        ret = main_loop(args.manga)
    except (KeyboardInterrupt, EOFError) as e:
        print()
        _out._('caught {} signal, exiting...'.format(type(e).__name__))
        return 0

    return ret
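
main() depends on two small helpers that this excerpt does not define: Struct, used as a bare attribute container for the global configuration, and nullfilter, a logging filter installed to silence all log records in silent mode. Their real definitions live elsewhere in the codebase; a minimal sketch consistent with the usage above (behavior assumed, not confirmed by this excerpt) could be:

class Struct:
    # assumed: an empty attribute bag; fields such as _outdir, _user,
    # and _pass are attached to instances at runtime.
    pass

def nullfilter(record):
    # assumed: a callable logging filter. logging accepts a plain
    # callable here (Python 3.2+); returning a falsy value drops every
    # record, which is the effect _g.log.addFilter(nullfilter) needs.
    return 0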
Example 2
def main_loop(manga_list):
    global compc, compv

    for m in manga_list:
        req               = _parsers.ParseRequest(m)
        sout, title, path = get_listing(req._name)

        if _g.conf._usecache and _g.conf._found_in_cache:
            sout = subdir_recurse(sout, path)
        else:
            sout = sout.splitlines()
            sout = rem_subdir_recurse(sout, path)

        compv, compc, allf, compfile = walk_thru_listing(req, title, sout)

        if req._vols and req._vols[-1] == req.ALL:
            del req._vols[-1]

        if req._chps and req._chps[-1] == req.ALL:
            del req._chps[-1]

        missv = str([v for v in req._vols if v not in compv]).strip('[]')
        missc = str([c for c in req._chps if c not in compc]).strip('[]')

        if missv:
            _out._("couldn't find vol(s): " + missv)

        if missc:
            _out._("couldn't find chp(s): " + missc)

        if any((compfile, compc, compv)):
            # XXX sigh...
            # need to append MLOC when we get a cache match.
            ppfx = ''.join(['https://', loc['DOMAIN']])

            if _g.conf._found_in_cache:
                ppfx = ''.join([ppfx, loc['MLOC']])

            try:
                stdscr          = unicurses.initscr()
                _g.conf._stdscr = stdscr
                unicurses.noecho()

                if compfile:
                    _out._('downloading complete archive... ', end='')
                    _g.conf._stdscr.erase()
                    _g.conf._stdscr.addstr(0, 0, compfile.name)
                    _g.conf._stdscr.refresh()
                    url = '/'.join([
                        ppfx,
                        _util.create_nwo_basename(compfile.basename),
                        urllib.parse.quote(compfile.name),
                    ])
                    _curl.curl_to_file(url, compfile.name, 'HTTP')
                elif compv or compc:
                    _out._('downloading volume/chapters... ', end='')
                    for f, v, c in allf:
                        #_g.log.info('DL ' + f)
                        _g.conf._stdscr.erase()
                        _g.conf._stdscr.addstr(0, 0,
                                               'title - {}'.format(title))
                        _g.conf._stdscr.addstr(1, 0,
                                               'current - {}'.format(f.name))
                        _g.conf._stdscr.refresh()
                        url = '/'.join([
                            ppfx,
                            _util.create_nwo_basename(f.basename),
                            urllib.parse.quote(f.name),
                        ])
                        _curl.curl_to_file(url, f.name, 'HTTP')
            finally:
                unicurses.nocbreak()
                _g.conf._stdscr.keypad(False)
                unicurses.echo()
                unicurses.endwin()

            print('done', file=sys.stderr)
        else:
            _out._('could not find any requested volume/chapters.')
            return 1

    return 0
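
The try/finally in main_loop() follows the standard curses discipline: however the download loop exits, the terminal must be restored (nocbreak, echo re-enabled, endwin called). The same teardown can be packaged as a context manager; the sketch below only illustrates that pattern with unicurses and is not code from this project:

from contextlib import contextmanager

import unicurses

@contextmanager
def curses_screen():
    # initialize the screen and guarantee terminal restoration,
    # mirroring the try/finally in main_loop() above.
    stdscr = unicurses.initscr()
    unicurses.noecho()
    try:
        yield stdscr
    finally:
        unicurses.nocbreak()
        unicurses.echo()
        unicurses.endwin()

# usage sketch:
# with curses_screen() as stdscr:
#     ... draw progress, run downloads ...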
Example 3
def get_listing(manga):
    badret = ('', '')

    if _g.conf._usecache:
        # XXX move this
        def match_dir(diriter, ldict):
            global mlow

            try:
                cdir = next(diriter)
            except StopIteration:
                for cdict in ldict:
                    if cdict['name'].lower() == mlow:
                        return (cdict['contents'], cdict['name'])
                return None

            for cdict in ldict:
                if cdict['name'] == cdir:
                    return match_dir(diriter, cdict['contents'])
            return None

        jsonloc = _g.conf._cachefile or os.path.join(_g.conf._home, '.cache',
                                                     'madodl', 'files.json')

        jsondirloc = os.path.dirname(jsonloc)

        if not os.path.exists(jsonloc):
            os.makedirs(jsondirloc, 0o770, exist_ok=True)
            _curl.curl_json_list(jsonloc, True)

        assert os.path.exists(jsonloc)

        path = _util.create_nwo_path(manga)
        d1,d2,d3 = path.split('/')
        mdir = None

        with breaks(open(jsonloc, errors='surrogateescape')) as f:
            jobj = json.load(f)

            for o in jobj[0].get('contents'):
                if o['name'] == 'Manga':
                    jobj = o['contents']
                    break

            global mlow
            mlow = manga.lower()
            mdir, title = match_dir(iter((d1,d2,d3)), jobj) or badret

            if not mdir:
                _g.log.warning("couldn't find title in JSON file. Trying "
                               "online query.")
                _g.conf._found_in_cache = False
                raise breaks.Break

            _g.conf._found_in_cache = True
            _g.conf._cururl = 'https://{}{}{}/{}/{}/{}'.format(loc['DOMAIN'],
                                            loc['MLOC'], d1, d2, d3, title)

            _g.log.info('\n-----\n{}-----'.format(mdir))

            path = '/'.join((path, title))

            return (mdir, title, path)

    qout = search_query(manga).getvalue().decode()
    qp   = _parsers.ParseQuery()
    qp.feed(qout)

    # FIXME:
    # this is a temporary workaround to
    # filter out non-manga results until
    # madokami allows for this granularity itself.
    qp.mresultnum = 0
    qp.mresults   = []
    for url, r in qp.results:
        if r.startswith('/Manga') and r.count('/') == 5:
            qp.mresults.append([url,r])
            qp.mresultnum += 1

    if qp.mresultnum == 0:
        _out.die('manga not found')

    if qp.mresultnum > 1:
        print('Multiple matches found. Please choose from the '
              'selection below:\n')
        for i, (url, f) in enumerate(qp.mresults, 1):
            print('{}: {}'.format(i, os.path.basename(f)))

        print()

        while 1:
            try:
                ch = int(input('choice > '))
                if 1 <= ch <= qp.mresultnum:
                    break
                print('Pick a number between 1 and {}'.format(qp.mresultnum))
            except ValueError:
                print('Invalid input.')

        m = qp.mresults[ch-1][0]
        title = os.path.basename(qp.mresults[ch-1][1])
    else:
        m = qp.mresults[0][0]
        title = os.path.basename(qp.mresults[0][1])
        _out._('one match found: {}'.format(title))

    dirls = search_exact(m, True).getvalue().decode()

    _g.log.info('\n-----\n{}-----'.format(dirls))

    return (dirls, title, m)
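
get_listing() uses a breaks helper (defined elsewhere in the project) as a context manager with an early-exit hatch: raising breaks.Break inside the with block abandons the cached-listing path, and execution falls through to the online query that follows it. A minimal sketch consistent with that usage, assuming the wrapped object only needs close() called on exit:

class breaks:
    # assumed shape of the helper: wraps a resource, hands it to the
    # with-body, closes it on exit, and swallows breaks.Break so
    # execution continues after the block.
    class Break(Exception):
        pass

    def __init__(self, resource):
        self._resource = resource

    def __enter__(self):
        return self._resource

    def __exit__(self, exc_type, exc, tb):
        self._resource.close()
        # returning True suppresses the exception; only Break is absorbed.
        return exc_type is breaks.Break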