Example #1
from flask import render_template, url_for

import data_manager
import util


def get_question_details(question_id):
    """ Input: question_id = string
        Output: render_template with different attributes

        This function collects the question's and answers' details for the tables rendered on question_details.html.

        ATTRIBUTES:
            question = {dictionary}
            question_header = {list}
            title = {string}
            answers = {list which has dictionaries}
            answers_header = {list}
            edit_question_url = {string}
        """
    question = data_manager.read_from_csv(id=question_id)
    title = question["title"]  # strings are immutable; no copy needed
    del question["title"]
    question_header = util.create_header(question)

    all_answers = data_manager.read_from_csv(data_manager.ANSWER_FILE_PATH)
    answers = [
        answer for answer in all_answers
        if answer["question_id"] == question_id
    ]
    # derive the answers-table header from an answer row, not the question
    answers_header = util.create_header(answers[0]) if answers else []

    edit_question_url = url_for('route_edit_question', question_id=question_id)
    return render_template('question_details.html',
                           question=question,
                           question_header=question_header,
                           title=title,
                           answers=answers,
                           answers_header=answers_header,
                           edit_question_url=edit_question_url)
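util.create_header is not defined in this snippet; from its use here it evidently maps a row dict to a list of column headers for the template. A minimal sketch, assuming the headers are just the prettified dict keys (the real implementation may differ):

def create_header(row):
    # Turn a row's keys into display headers,
    # e.g. "submission_time" -> "Submission time".
    return [key.replace("_", " ").capitalize() for key in row]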
Example #2
import logging
import os
import subprocess

import util


def compress(source, target, pagesize=4096):
    logging.debug("Starting compression of %s to %s", repr(source), repr(target))
    logging.debug("Page size: %d",  pagesize)
    size = os.path.getsize(source)
    with open(target, "wb") as ftarget:
        ftarget.write(util.create_header("7zip", size))
        ftarget.flush()
        p = subprocess.Popen(
            ["7za", "a", "-an", "-txz", "-mx=9", "-so", source],
            stdout=ftarget,
            stderr=subprocess.PIPE)
        p.communicate()
    logging.debug("Done")
Example #3
import gzip
import logging
import os
import sys

import util


def compress(source, target, pagesize=4096):
    logging.debug("Starting compression of %s to %s", repr(source),
                  repr(target))
    logging.debug("Page size: %d", pagesize)
    size = os.path.getsize(source)
    with open(target, "wb") as ftarget:
        ftarget.write(util.create_header("gzip", size))
        with gzip.GzipFile(fileobj=ftarget, mode="wb",
                           compresslevel=9) as ftarget:
            for i, page in enumerate(util.get_pages(source,
                                                    pagesize=pagesize)):
                if i % 100 == 0 or (i + 1) * pagesize == size:
                    sys.stdout.write("\rProgress: {:.2f}%".format(
                        float(i * pagesize) / size * 100))
                    sys.stdout.flush()
                ftarget.write(page)
    sys.stdout.write("\n")
    logging.debug("Done")
Example #4
import gzip
import logging
import os
import shutil
import struct
import subprocess
import tempfile
from collections import defaultdict as dd

import bz2file

import util


def compress(source, target, reference, nointra, delta, inner, pagesize=4096):
    # some info
    logging.debug("Starting compression of %s to %s", repr(source),
                  repr(target))
    logging.debug("Page size: %d", pagesize)
    logging.debug("Reference dump: %s", reference)

    # pages + page numbers bookkeeping
    reference_pages, reference_pagenrs = [], {}
    for i, page in enumerate(util.get_pages(reference)):
        reference_pages.append(page)
        if page not in reference_pagenrs:
            reference_pagenrs[page] = i
    reference_pages_set = set(reference_pages)

    # find new + duplicatable pages
    dedups = dd(list)
    diffs = {}
    diff_seen = set()
    if nointra:
        new_pagenrs = []
    else:
        new_pagenrs = dd(list)
    new_pages = []
    same_distinct, same_total = set(), 0
    source_pages = []
    for i, page in enumerate(util.get_pages(source)):
        source_pages.append(page)
        if reference_pages[i] != page:
            if page not in reference_pages_set:
                if delta is not None:
                    d = util.create_diff(reference_pages[i], page)
                    if d is not None:
                        diff_seen.add(page)
                        diffs[i] = d
                        continue
                if nointra:
                    new_pagenrs.append(i)
                else:
                    new_pagenrs[page].append(i)
                new_pages.append(page)
            else:
                dedups[page].append(i)
        else:
            same_total += 1
            same_distinct.add(page)
    source_pages_set = set(source_pages)
    newpagescnt = len(new_pages), len(set(new_pages))

    # intervalize
    if nointra:
        new_pagenrs = util.intervalize(new_pagenrs)
    else:
        new_pagenrs = {
            page: util.intervalize(new_pagenrs[page])
            for page in new_pagenrs
        }
    dedups = {page: util.intervalize(dedups[page]) for page in dedups}

    # write file
    util.create_dir(".tmp")
    tmphandle, tmpfile = tempfile.mkstemp(dir=".tmp")
    try:
        with open(tmpfile, "wb") as ftmp:
            ftmp.write(reference.encode() + b"\x00")
            inorder = []
            seen = set()
            for page in reference_pages:
                if page in dedups and page not in seen:
                    inorder.append(page)
                    seen.add(page)
            util.create_pagenr_list(
                [reference_pagenrs[page] for page in inorder], ftmp)
            for page in inorder:
                ftmp.write(util.create_interval_list(dedups[page]))
            if delta is not None:
                util.create_pagenr_list(sorted(diffs), ftmp)
                for pagenr in sorted(diffs):
                    ftmp.write(diffs[pagenr])
            if nointra:
                ftmp.write(util.create_interval_list(new_pagenrs))
                for page in new_pages:
                    ftmp.write(page)
            else:
                ftmp.write(struct.pack("<I", len(new_pagenrs)))
                for page in new_pagenrs:
                    ftmp.write(util.create_interval_list(new_pagenrs[page]))
                for page in new_pagenrs:
                    ftmp.write(page)
        with open(tmpfile, "rb") as ftmp, open(target, "wb") as ftarget:
            ftarget.write(
                util.create_header(create_method_name(nointra, delta, inner),
                                   os.path.getsize(source)))
            ftarget.flush()
            if inner is None:
                shutil.copyfileobj(ftmp, ftarget)
            elif inner == "gzip":
                with gzip.GzipFile(fileobj=ftarget, mode="wb",
                                   compresslevel=9) as ftarget:
                    shutil.copyfileobj(ftmp, ftarget)
            elif inner == "bzip2":
                with bz2file.BZ2File(filename=ftarget,
                                     mode="wb",
                                     compresslevel=9) as ftarget:
                    shutil.copyfileobj(ftmp, ftarget)
            elif inner == "7zip":
                p = subprocess.Popen(
                    ["7za", "a", "-an", "-txz", "-mx=9", "-si", "-so", source],
                    stdin=ftmp,
                    stdout=ftarget,
                    stderr=subprocess.PIPE)
                p.communicate()
    finally:
        os.close(tmphandle)
        os.remove(tmpfile)

    # some info
    dedup_distinct = len(set(dedups.keys()) | same_distinct)
    dedup_total = same_total + sum(b - a + 1 for l in dedups.values()
                                   for a, b in l)
    logging.debug("Deduplicated pages at the same offset: %d/%d (%d/%d)",
                  same_total, len(source_pages), len(same_distinct),
                  len(source_pages_set))
    logging.debug("Deduplicated pages at different offsets: %d/%d (%d/%d)",
                  dedup_total - same_total, len(source_pages), len(dedups),
                  len(source_pages_set))
    logging.debug("Deduplicated pages in total: %d/%d (%d/%d)", dedup_total,
                  len(source_pages), dedup_distinct, len(source_pages_set))
    if delta is not None:
        logging.debug("Diffed pages: %d/%d (%d/%d)", len(diffs),
                      len(source_pages), len(diff_seen), len(source_pages_set))
    logging.debug("New pages: %d/%d (%d/%d)", newpagescnt[0],
                  len(source_pages), newpagescnt[1], len(source_pages_set))
    logging.debug("Done")

    return 0
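Three more util helpers carry the bookkeeping above. intervalize collapses a sorted list of page numbers into inclusive (start, end) runs (the "b - a + 1" accounting in the logging code confirms the intervals are inclusive); create_interval_list and create_pagenr_list serialize them. Their encodings are not shown, so the sketches below assume count-prefixed little-endian uint32 lists, matching the struct.pack("<I", ...) call in the function above:

import struct


def intervalize(numbers):
    # Collapse sorted page numbers into inclusive (start, end) runs,
    # e.g. [1, 2, 3, 7] -> [(1, 3), (7, 7)].
    intervals = []
    for n in numbers:
        if intervals and n == intervals[-1][1] + 1:
            intervals[-1] = (intervals[-1][0], n)
        else:
            intervals.append((n, n))
    return intervals


def create_interval_list(intervals):
    # Hypothetical encoding: interval count, then (start, end) pairs,
    # all little-endian uint32.
    parts = [struct.pack("<I", len(intervals))]
    parts.extend(struct.pack("<II", a, b) for a, b in intervals)
    return b"".join(parts)


def create_pagenr_list(pagenrs, f):
    # Hypothetical encoding: count-prefixed flat list of page numbers,
    # written directly to the open file object.
    f.write(struct.pack("<I", len(pagenrs)))
    for nr in pagenrs:
        f.write(struct.pack("<I", nr))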
Example #5
import getopt
import sys

from util import create_header, execute_request, is_open, create_request_body_anki_connect, \
    create_params_to_authenticate, print_usage, validate_token_address, create_notes, list_by_delimiter

header = create_header()
username = ''
password = ''
mairo_api_address = ''
lessons = ''
deck_name = ''
ENDS_OF_MAIRO_ADDRESS_TO_TOKEN = 'oauth/token'
ANKI_CONNECT_PORT = '8765'
ANKI_CONNECT_ADDRESS = 'localhost'
ANKI_CONNECT_ADDRESS_FULL = 'http://' + ANKI_CONNECT_ADDRESS + ':' + ANKI_CONNECT_PORT


def main(argv):
    process_args(argv)

    print('Checking if AnkiConnect is running...')
    if not is_open(ANKI_CONNECT_ADDRESS, ANKI_CONNECT_PORT):
        print(
            'Finished without success. Check that Anki is running and that the '
            'AnkiConnect add-on is installed. =('
        )
        sys.exit()

    print('Synchronizing...')
    print('If synchronization is slow, check Anki; you may not be logged in.')
    execute_request(ANKI_CONNECT_ADDRESS_FULL,
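is_open above guards the sync against AnkiConnect not listening; its implementation lives in util and is not shown. A minimal sketch, assuming a plain TCP connect check:

import socket

def is_open(address, port):
    # Probe whether anything accepts TCP connections on address:port.
    try:
        with socket.create_connection((address, int(port)), timeout=2):
            return True
    except OSError:
        return False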
Example #6
import gzip
import logging
import os
import shutil
import struct
import subprocess
import tempfile
from collections import defaultdict as dd

import bz2file

import util


def compress(source, target, inner, pagesize=4096):
    # some info
    logging.debug("Starting compression of %s to %s", repr(source),
                  repr(target))
    logging.debug("Page size: %d", pagesize)

    # pages + page numbers bookkeeping
    pagenrs = dd(list)
    pages = []
    for i, page in enumerate(util.get_pages(source, pagesize=pagesize)):
        pagenrs[page].append(i)
        pages.append(page)
    pages_set = set(pages)

    # drop pages that occur only once; intervalize the page numbers of the rest
    for page in list(pagenrs.keys()):
        if len(pagenrs[page]) == 1:
            del pagenrs[page]
        else:
            pagenrs[page] = util.intervalize(pagenrs[page])

    # write file
    util.create_dir(".tmp")
    tmphandle, tmpfile = tempfile.mkstemp(dir=".tmp")
    try:
        with open(tmpfile, "wb") as ftmp:
            ftmp.write(struct.pack("<I", len(pagenrs)))
            inorder = []
            seen = set()
            for page in pages:
                if page in pagenrs and page not in seen:
                    inorder.append(page)
                    seen.add(page)
            for page in inorder:
                ftmp.write(page)
            for page in inorder:
                ftmp.write(util.create_interval_list(pagenrs[page]))
            for page in pages:
                if page not in pagenrs:
                    ftmp.write(page)
        with open(tmpfile, "rb") as ftmp, open(target, "wb") as ftarget:
            ftarget.write(
                util.create_header(
                    "intradedup{}".format("" if inner is None else inner),
                    os.path.getsize(source)))
            ftarget.flush()
            if inner is None:
                shutil.copyfileobj(ftmp, ftarget)
            elif inner == "gzip":
                with gzip.GzipFile(fileobj=ftarget, mode="wb",
                                   compresslevel=9) as ftarget:
                    shutil.copyfileobj(ftmp, ftarget)
            elif inner == "bzip2":
                with bz2file.BZ2File(filename=ftarget,
                                     mode="wb",
                                     compresslevel=9) as ftarget:
                    shutil.copyfileobj(ftmp, ftarget)
            elif inner == "7zip":
                p = subprocess.Popen(
                    ["7za", "a", "-an", "-txz", "-mx=9", "-si", "-so", source],
                    stdin=ftmp,
                    stdout=ftarget,
                    stderr=subprocess.PIPE)
                p.communicate()
    finally:
        os.close(tmphandle)
        os.remove(tmpfile)

    # some info
    total = sum(b - a + 1 for l in pagenrs.values() for a, b in l)
    logging.debug("Deduplicated pages: %d/%d (%d/%d)", total, len(pages),
                  len(pagenrs), len(pages_set))
    logging.debug("Done")

    return 0
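As a usage sketch (file names hypothetical), the intra-deduplicating variant above would be driven like this, with DEBUG logging enabled to surface the deduplication statistics:

import logging

logging.basicConfig(level=logging.DEBUG)

# Deduplicate 4 KiB pages within dump.raw, then gzip the container.
compress("dump.raw", "dump.intradedup.gz", inner="gzip")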