Example #1
def main():

    # crawl and cache
    try:
        with open(XIPH_CACHE, "rb") as h:
            result = pickle.load(h)
    except IOError:
        result = crawl_xiph()
        with open(XIPH_CACHE, "wb") as h:
            pickle.dump(result, h)

    cache = get_cache()

    # add new streams and listeners counts to the cache
    for pl in result:
        for stream in pl.streams:
            if stream not in cache:
                cache[stream] = {}

            if LISTENERCURRENT in cache[stream]:
                cache[stream][LISTENERCURRENT].append(str(pl.listeners))
            else:
                cache[stream][LISTENERCURRENT] = [str(pl.listeners)]

    set_cache(cache)
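
Example #1 opens with a small "load the pickle or recompute and cache it" idiom. A minimal generic sketch of that pattern, assuming nothing beyond the standard library (the helper name load_or_build is made up here):

import pickle

def load_or_build(cache_path, build_func):
    # return the cached result if the pickle exists, else build and cache it
    try:
        with open(cache_path, "rb") as h:
            return pickle.load(h)
    except IOError:
        result = build_func()
        with open(cache_path, "wb") as h:
            pickle.dump(result, h)
        return result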
Example #2
def main():
    cache = get_cache()
    failed_uris = get_failed()

    # don't check uris that have enough tags in the cache
    done = set()
    tags = set(NEEDED)
    for key, value in cache.iteritems():
        if not tags - set(value.keys()):
            done.add(key)

    # also don't check failed (to allow multiple partial runs)
    done |= failed_uris

    # get uris for the rest
    uris_todo = set(cache.keys()) - done

    # get tags and replace all results
    new_result, new_failed = get_all_tags(uris_todo)
    for uri, tags in new_result.iteritems():
        if uri in cache:
            cache[uri].update(tags)
        else:
            cache[uri] = tags

    set_failed(failed_uris | set(new_failed))
    set_cache(cache)
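
The "done" check above hinges on set difference: an entry is complete once NEEDED - set(entry) is empty. A tiny illustration (the tag names are invented for this example):

NEEDED = {"organization", "genre"}  # hypothetical required tags

complete = {"organization": ["Radio X"], "genre": ["ambient"]}
partial = {"genre": ["ambient"]}

assert not NEEDED - set(complete.keys())                 # nothing missing -> done
assert NEEDED - set(partial.keys()) == {"organization"}  # still to fetch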
Example #5
def main():
    """Writes all tags to a file in the following format:

    uri=http://bla.com
    key=value
    key2=value2
    key=value3

    Each uri starts a new entry; entries are not separated by blank
    lines; multiple values get transformed to multiple key=value pairs.

    Tags that start with ~ are metadata not retrieved from the stream,
    e.g. ~listenerpeak is the listener peak value from the shoutcast page.
    """

    cache = get_cache()

    needed = set(NEEDED)
    out = []
    written = 0
    for uri, tags in cache.iteritems():
        if needed - set(tags.keys()):
            continue
        written += 1
        # XXX
        uri = str(uri)
        out.append("uri=" + uri)

        # fold current counts into peak; xiph, for example, only reports current
        if LISTENERPEAK not in tags:
            if LISTENERCURRENT in tags:
                tags[LISTENERPEAK] = tags[LISTENERCURRENT]
        else:
            tags[LISTENERPEAK].extend(tags.get(LISTENERCURRENT, []))

        # take the largest one
        peaks = tags.get(LISTENERPEAK, [])
        peaks = map(int, peaks)
        if peaks:
            tags[LISTENERPEAK] = [str(max(peaks))]

        for key, values in tags.iteritems():
            if key not in TAGS:
                continue
            for val in values:
                if isinstance(val, unicode):
                    val = val.encode("utf-8")
                if val in VBL:
                    continue
                out.append(key + "=" + val)

    print "Writing taglist..."
    print written, " stations"
    with open(STATIONFILE, "wb") as h:
        h.write("\n".join(out))

    print "Write compressed version..."
    with open(STATIONFILE + ".bz2", "wb") as h:
        h.write(bz2.compress(open(STATIONFILE, "rb").read(), 9))
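
The docstring above pins the station-file format down precisely, so reading it back is mechanical. A sketch of a matching parser, assuming the format exactly as documented (the name parse_taglist is made up here):

def parse_taglist(path):
    # yield (uri, tags) pairs from a file of uri=.../key=value lines;
    # assumes every line contains "=" and values contain no newlines
    uri, tags = None, {}
    with open(path, "rb") as h:
        for line in h.read().decode("utf-8").splitlines():
            key, value = line.split("=", 1)
            if key == "uri":
                if uri is not None:
                    yield uri, tags
                uri, tags = value, {}
            else:
                tags.setdefault(key, []).append(value)
    if uri is not None:
        yield uri, tags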
Example #7
def main():
    cache = get_cache()
    failed_uris = get_failed()
    parse_failed_uris = get_parse_failed()

    uris = cache.keys()

    peak_missing = [uri for uri in uris if LISTENERPEAK not in cache[uri]]
    peak_missing = set(peak_missing) - failed_uris

    # XXX: fetch_stream_infos is the same for each root url
    peak_missing = {get_root(uri) for uri in peak_missing}
    peak_missing = set(peak_missing) - parse_failed_uris
    

    pool = Pool(PROCESSES)
    try:
        pfunc = fetch_stream_infos
        for i, res in enumerate(pool.imap_unordered(pfunc, peak_missing)):
            uri, streams = res

            # checkpoint the cache every 1000 results
            if (i+1) % 1000 == 0:
                set_cache(cache)

            print "%d/%d " % (i+1, len(peak_missing)) + uri + " -> ",
            print "%d new streams" % len(streams)

            if not streams:
                parse_failed_uris.add(uri)

            # add newly found uris to the cache, along with listener counts
            for stream in streams:
                peak = str(int(stream.peak))
                current = str(int(stream.current))
                uri = stream.stream

                if uri not in cache:
                    cache[uri] = {}

                if LISTENERPEAK in cache[uri]:
                    cache[uri][LISTENERPEAK].append(peak)
                else:
                    cache[uri][LISTENERPEAK] = [peak]

                if LISTENERCURRENT in cache[uri]:
                    cache[uri][LISTENERCURRENT].append(current)
                else:
                    cache[uri][LISTENERCURRENT] = [current]

    except Exception as e:
        print e
    finally:
        set_parse_failed(parse_failed_uris)
        set_cache(cache)
        pool.terminate()
        pool.join()
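
The example above pairs Pool.imap_unordered with a checkpoint every 1000 results, so a long crawl survives interruption and later runs resume from the cache. The bare skeleton of that pattern, with placeholder worker and checkpoint functions:

from multiprocessing import Pool

def process(item):
    # placeholder for per-item work such as fetch_stream_infos
    return item, item * 2

def run(items, checkpoint, every=1000):
    # run under an `if __name__ == "__main__":` guard on spawn-based platforms
    results = {}
    pool = Pool()
    try:
        for i, (key, value) in enumerate(pool.imap_unordered(process, items)):
            results[key] = value
            if (i + 1) % every == 0:
                checkpoint(results)  # persist partial progress periodically
    finally:
        checkpoint(results)  # always persist whatever was gathered
        pool.terminate()
        pool.join()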
Example #9
def get_replica_cache(module):
    ''' Get a read-replica cache of a different module. At
    present, not a read-replica, but this will change in the
    future.

    This is a bad idea, and should be removed in the future'''
    print 'deprecated'

    return get_cache(module)
Example #11
def reset_cache(logger, build_folder, cache_folder, **kwargs):
    """ wipe out the cache folder, optionnaly keeping latest master """
    logger.step("Reseting cache folder: {}".format(cache_folder))

    cache_size, free_space = display_cache_and_free_space(
        logger, build_folder, cache_folder
    )
    logger.std("-------------")

    if kwargs.get("keep_master"):
        tmp_master_fpath = None

        master = get_content("hotspot_master_image")
        master_fpath = os.path.join(cache_folder, master["name"])
        if (
            os.path.exists(master_fpath)
            and get_checksum(master_fpath) == master["checksum"]
        ):
            # latest master to be moved temporarily to build-dir
            tmp_master_fpath = os.path.join(
                build_folder, ".__tmp--{}".format(master["name"])
            )
            logger.std("Keeping your latest master aside: {}".format(master["name"]))
            try:
                shutil.move(master_fpath, tmp_master_fpath)
            except Exception as exp:
                logger.err("Unable to move your latest master into build-dir. Exiting.")
                return 1

    logger.std("Removing cache...", end="")
    try:
        shutil.rmtree(cache_folder)
    except Exception as exp:
        logger.err("FAILED ({}).".format(exp))
    else:
        logger.succ("OK.")

    logger.std("Recreating cache placeholder.")
    cache_folder = get_cache(build_folder)

    if kwargs.get("keep_master"):
        logger.std("Restoring your latest master.")
        try:
            shutil.move(tmp_master_fpath, master_fpath)
        except Exception as exp:
            logger.err("Unable to move back your master file into fresh cache.")
            if tmp_master_fpath is not None:
                logger.err("Please find your master at: {}".format(tmp_master_fpath))
            return 1

    logger.std("-------------")
    display_cache_and_free_space(
        logger, build_folder, cache_folder, cache_size, free_space
    )

    return 0
Example #12
def index():
    """
    URL: `/`

    Main page with dropdown widgets from cached data.
    """

    cache_d = get_cache()
    return render_template('index.html', profile_title="DEPG", \
        dropdown_data=cache_d["dropdown_data"], properties=cache_d["properties"])
Example #13
def _decorator(*args):
    # resolve the cache backend for this region and build the lookup key
    cache_ = region_config[manager].get_dbmanager(region)
    key_args = deco_args + args[skip_self:]
    real_key = util.get_cache(namespace, key_args)
    try:
        value = cache_[real_key]
    except KeyError:
        # cache miss: compute the value, then write it back asynchronously
        value = func(*args)
        util.async(cache_.set, real_key, value)
    return value
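
The closure above is the inner half of a read-through caching decorator: look up, compute on miss, write back. A self-contained sketch of the same idea without the region/manager machinery (all names here are generic, not from the source):

import functools

def cached(cache):
    # read-through cache decorator: the positional args form the key
    def wrap(func):
        @functools.wraps(func)
        def _decorator(*args):
            key = (func.__name__,) + args
            try:
                return cache[key]
            except KeyError:
                value = func(*args)
                cache[key] = value  # the original writes back asynchronously
                return value
        return _decorator
    return wrap

@cached({})
def slow_square(x):
    return x * x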
Example #14
def main():
    uris = []
    r = requests.get("https://somafm.com/listen/")
    playlists = re.findall('[^"\']*?\.pls', r.text)
    for i, pls in enumerate(playlists):
        print "%d/%d" % (i + 1, len(playlists))
        uris.extend(get_pls(pls))

    cache = get_cache()
    for uri in uris:
        if uri not in cache:
            cache[uri] = {}
    set_cache(cache)
Example #15
def main():
    def default(logger, build_folder, cache_folder, **kwargs):
        parser.parse_args(["--help"])

    parser = argparse.ArgumentParser(
        description="Cache Folder management tool")
    parser.add_argument("--build",
                        help="Build Folder containing the cache one",
                        required=True)
    parser.set_defaults(func=default)
    subparsers = parser.add_subparsers()

    parser_show = subparsers.add_parser("show", help="List files in cache")
    parser_show.set_defaults(func=list_cache_files)

    parser_clean = subparsers.add_parser(
        "clean", help="Remove obsolete files from cache")
    parser_clean.set_defaults(func=clean_cache)

    parser_reset = subparsers.add_parser("reset",
                                         help="Reset cache folder completely")
    parser_reset.set_defaults(func=reset_cache)
    parser_reset.add_argument(
        "--keep-master",
        help="Keep the latest master image if it exists",
        action="store_true",
    )

    # defaults to help
    args = parser.parse_args(["--help"] if len(sys.argv) < 2 else None)

    logger = CLILogger()
    build_folder = args.build
    if not os.path.exists(build_folder) or not os.path.isdir(build_folder):
        logger.err("Build folder is not a directory.")
        sys.exit(1)
    cache_folder = get_cache(build_folder)

    # ensure we have a proper list of contents and packages to match against
    init(logger)

    sys.exit(args.func(logger, build_folder, cache_folder, **args.__dict__))
Example #16
    )
    FUNCTIONS['VLOOKUP'] = wrap_ufunc(
        formulas.functions.look.xlookup,
        input_parser=lambda val, vec, index, match_type=1, transpose=True:
            formulas.functions.look._hlookup_parser(
                val, vec, int(index), match_type, transpose),
        check_error=lambda *a: get_error(a[:1]), excluded={1, 2, 3}
    )

    def _int(x, *args, **kwargs):
        # Excel's INT floors toward negative infinity; Python's int()
        # truncates toward zero, so adjust negative non-integers only
        ret = int(x, *args, **kwargs)
        if x < 0 and ret != x:
            ret -= 1
        return ret

    FUNCTIONS['INT'] = wrap_ufunc(_int)

    # Instantiate spreadsheet objects
    global xl
    xl = formulas.ExcelModel()
    wb, context = xl.add_book(get_config()["spreadsheet_file"])
    xl.pushes(*wb.worksheets, context=context)
    xl.finish()
    get_cache(xl, wb)

    app.register_blueprint(bp, url_prefix='/')
    if os.environ.get("ENV", "") == "dev":
        app.run(host='0.0.0.0', port=5001, debug=1)
    else:
        app.run(host='0.0.0.0', port=8000, debug=0)
Example #17
def run_installation(
    name,
    timezone,
    language,
    wifi_pwd,
    admin_account,
    kalite,
    aflatoun,
    wikifundi,
    edupi,
    edupi_resources,
    nomad,
    mathews,
    africatik,
    zim_install,
    size,
    logger,
    cancel_event,
    sd_card,
    favicon,
    logo,
    css,
    done_callback=None,
    build_dir=".",
    filename=None,
    qemu_ram="2G",
    shrink=False,
):

    logger.start(bool(sd_card))

    logger.stage("init")
    cache_folder = get_cache(build_dir)

    try:
        logger.std("Preventing system from sleeping")
        sleep_ref = prevent_sleep(logger)

        logger.step("Check System Requirements")
        logger.std("Please read {} for details".format(requirements_url))
        sysreq_ok, missing_deps = host_matches_requirements(build_dir)
        if not sysreq_ok:
            raise SystemError(
                "Your system does not matches system requirements:\n{}".format(
                    "\n".join([" - {}".format(dep) for dep in missing_deps])))

        logger.step("Ensure user files are present")
        for user_fpath in (edupi_resources, favicon, logo, css):
            if (user_fpath is not None and not isremote(user_fpath)
                    and not os.path.exists(user_fpath)):
                raise ValueError(
                    "Specified file is not available ({})".format(user_fpath))

        logger.step("Prepare Image file")

        # set image names
        if not filename:
            filename = "hotspot-{}".format(
                datetime.today().strftime("%Y_%m_%d-%H_%M_%S"))

        image_final_path = os.path.join(build_dir, filename + ".img")
        image_building_path = os.path.join(build_dir,
                                           filename + ".BUILDING.img")
        image_error_path = os.path.join(build_dir, filename + ".ERROR.img")

        # loop device mode on linux (for mkfs in userspace)
        if sys.platform == "linux":
            loop_dev = guess_next_loop_device(logger)
            if loop_dev and not can_write_on(loop_dev):
                logger.step("Change loop device mode ({})".format(sd_card))
                previous_loop_mode = allow_write_on(loop_dev, logger)
            else:
                previous_loop_mode = None

        base_image = get_content("hotspot_master_image")
        # harmonize options
        packages = [] if zim_install is None else zim_install
        kalite_languages = [] if kalite is None else kalite
        wikifundi_languages = [] if wikifundi is None else wikifundi
        aflatoun_languages = ["fr", "en"] if aflatoun else []

        if edupi_resources and not isremote(edupi_resources):
            logger.step("Copying EduPi resources into cache")
            shutil.copy(edupi_resources, cache_folder)

        # prepare ansible options
        ansible_options = {
            "name": name,
            "timezone": timezone,
            "language": language,
            "language_name": dict(data.hotspot_languages)[language],
            "edupi": edupi,
            "edupi_resources": edupi_resources,
            "nomad": nomad,
            "mathews": mathews,
            "africatik": africatik,
            "wikifundi_languages": wikifundi_languages,
            "aflatoun_languages": aflatoun_languages,
            "kalite_languages": kalite_languages,
            "packages": packages,
            "wifi_pwd": wifi_pwd,
            "admin_account": admin_account,
            "disk_size": size,
            "root_partition_size": base_image.get("root_partition_size"),
        }
        extra_vars, secret_keys = ansiblecube.build_extra_vars(
            **ansible_options)

        # display config in log
        logger.step("Dumping Hotspot Configuration")
        logger.raw_std(
            json.dumps(
                {
                    k: "****" if k in secret_keys else v
                    for k, v in extra_vars.items()
                },
                indent=4,
            ))

        # gen homepage HTML
        homepage_path = save_homepage(
            generate_homepage(logger, ansible_options))
        logger.std("homepage saved to: {}".format(homepage_path))

        # Download Base image
        logger.stage("master")
        logger.step("Retrieving base image file")

        rf = download_content(base_image, logger, build_dir)
        if not rf.successful:
            logger.err(
                "Failed to download base image.\n{e}".format(e=rf.exception))
            sys.exit(1)
        elif rf.found:
            logger.std("Reusing already downloaded base image ZIP file")
        logger.progress(0.5)

        # extract base image and rename
        logger.step("Extracting base image from ZIP file")
        unzip_file(
            archive_fpath=rf.fpath,
            src_fname=base_image["name"].replace(".zip", ""),
            build_folder=build_dir,
            dest_fpath=image_building_path,
        )
        logger.std("Extraction complete: {p}".format(p=image_building_path))
        logger.progress(0.9)

        if not os.path.exists(image_building_path):
            raise IOError(
                "image path does not exists: {}".format(image_building_path))

        logger.step("Testing mount procedure")
        if not test_mount_procedure(image_building_path, logger, True):
            raise ValueError("thorough mount procedure failed")

        # collection contains both downloads and processing callbacks
        # for all requested contents
        collection = get_collection(
            edupi=edupi,
            edupi_resources=edupi_resources,
            nomad=nomad,
            mathews=mathews,
            africatik=africatik,
            packages=packages,
            kalite_languages=kalite_languages,
            wikifundi_languages=wikifundi_languages,
            aflatoun_languages=aflatoun_languages,
        )

        # download contents into cache
        logger.stage("download")
        logger.step("Starting all content downloads")
        downloads = list(get_all_contents_for(collection))
        archives_total_size = sum([c["archive_size"] for c in downloads])
        retrieved = 0

        for dl_content in downloads:
            logger.step("Retrieving {name} ({size})".format(
                name=dl_content["name"],
                size=human_readable_size(dl_content["archive_size"]),
            ))

            rf = download_content(dl_content, logger, build_dir)
            if not rf.successful:
                logger.err("Error downloading {u} to {p}\n{e}".format(
                    u=dl_content["url"], p=rf.fpath, e=rf.exception))
                raise rf.exception if rf.exception else IOError
            elif rf.found:
                logger.std("Reusing already downloaded {p}".format(p=rf.fpath))
            else:
                logger.std("Saved `{p}` successfuly: {s}".format(
                    p=dl_content["name"],
                    s=human_readable_size(rf.downloaded_size)))
            retrieved += dl_content["archive_size"]
            logger.progress(retrieved, archives_total_size)

        # check edupi resources compliance
        if edupi_resources:
            logger.step("Verifying EduPi resources file names")
            exfat_compat, exfat_errors = ensure_zip_exfat_compatible(
                get_content_cache(get_alien_content(edupi_resources),
                                  cache_folder, True))
            if not exfat_compat:
                raise ValueError("Your EduPi resources archive is incorrect.\n"
                                 "It should be a ZIP file of a root folder "
                                 "in which all files have exfat-compatible "
                                 "names (no {chars})\n... {fnames}".format(
                                     chars=" ".join(EXFAT_FORBIDDEN_CHARS),
                                     fnames="\n... ".join(exfat_errors),
                                 ))
            else:
                logger.std("EduPi resources archive OK")

        # instantiate emulator
        logger.stage("setup")
        logger.step("Preparing qemu VM")
        emulator = qemu.Emulator(
            data.vexpress_boot_kernel,
            data.vexpress_boot_dtb,
            image_building_path,
            logger,
            ram=qemu_ram,
        )

        # Resize image
        logger.step("Resizing image file from {s1} to {s2}".format(
            s1=human_readable_size(emulator.get_image_size()),
            s2=human_readable_size(size),
        ))
        if size < emulator.get_image_size():
            logger.err("cannot decrease image size")
            raise ValueError("cannot decrease image size")

        emulator.resize_image(size)

        # Run emulation
        logger.step("Starting-up VM (first-time)")
        with emulator.run(cancel_event) as emulation:
            # copy ansiblecube into the VM again, in case
            # the master version has been updated
            logger.step("Copy ansiblecube")
            emulation.exec_cmd("sudo /bin/rm -rf {}".format(
                ansiblecube.ansiblecube_path))
            emulation.put_dir(data.ansiblecube_path,
                              ansiblecube.ansiblecube_path)

            logger.step("Run ansiblecube for `resize`")
            ansiblecube.run(emulation, ["resize"], extra_vars, secret_keys)

        logger.step("Starting-up VM (second-time)")
        with emulator.run(cancel_event) as emulation:

            logger.step("Run ansiblecube phase I")
            ansiblecube.run_phase_one(
                emulation,
                extra_vars,
                secret_keys,
                homepage=homepage_path,
                logo=logo,
                favicon=favicon,
                css=css,
            )

        # wait for QEMU to release file (windows mostly)
        time.sleep(10)

        # mount image's 3rd partition on host
        logger.stage("copy")

        logger.step("Formating data partition on host")
        format_data_partition(image_building_path, logger)

        logger.step("Mounting data partition on host")
        # copy contents from cache to mount point
        try:
            mount_point, device = mount_data_partition(image_building_path,
                                                       logger)
            logger.step("Processing downloaded content onto data partition")
            expanded_total_size = sum([c["expanded_size"] for c in downloads])
            processed = 0

            for category, content_dl_cb, content_run_cb, cb_kwargs in collection:

                logger.step("Processing {cat}".format(cat=category))
                content_run_cb(cache_folder=cache_folder,
                               mount_point=mount_point,
                               logger=logger,
                               **cb_kwargs)
                # size of expanded files for this category (for progress)
                processed += sum(
                    [c["expanded_size"] for c in content_dl_cb(**cb_kwargs)])
                logger.progress(processed, expanded_total_size)
        except Exception as exp:
            try:
                unmount_data_partition(mount_point, device, logger)
            except NameError:
                pass  # if mount_point or device are not defined
            raise exp

        time.sleep(10)

        # unmount partition
        logger.step("Unmounting data partition")
        unmount_data_partition(mount_point, device, logger)

        time.sleep(10)

        # rerun emulation for discovery
        logger.stage("move")
        logger.step("Starting-up VM (third-time)")
        with emulator.run(cancel_event) as emulation:
            logger.step("Run ansiblecube phase II")
            ansiblecube.run_phase_two(emulation, extra_vars, secret_keys)

        if shrink:
            logger.step("Shrink size of physical image file")
            # calculate physical size of image
            required_image_size = get_required_image_size(collection)
            if required_image_size + ONE_GB >= size:
                # less than 1GB difference, don't bother
                pass
            else:
                # set physical size to required + margin
                physical_size = math.ceil(
                    required_image_size / ONE_GB) * ONE_GB
                emulator.resize_image(physical_size, shrink=True)

        # wait for QEMU to release file (windows mostly)
        logger.succ("Image creation successful.")
        time.sleep(20)

    except Exception as e:
        logger.failed(str(e))

        # display traceback on logger
        logger.std("\n--- Exception Trace ---\n{exp}\n---".format(
            exp=traceback.format_exc()))

        # Set final image filename
        if os.path.isfile(image_building_path):
            os.rename(image_building_path, image_error_path)

        error = e
    else:
        try:
            # Set final image filename
            tries = 0
            while True:
                try:
                    os.rename(image_building_path, image_final_path)
                except Exception as exp:
                    logger.err(exp)
                    tries += 1
                    if tries > 3:
                        raise exp
                    time.sleep(5 * tries)
                    continue
                else:
                    logger.std(
                        "Renamed image file to {}".format(image_final_path))
                    break

            # Write image to SD Card
            if sd_card:
                logger.stage("write")
                logger.step("Writting image to SD-card ({})".format(sd_card))

                try:

                    etcher_writer = EtcherWriterThread(args=(image_final_path,
                                                             sd_card, logger))
                    cancel_event.register_thread(thread=etcher_writer)
                    etcher_writer.start()
                    etcher_writer.join(timeout=2)  # make sure it started
                    while etcher_writer.is_alive():
                        pass
                    logger.std("not alive")
                    etcher_writer.join(timeout=2)
                    cancel_event.unregister_thread()
                    if etcher_writer.exp is not None:
                        raise etcher_writer.exp

                    logger.std("Done writing and verifying.")
                    time.sleep(5)
                except Exception:
                    logger.succ("Image created successfuly.")
                    logger.err(
                        "Writing or verification of Image to your SD-card failed.\n"
                        "Please use a third party tool to flash your image "
                        "onto your SD-card. See File menu for links to Etcher."
                    )
                    raise Exception("Failed to write Image to SD-card")

        except Exception as e:
            logger.failed(str(e))

            # display traceback on logger
            logger.std("\n--- Exception Trace ---\n{exp}\n---".format(
                exp=traceback.format_exc()))
            error = e
        else:
            logger.complete()
            error = None
    finally:
        logger.std("Restoring system sleep policy")
        restore_sleep_policy(sleep_ref, logger)

        if sys.platform == "linux" and loop_dev and previous_loop_mode:
            logger.step("Restoring loop device ({}) mode".format(loop_dev))
            restore_mode(loop_dev, previous_loop_mode, logger)

        # display durations summary
        logger.summary()

    if done_callback:
        done_callback(error)

    return error
Example #18
def get_replica_cache(module):
    ''' Get a read-replica cache of a different module. At
    present, not a read-replica, but this will change in the
    future. '''
    return get_cache(module)
Example #19
def generate():
    """
    URL: `/`

    Use submitted values to generate a profile.
    """

    # Retain selected values
    profile_notes_list = []
    cache_d = get_cache()
    dropdown_data = cache_d["dropdown_data"]
    for coord, cat_data in dropdown_data.items():
        profile_notes_list.append("{}: {}".format(cat_data["category"], request.values.get(coord, "")))
        selected_value = request.values.get(coord, "")
        try:
            selected_index = cat_data["values"].index(selected_value)
        except ValueError:
            selected_index = 0
        cat_data["selected_idx"] = selected_index
    profile_notes = ", ".join(profile_notes_list)
    profile_title = request.values.get("profile_title")
    if not profile_title:
        profile_title = "DEPG"

    # Calculate params based on provided input
    sol = xl.calculate(
        inputs={addr(k): v for k, v in dict(request.values).items()}
    )

    # Pull out values we care about and round as needed
    result_d = {}
    for key, coord in get_config()["result_coords"].items():
        result_rounding = get_config()["result_rounding"][key]
        value = sol[addr(coord)].value[0,0]
        if result_rounding is not None:
            result_d[key] = round(decimal.Decimal(value), result_rounding)
        else:
            result_d[key] = value

    # Add profile_title, target weight, pressure peak, and shot time as well
    profile_notes += "\n\nPressure peak: {}, Stop at weight: {}, Time: {}" . \
        format(result_d["graph_pressure_peak"], result_d["graph_stop_on_weight"], result_d["graph_time"])
    result_d["profile_notes"] = profile_notes
    result_d["profile_title"] = profile_title

    # Fill in profile base and advanced steps
    base = {}
    for k,v in get_config()["profile"].items():
        if k == "advanced_shot":
            continue
        if type(v) is str:
            base[k] = v.format(**result_d)
        else:
            base[k] = v
    steps = []
    for raw_step in get_config()["profile"]["advanced_shot"]:
        step = {}
        for k,v in raw_step.items():
            if type(v) is str:
                step[k] = v.format(**result_d)
            else:
                step[k] = v
        steps.append(step)

    # Convert to TCL
    steps_tcl = [tkinter._stringify(list(dict_to_list(e))) for e in steps]
    profile = "advanced_shot {" + " ".join(steps_tcl) + "}\n"
    for k,v in base.items():
        profile += "{} {}\n".format(tkinter._stringify(k), tkinter._stringify(v))

    return render_template('index.html', profile_title=profile_title, \
        dropdown_data=cache_d["dropdown_data"], profile=profile, properties=cache_d["properties"])
Example #20
    print("  {name}:{space} {value}".format(name=name,
                                            value=getattr(args, name),
                                            space=" " * (longest - len(name))))

# check disk space
collection = get_collection(
    edupi=args.edupi == "yes",
    edupi_resources=args.edupi_resources,
    nomad=args.nomad == "yes",
    mathews=args.mathews == "yes",
    packages=args.zim_install,
    kalite_languages=args.kalite,
    wikifundi_languages=args.wikifundi,
    aflatoun_languages=["fr", "en"] if args.aflatoun == "yes" else [],
)
cache_folder = get_cache(args.build_dir)
# how much space is available on the build directory?
avail_space_in_build_dir = get_free_space_in_dir(args.build_dir)
try:
    # how much space do we need to build the image?
    space_required_to_build = get_required_building_space(
        collection, cache_folder, args.output_size)
    # how large should the image be?
    required_image_size = get_required_image_size(collection)
except FileNotFoundError as exp:
    print("Supplied File Not Found: {}".format(exp.filename), file=sys.stderr)
    sys.exit(1)
base_image_size = get_content("hotspot_master_image")["expanded_size"]

if args.size < base_image_size:
    print(
Example #21
def get_content_cache(content, folder, is_cache_folder=False):
    """ shortcut to content's fpath from build_folder or cache_folder """

    cache_folder = folder if is_cache_folder else get_cache(folder)
    return os.path.join(cache_folder, content.get("name"))