Example #1
def read_label_file(filename: str) -> Tuple[List[Dict], List[str]]:
    """ Reads proximal femur detection labels from a label-studio json file (or a zip archive
        containing a result.json) and returns its content in a cleaned up way.
    :param filename: Path to a label-studio json file or to a zip archive containing result.json
    :return: Tuple (labels, skipped). skipped is a list of image filenames whose annotations had
        invalid keypoints. labels is a list of dictionaries containing the information:
        (filename: str, id: int, visit: int, left_right: str, upside_down: int, incomplete: int,
         implant: int, width: int, height: int,
         bbox_min_x: float, bbox_max_x: float, bbox_min_y: float, bbox_max_y: float,
         keypoint_x_0 .. keypoint_x_11: float, keypoint_y_0 .. keypoint_y_11: float)
        where id and visit are parsed from image names of the format <SOF_ID>V<Visit> and all
        int values are either 0 (false/absent) or 1 (true/present). The bbox and keypoint
        coordinates are taken unchanged from the label-studio export.
    """
    import json
    import re
    from pathlib import Path
    from zipfile import ZipFile

    labels = []

    label_path = Path(filename)

    open_file = lambda: open(filename, 'r')

    skipped = []

    if label_path.suffix == '.zip':
        zip_file = ZipFile(label_path, 'r')
        open_file = lambda: zip_file.open('result.json', 'r')  # wrap in a lambda to match the plain-file case above

    with open_file() as fh:
        for item in json.load(fh):
            match = re.match(r'^.*[/-]?[a-zA-Z]*([0-9]+)V([0-9]+)(L|R)-([0-9]+)x([0-9]+)\.png$', item['image'])
            if not match or len(match.groups()) != 5:
                raise ValueError(f"Annotated image '{item['image']}' has wrong format!")
            matches = match.groups()
            image_filename = re.match(r'^(.*/)?([^/]+\.png)$', item['image']).groups()[-1]

            image_id = int(matches[0])
            visit = int(matches[1])
            lr = matches[2]
            width = int(matches[3])
            height = int(matches[4])

            if "labels" in item:
                image_labels = item['labels'] if isinstance(item['labels'], list) else [item['labels']]
            else:
                image_labels = []
            incomplete = 1 if "Incomplete" in image_labels else 0
            implant = 1 if "Implant" in image_labels else 0
            upside_down = 1 if "UpsideDown" in image_labels else 0

            invalid_keypoints = (incomplete + implant) > 0

            if not invalid_keypoints and ("keypoints" not in item or len(item['keypoints']) != 12):
                import sys
                print(f"Annotation for {image_filename} has invalid keypoints. Skipping.", file=sys.stderr)
                skipped.append(image_filename)
                continue

            entry = {
                'filename': image_filename,
                'id': image_id,
                'visit': visit,
                'left_right': lr,
                'upside_down': upside_down,
                'incomplete': incomplete,
                'implant': implant,
                'width': width,
                'height': height
            }

            keypoints = item['keypoints'] if not invalid_keypoints else [i for i in range(12)]

            bbox_min_x = min([float(kp['x']) for kp in keypoints]) if not invalid_keypoints else 0.0
            bbox_max_x = max([float(kp['x']) for kp in keypoints]) if not invalid_keypoints else 100.0
            bbox_min_y = min([float(kp['y']) for kp in keypoints]) if not invalid_keypoints else 0.0
            bbox_max_y = max([float(kp['y']) for kp in keypoints]) if not invalid_keypoints else 100.0

            entry['bbox_min_x'] = bbox_min_x
            entry['bbox_max_x'] = bbox_max_x
            entry['bbox_min_y'] = bbox_min_y
            entry['bbox_max_y'] = bbox_max_y

            for index, kp in enumerate(keypoints):
                entry[f"keypoint_x_{index}"] = float(kp['x']) if not invalid_keypoints else 0.0
                entry[f"keypoint_y_{index}"] = float(kp['y']) if not invalid_keypoints else 0.0

            labels.append(entry)

    return labels, skipped
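# A hedged usage sketch for the function above (assumes a label-studio export saved as 'labels.json'):
labels, skipped = read_label_file('labels.json')
print(f"parsed {len(labels)} annotations, skipped {len(skipped)}")
for entry in labels[:3]:
    print(entry['filename'], entry['left_right'], entry['bbox_min_x'], entry['bbox_min_y'])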
Example #2
def parse(file):
    with ZipFile(file, 'r') as zip_obj:
        file_names = zip_obj.namelist()
        repository = CrateRepository()
        for file_name in file_names:
            parse_single_file(zip_obj, file_name, repository)
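# parse_single_file and CrateRepository are project-specific and not shown here; a hedged sketch of
# how such a per-member parser might read text out of the already-open archive:
def parse_single_file(zip_obj, file_name, repository):
    with zip_obj.open(file_name) as fh:
        text = fh.read().decode('utf-8')
    repository.add(file_name, text)  # 'add' is an assumed, illustrative method name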
Example #3
def jlink_new_jdk(jdk,
                  dst_jdk_dir,
                  module_dists,
                  root_module_names=None,
                  missing_export_target_action='create',
                  with_source=lambda x: True,
                  vendor_info=None):
    """
    Uses jlink from `jdk` to create a new JDK image in `dst_jdk_dir` with `module_dists` and
    their dependencies added to the JDK image, replacing any existing modules of the same name.

    :param JDKConfig jdk: source JDK
    :param str dst_jdk_dir: path to use for the jlink --output option
    :param list module_dists: list of distributions defining modules
    :param list root_module_names: list of strings naming the module root set for the new JDK image.
                     The named modules must either be in `module_dists` or in `jdk`. If None, then
                     the root set will be all the modules in `module_dists` and `jdk`.
    :param str missing_export_target_action: the action to perform for a qualified export target that
                     is not present in `module_dists` and does not have a hash stored in java.base.
                     The choices are:
                       "create" - an empty module is created
                        "error" - raise an error
                           None - do nothing
    :param lambda with_source: returns True if the sources of a module distribution must be included in the new JDK
    :param dict vendor_info: values for the jlink vendor options added by JDK-8232080
    """
    assert callable(with_source)

    if jdk.javaCompliance < '9':
        mx.abort('Cannot derive a new JDK from ' + jdk.home +
                 ' with jlink since it is not JDK 9 or later')

    exploded_java_base_module = join(jdk.home, 'modules', 'java.base')
    if exists(exploded_java_base_module):
        mx.abort(
            'Cannot derive a new JDK from ' + jdk.home +
            ' since it appears to be a developer build with exploded modules')

    jimage = join(jdk.home, 'lib', 'modules')
    jmods_dir = join(jdk.home, 'jmods')
    if not isfile(jimage):
        mx.abort('Cannot derive a new JDK from ' + jdk.home + ' since ' +
                 jimage + ' is missing or is not an ordinary file')
    if not isdir(jmods_dir):
        mx.abort('Cannot derive a new JDK from ' + jdk.home + ' since ' +
                 jmods_dir + ' is missing or is not a directory')

    jdk_modules = {jmd.name: jmd for jmd in jdk.get_modules()}
    modules = [as_java_module(dist, jdk) for dist in module_dists]
    all_module_names = frozenset(
        list(jdk_modules.keys()) + [m.name for m in modules])

    # Read hashes stored in java.base (the only module in the JDK where hashes are stored)
    out = mx.LinesOutputCapture()
    mx.run([
        jdk.exe_path('jmod'), 'describe',
        jdk_modules['java.base'].get_jmod_path()
    ],
           out=out)
    lines = out.lines
    hashes = {}
    for line in lines:
        if line.startswith('hashes'):
            parts = line.split()
            assert len(
                parts
            ) == 4, 'expected hashes line to have 4 fields, got {} fields: {}'.format(
                len(parts), line)
            _, module_name, algorithm, hash_value = parts
            hashes[module_name] = (algorithm, hash_value)

    build_dir = mx.ensure_dir_exists(join(dst_jdk_dir + ".build"))
    try:
        # Handle targets of qualified exports that are not present in `modules`
        target_requires = {}
        for jmd in modules:
            for targets in jmd.exports.values():
                for target in targets:
                    if target not in all_module_names and target not in hashes:
                        target_requires.setdefault(target, set()).add(jmd.name)
        if target_requires and missing_export_target_action is not None:
            if missing_export_target_action == 'error':
                mx.abort(
                    'Target(s) of qualified exports cannot be resolved: ' +
                    '.'.join(target_requires.keys()))
            assert missing_export_target_action == 'create', 'invalid value for missing_export_target_action: ' + str(
                missing_export_target_action)

            extra_modules = []
            for name, requires in target_requires.items():
                module_jar = join(build_dir, name + '.jar')
                jmd = JavaModuleDescriptor(
                    name, {},
                    requires={module: []
                              for module in requires},
                    uses=set(),
                    provides={},
                    jarpath=module_jar)
                extra_modules.append(jmd)
                module_build_dir = mx.ensure_dir_exists(join(build_dir, name))
                module_info_java = join(module_build_dir, 'module-info.java')
                module_info_class = join(module_build_dir, 'module-info.class')
                with open(module_info_java, 'w') as fp:
                    print(jmd.as_module_info(), file=fp)
                mx.run([
                    jdk.javac, '-d', module_build_dir,
                    '--limit-modules=java.base,' +
                    ','.join(jmd.requires.keys()),
                    '--module-path=' + os.pathsep.join(
                        (m.jarpath for m in modules)), module_info_java
                ])
                with ZipFile(module_jar, 'w') as zf:
                    zf.write(module_info_class, basename(module_info_class))
                if exists(jmd.get_jmod_path()):
                    os.remove(jmd.get_jmod_path())
                mx.run([
                    jdk.javac.replace('javac', 'jmod'), 'create',
                    '--class-path=' + module_build_dir,
                    jmd.get_jmod_path()
                ])

            modules.extend(extra_modules)
            all_module_names = frozenset(
                list(jdk_modules.keys()) + [m.name for m in modules])

        # Extract src.zip from source JDK
        jdk_src_zip = join(jdk.home, 'lib', 'src.zip')
        dst_src_zip_contents = {}
        if isfile(jdk_src_zip):
            mx.logv('[Extracting ' + jdk_src_zip + ']')
            with ZipFile(jdk_src_zip, 'r') as zf:
                for name in zf.namelist():
                    if not name.endswith('/'):
                        dst_src_zip_contents[name] = zf.read(name)
        else:
            mx.warn("'{}' does not exist or is not a file".format(jdk_src_zip))

        # Edit lib/security/default.policy in java.base
        patched_java_base = join(build_dir, 'java.base.jmod')
        with open(join(jmods_dir, 'java.base.jmod'),
                  'rb') as src_f, open(patched_java_base, 'wb') as dst_f:
            jmod_header = src_f.read(4)
            if len(jmod_header) != 4 or jmod_header != b'JM\x01\x00':
                raise mx.abort("Unexpected jmod header: " +
                               b2a_hex(jmod_header).decode('ascii'))
            dst_f.write(jmod_header)
            policy_result = 'not found'
            with ZipFile(src_f, 'r') as src_zip, ZipFile(
                    dst_f, 'w', src_zip.compression) as dst_zip:
                for i in src_zip.infolist():
                    if i.filename[-1] == '/':
                        continue
                    src_member = src_zip.read(i)
                    if i.filename == 'lib/security/default.policy':
                        if 'grant codeBase "jrt:/com.oracle.graal.graal_enterprise"'.encode(
                                'utf-8') in src_member:
                            policy_result = 'unmodified'
                        else:
                            policy_result = 'modified'
                            src_member += """
grant codeBase "jrt:/com.oracle.graal.graal_enterprise" {
    permission java.security.AllPermission;
};
""".encode('utf-8')
                    dst_zip.writestr(i, src_member)
            if policy_result == 'not found':
                raise mx.abort(
                    "Couldn't find `lib/security/default.policy` in " +
                    join(jmods_dir, 'java.base.jmod'))

        for jmd in modules:
            # Remove existing sources for all the modules that we include
            dst_src_zip_contents = {
                key: dst_src_zip_contents[key]
                for key in dst_src_zip_contents if not key.startswith(jmd.name)
            }

            if with_source(jmd.dist):
                # Add the sources that we can share.
                # Extract module sources
                jmd_src_zip = jmd.jarpath[0:-len('.jar')] + '.src.zip'
                if isfile(jmd_src_zip):
                    mx.logv('[Extracting ' + jmd_src_zip + ']')
                    with ZipFile(jmd_src_zip, 'r') as zf:
                        for name in zf.namelist():
                            if not name.endswith('/'):
                                dst_src_zip_contents[jmd.name + '/' +
                                                     name] = zf.read(name)

                # Add module-info.java to sources
                dst_src_zip_contents[jmd.name +
                                     '/module-info.java'] = jmd.as_module_info(
                                         extras_as_comments=False)

        # Now build the new JDK image with jlink
        jlink = [jdk.javac.replace('javac', 'jlink')]

        if jdk_enables_jvmci_by_default(jdk):
            # On JDK 9+, +EnableJVMCI forces jdk.internal.vm.ci to be in the root set
            jlink += ['-J-XX:-EnableJVMCI', '-J-XX:-UseJVMCICompiler']
        if root_module_names is not None:
            missing = frozenset(root_module_names) - all_module_names
            if missing:
                mx.abort(
                    'Invalid module(s): {}.\nAvailable modules: {}'.format(
                        ','.join(missing), ','.join(sorted(all_module_names))))
            jlink.append('--add-modules=' + ','.join(root_module_names))
        else:
            jlink.append('--add-modules=' + ','.join(sorted(all_module_names)))

        module_path = patched_java_base + os.pathsep + jmods_dir
        if modules:
            module_path = os.pathsep.join(
                (m.get_jmod_path(respect_stripping=True)
                 for m in modules)) + os.pathsep + module_path
        jlink.append('--module-path=' + module_path)
        jlink.append('--output=' + dst_jdk_dir)

        # These options are inspired by how OpenJDK runs jlink to produce the final runtime image.
        jlink.extend([
            '-J-XX:+UseSerialGC', '-J-Xms32M', '-J-Xmx512M',
            '-J-XX:TieredStopAtLevel=1'
        ])
        jlink.append('-J-Dlink.debug=true')
        jlink.append('--dedup-legal-notices=error-if-not-same-content')
        jlink.append('--keep-packaged-modules=' + join(dst_jdk_dir, 'jmods'))

        if jdk_has_new_jlink_options(jdk):
            if jdk_omits_warning_for_jlink_set_ThreadPriorityPolicy(jdk):
                thread_priority_policy_option = ' -XX:ThreadPriorityPolicy=1'
            else:
                mx.logv('[Creating JDK without -XX:ThreadPriorityPolicy=1]')
                thread_priority_policy_option = ''

            if any((m.name == 'jdk.internal.vm.compiler' for m in modules)):
                jlink.append(
                    '--add-options=-XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCIProduct -XX:-UnlockExperimentalVMOptions'
                    + thread_priority_policy_option)
            else:
                # Don't default to using JVMCI as JIT unless Graal is being updated in the image.
                # This avoids unexpected issues with using the out-of-date Graal compiler in
                # the JDK itself.
                jlink.append(
                    '--add-options=-XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCIProduct -XX:-UseJVMCICompiler -XX:-UnlockExperimentalVMOptions'
                    + thread_priority_policy_option)
            if vendor_info is not None:
                for name, value in vendor_info.items():
                    jlink.append('--' + name + '=' + value)

        # TODO: investigate the options below used by OpenJDK to see if they should be used:
        # --release-info: this allow extra properties to be written to the <jdk>/release file
        # --order-resources: specifies order of resources in generated lib/modules file.
        #       This is apparently not so important if a CDS archive is available.
        # --generate-jli-classes: pre-generates a set of java.lang.invoke classes.
        #       See https://github.com/openjdk/jdk/blob/master/make/GenerateLinkOptData.gmk
        mx.logv('[Creating JDK image in {}]'.format(dst_jdk_dir))
        mx.run(jlink)

        dst_src_zip = join(dst_jdk_dir, 'lib', 'src.zip')
        mx.logv('[Creating ' + dst_src_zip + ']')
        with ZipFile(dst_src_zip,
                     'w',
                     compression=ZIP_DEFLATED,
                     allowZip64=True) as zf:
            for name, contents in sorted(dst_src_zip_contents.items()):
                zf.writestr(name, contents)

        mx.logv('[Copying static libraries]')
        lib_prefix = mx.add_lib_prefix('')
        lib_suffix = '.lib' if mx.is_windows() else '.a'
        lib_directory = join(jdk.home, 'lib')
        dst_lib_directory = join(dst_jdk_dir, 'lib')
        for f in os.listdir(lib_directory):
            if f.startswith(lib_prefix) and f.endswith(lib_suffix):
                lib_path = join(lib_directory, f)
                if isfile(lib_path):
                    shutil.copy2(lib_path, dst_lib_directory)

        # Build the list of modules whose classes might have annotations
        # to be processed by native-image (GR-15192).
        with open(join(dst_jdk_dir, 'lib', 'native-image-modules.list'),
                  'w') as fp:
            print(
                '# Modules whose classes might have annotations processed by native-image',
                file=fp)
            for m in modules:
                print(m.name, file=fp)

    finally:
        if not mx.get_opts().verbose:
            # Preserve build directory so that javac command can be re-executed
            # by cutting and pasting verbose output.
            shutil.rmtree(build_dir)

    # Create CDS archive (https://openjdk.java.net/jeps/341).
    out = mx.OutputCapture()
    mx.logv('[Creating CDS shared archive]')
    if mx.run([
            mx.exe_suffix(join(dst_jdk_dir, 'bin', 'java')), '-Xshare:dump',
            '-Xmx128M', '-Xms128M'
    ],
              out=out,
              err=out,
              nonZeroIsFatal=False) != 0:
        mx.log(out.data)
        mx.abort('Error generating CDS shared archive')
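# The function above drives jlink through mx; below is a hedged, self-contained sketch of the core
# invocation it assembles (module names and paths are illustrative, JAVA_HOME must point at a JDK 9+):
import os
import subprocess

java_home = os.environ["JAVA_HOME"]
dst_jdk_dir = "my-jdk"
jlink_cmd = [
    os.path.join(java_home, "bin", "jlink"),
    "--module-path=" + os.path.join(java_home, "jmods"),  # jlink_new_jdk also puts the freshly built jmods first
    "--add-modules=" + ",".join(["java.base", "java.logging"]),
    "--keep-packaged-modules=" + os.path.join(dst_jdk_dir, "jmods"),
    "--output=" + dst_jdk_dir,
]
subprocess.run(jlink_cmd, check=True)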
Example #4
def main():
    # path to folder which needs to be zipped
    directory = './LexConfig'

    # calling function to get all file paths in the directory
    file_paths = get_all_file_paths(directory)

    # printing the list of file available in config folder
    print("Files Avaialble In LexConfig Folder:")
    fileCnt=0

    for file_name in file_paths:
        fileCnt += 1
        print(file_name)


    if fileCnt > 1:
        print ("===> Folder Contains More Than One Lex Configuration !!! Please Check")
        sys.exit()

    # writing files to a zipfile
    with ZipFile('./LexConfig/LexConfig.zip','w') as zip:
        # writing each file one by one
        for file in file_paths:
            zip.write(file)

    print('The Lex Configuration Zipped Successfully!')

    print ("------------------------------------------------------------------------------------------------------")
    print ("Exporting The Lex Configuration To AWS Lex")
    print ("------------------------------------------------------------------------------------------------------")

    proc = subprocess.Popen(['aws', 'lex-models', 'start-import', '--payload', 'fileb://LexConfig/LexConfig.zip', '--resource-type', LexObj, '--merge-strategy', 'OVERWRITE_LATEST'], stdout=PIPE, stderr=PIPE)

    out, err = proc.communicate()
    print (err)
    out1 = json.loads(out)
    bot = out1['name']
    env = bot.split('_')[-1]
    print ("===> Got the Environment As : "+env)
    print ("===> Bot : " + bot)
    print ("------------------------------------------------------------------------------------------------------")
    print ("Getting Bot Latest Version")
    print ("------------------------------------------------------------------------------------------------------")

    time.sleep(10)

    LexBot  = boto3.client('lex-models', region_name=region)
    botVersions = LexBot.get_bot_versions(name=bot)
    #print botVersions

    del(botVersions['ResponseMetadata'])

    class DatetimeEncoder(json.JSONEncoder):
        def default(self, obj):
            try:
                return super(DatetimeEncoder, self).default(obj)
            except TypeError:
                return str(obj)

    botVer  = json.loads(json.dumps(botVersions, cls=DatetimeEncoder))
    #print botVer
    for ver in botVer:
        #print ver
        for ver1 in botVer[ver]:
                LatestVer = ver1
                LatestVer=str(LatestVer["version"])
                print ("Inside Of For :" + LatestVer)

    #print "Latest BOT Version Is : " + LatestVer

    try:
        get_Bot_Alias = LexBot.get_bot_alias(name=env, botName=bot)
        botAlias  = json.loads(json.dumps(get_Bot_Alias, cls=DatetimeEncoder))
        del(botAlias['ResponseMetadata'])
        botAlias_cs = botAlias['checksum']
        print ("===> Bot Alias CheckSum Is : "+ botAlias_cs)
        put_Bot_Alias = LexBot.put_bot_alias(name=env, description='Alias For The Environment', botName=bot, botVersion=LatestVer, checksum=botAlias_cs)
        print ("===> Updated The BOT Alias : "+env)
    except:
        put_Bot_Alias = LexBot.put_bot_alias(name=env, description='Alias For The Environment', botName=bot, botVersion=LatestVer)
        print ("===> Created BOT Alias : "+env)

    exitcode = proc.returncode

    if exitcode == 0:

        print ("====> Sucessfully Imported The Configuration <====")

        # path to folder which needs to be zipped
        directory = './LexConfig'

        lexOutput = open( './LexConfig/LexConfig_Import_Status.txt', 'wb' )
        pickle.dump(out,lexOutput)
        lexOutput.close()

        # calling function to get all file paths in the directory
        file_paths = get_all_file_paths(directory)

        print ("====> Moving The Files To $Root/LexImport/Completed Folder <====")
        mvDir = './Completed/'+dt
        for file_name in file_paths:
                mvFile = file_name.split('/')[-1]
                print (mvDir+"_"+mvFile)
                shutil.move(file_name, mvDir+"_"+mvFile)

        LexBot  = boto3.client('lex-models', region_name=region)

        print ("===> Building The Bot : " + bot)
        #print bot

        try:
                getBot = LexBot.get_bot(name=bot, versionOrAlias='$LATEST')
                del(getBot['ResponseMetadata'],getBot['detectSentiment'],getBot['version'],getBot['lastUpdatedDate'],getBot['createdDate'],getBot['status'])
        except:
                print ("### Bot Not Found")
                sys.exit()

        botInfo = json.dumps(getBot)

        #print(botInfo)

        proc = subprocess.Popen(['aws', 'lex-models', 'put-bot', '--region', region, '--name', bot, '--cli-input-json', botInfo], stdout=PIPE, stderr=PIPE)

        out, err = proc.communicate()
        #print (out)
        print (err)
		
        # If you want to publish a new version after building the BOT, you can extract the checksum
        # from the "out" variable above and invoke the boto3 method "create_bot_version" with the
        # bot name (shown above) and that checksum; a sketch of that idea follows below.
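        # A hedged sketch (assumes "out" holds the JSON response printed by the put-bot call above):
        #     new_checksum = json.loads(out)['checksum']
        #     LexBot.create_bot_version(name=bot, checksum=new_checksum)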

    else:

        print ("====> Failed On Imported The Configuration <====")

        # path to folder which needs to be zipped
        directory = './LexConfig'

        # calling function to get all file paths in the directory
        file_paths = get_all_file_paths(directory)

        print ("====> Moving The Files To $Root/LexImport/Error Folder <====")
        mvDir = './Error/'+dt
        for file_name in file_paths:
                mvFile = file_name.split('/')[-1]
                shutil.move(file_name, mvDir+"_"+mvFile)
Example #5
        #Paste the face onto the contact sheet
        x, y = 0, 0
        contact.paste(face, (x, y))
        if x + face.width == contact.width:
            x = 0
            y += face.height
        else:
            x += face.width
            
    #Print the contact sheet
    display(contact)
        
#NAVIGATE THE ZIP FILE TO OBTAIN ALL IMAGES
#Open the ZIP file
fl = 'images.zip'
z = ZipFile(fl)

#Extract the images
pages = []
for name in z.namelist():
    dic = {}
    dic['name'] = name
    dic['image'] = Image.open(z.extract(name))
    pages.append(dic)
    
z.close()
    
#ACCEPT THE SEARCH WORD
#Accept the search word from the user
key = input('Enter the word to be searched for in the pages:')
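# A hedged alternative to the loop above: read each image straight from the archive without
# extracting it to disk first (assumes Pillow is installed):
from zipfile import ZipFile
from PIL import Image

pages = []
with ZipFile('images.zip') as z:
    for name in z.namelist():
        with z.open(name) as fh:
            img = Image.open(fh)
            img.load()  # force-read pixel data before the member file is closed
        pages.append({'name': name, 'image': img})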
Example #6
def flash_release(path=None, st_serial=None):
    from panda import Panda, PandaDFU, ESPROM, CesantaFlasher
    from zipfile import ZipFile

    def status(x):
        print("\033[1;32;40m" + x + "\033[00m")

    if st_serial is None:
        # look for Panda
        panda_list = Panda.list()
        if len(panda_list) == 0:
            raise Exception(
                "panda not found, make sure it's connected and your user can access it"
            )
        elif len(panda_list) > 1:
            raise Exception("Please only connect one panda")
        st_serial = panda_list[0]
        print("Using panda with serial %s" % st_serial)

    if path is None:
        print(
            "Fetching latest firmware from github.com/commaai/panda-artifacts")
        r = requests.get(
            "https://raw.githubusercontent.com/commaai/panda-artifacts/master/latest.json"
        )
        url = json.loads(r.text)['url']
        r = requests.get(url)
        print("Fetching firmware from %s" % url)
        path = StringIO.StringIO(r.content)

    zf = ZipFile(path)
    zf.printdir()

    version = zf.read("version")
    status("0. Preparing to flash " + version)

    code_bootstub = zf.read("bootstub.panda.bin")
    code_panda = zf.read("panda.bin")

    code_boot_15 = zf.read("boot_v1.5.bin")
    code_boot_15 = code_boot_15[0:2] + "\x00\x30" + code_boot_15[4:]

    code_user1 = zf.read("user1.bin")
    code_user2 = zf.read("user2.bin")

    # enter DFU mode
    status("1. Entering DFU mode")
    panda = Panda(st_serial)
    panda.enter_bootloader()
    time.sleep(1)

    # program bootstub
    status("2. Programming bootstub")
    dfu = PandaDFU(PandaDFU.st_serial_to_dfu_serial(st_serial))
    dfu.clear_status()
    dfu.erase(0x8004000)
    dfu.erase(0x8000000)
    dfu.program(0x8000000, code_bootstub, 0x800)
    dfu.reset()
    time.sleep(1)

    # flash main code
    status("3. Flashing main code")
    panda = Panda(st_serial)
    panda.flash(code=code_panda)
    panda.close()
    time.sleep(1)

    # flashing ESP
    status("4. Flashing ESP (slow!)")
    align = lambda x, sz=0x1000: x + "\xFF" * ((sz - len(x)) % sz)
    esp = ESPROM(st_serial)
    esp.connect()
    flasher = CesantaFlasher(esp, 230400)
    flasher.flash_write(0x0, align(code_boot_15), True)
    flasher.flash_write(0x1000, align(code_user1), True)
    flasher.flash_write(0x81000, align(code_user2), True)
    flasher.flash_write(0x3FE000, "\xFF" * 0x1000)
    flasher.boot_fw()
    del flasher
    del esp
    time.sleep(1)

    # check for connection
    status("5. Verifying version")
    panda = Panda(st_serial)
    my_version = panda.get_version()
    print("dongle id: %s" % panda.get_serial()[0])
    print(my_version, "should be", version)
    assert (str(version) == str(my_version))

    # done!
    status("6. Success!")
Example #7
    def writeStoryImpl(self, out):

        ## Python 2.5 ZipFile is rather more primitive than later
        ## versions.  It can operate on a file, or on a StringIO, but
        ## not on an open stream.  OTOH, I suspect we would have had
        ## problems with closing and opening again to change the
        ## compression type anyway.
        zipio = StringIO.StringIO()

        ## mimetype must be first file and uncompressed.  Python 2.5
        ## ZipFile can't change compression type file-by-file, so we
        ## have to close and re-open
        outputepub = ZipFile(zipio, 'w', compression=ZIP_STORED)
        outputepub.debug = 3
        outputepub.writestr('mimetype', 'application/epub+zip')
        outputepub.close()

        ## Re-open file for content.
        outputepub = ZipFile(zipio, 'a', compression=ZIP_DEFLATED)
        outputepub.debug = 3

        ## Create META-INF/container.xml file.  The only thing it does is
        ## point to content.opf
        containerdom = getDOMImplementation().createDocument(
            None, "container", None)
        containertop = containerdom.documentElement
        containertop.setAttribute("version", "1.0")
        containertop.setAttribute(
            "xmlns", "urn:oasis:names:tc:opendocument:xmlns:container")
        rootfiles = containerdom.createElement("rootfiles")
        containertop.appendChild(rootfiles)
        rootfiles.appendChild(
            newTag(
                containerdom, "rootfile", {
                    "full-path": "content.opf",
                    "media-type": "application/oebps-package+xml"
                }))
        outputepub.writestr("META-INF/container.xml",
                            containerdom.toxml(encoding='utf-8'))
        containerdom.unlink()
        del containerdom

        ## Epub has two metadata files with real data.  We're putting
        ## them in content.opf (pointed to by META-INF/container.xml)
        ## and toc.ncx (pointed to by content.opf)

        ## content.opf contains metadata, a 'manifest' list of all
        ## other included files, and another 'spine' list of the items in the
        ## file

        uniqueid = 'fanficfare-uid:%s-u%s-s%s' % (
            self.getMetadata('site'), self.story.getList('authorId')[0],
            self.getMetadata('storyId'))

        contentdom = getDOMImplementation().createDocument(
            None, "package", None)
        package = contentdom.documentElement
        package.setAttribute("version", "2.0")
        package.setAttribute("xmlns", "http://www.idpf.org/2007/opf")
        package.setAttribute("unique-identifier", "fanficfare-uid")
        metadata = newTag(contentdom,
                          "metadata",
                          attrs={
                              "xmlns:dc": "http://purl.org/dc/elements/1.1/",
                              "xmlns:opf": "http://www.idpf.org/2007/opf"
                          })
        package.appendChild(metadata)

        metadata.appendChild(
            newTag(contentdom,
                   "dc:identifier",
                   text=uniqueid,
                   attrs={"id": "fanficfare-uid"}))

        if self.getMetadata('title'):
            metadata.appendChild(
                newTag(contentdom, "dc:title", text=self.getMetadata('title')))

        if self.getMetadata('author'):
            if self.story.isList('author'):
                for auth in self.story.getList('author'):
                    metadata.appendChild(
                        newTag(contentdom,
                               "dc:creator",
                               attrs={"opf:role": "aut"},
                               text=auth))
            else:
                metadata.appendChild(
                    newTag(contentdom,
                           "dc:creator",
                           attrs={"opf:role": "aut"},
                           text=self.getMetadata('author')))

        metadata.appendChild(
            newTag(contentdom,
                   "dc:contributor",
                   text="FanFicFare [https://github.com/JimmXinu/FanFicFare]",
                   attrs={"opf:role": "bkp"}))
        metadata.appendChild(newTag(contentdom, "dc:rights", text=""))
        if self.story.getMetadata('langcode'):
            metadata.appendChild(
                newTag(contentdom,
                       "dc:language",
                       text=self.story.getMetadata('langcode')))
        else:
            metadata.appendChild(newTag(contentdom, "dc:language", text='en'))

        #  published, created, updated, calibre
        #  Leave calling self.story.getMetadataRaw directly in case date format changes.
        if self.story.getMetadataRaw('datePublished'):
            metadata.appendChild(
                newTag(contentdom,
                       "dc:date",
                       attrs={"opf:event": "publication"},
                       text=self.story.getMetadataRaw(
                           'datePublished').strftime("%Y-%m-%d")))

        if self.story.getMetadataRaw('dateCreated'):
            metadata.appendChild(
                newTag(contentdom,
                       "dc:date",
                       attrs={"opf:event": "creation"},
                       text=self.story.getMetadataRaw('dateCreated').strftime(
                           "%Y-%m-%d")))

        if self.story.getMetadataRaw('dateUpdated'):
            metadata.appendChild(
                newTag(contentdom,
                       "dc:date",
                       attrs={"opf:event": "modification"},
                       text=self.story.getMetadataRaw('dateUpdated').strftime(
                           "%Y-%m-%d")))
            metadata.appendChild(
                newTag(contentdom,
                       "meta",
                       attrs={
                           "name":
                           "calibre:timestamp",
                           "content":
                           self.story.getMetadataRaw('dateUpdated').strftime(
                               "%Y-%m-%dT%H:%M:%S")
                       }))

        series = self.story.getMetadataRaw('series')
        if series and self.getConfig('calibre_series_meta'):
            series_index = "0.0"
            if '[' in series:
                logger.debug(series)
                ## assumed "series [series_index]"
                series_index = series[series.index(' [') + 2:-1]
                series = series[:series.index(' [')]

                ## calibre always outputs a series_index and it's
                ## always a float with 1 or 2 decimals.  FFF usually
                ## has either an integer or no index. (injected
                ## calibre series is the only float at this time)
                series_index = "%.2f" % float(series_index)

            metadata.appendChild(
                newTag(contentdom,
                       "meta",
                       attrs={
                           "name": "calibre:series",
                           "content": series
                       }))
            metadata.appendChild(
                newTag(contentdom,
                       "meta",
                       attrs={
                           "name": "calibre:series_index",
                           "content": series_index
                       }))

        if self.getMetadata('description'):
            metadata.appendChild(
                newTag(contentdom,
                       "dc:description",
                       text=self.getMetadata('description')))

        for subject in self.story.getSubjectTags():
            metadata.appendChild(newTag(contentdom, "dc:subject",
                                        text=subject))

        if self.getMetadata('site'):
            metadata.appendChild(
                newTag(contentdom,
                       "dc:publisher",
                       text=self.getMetadata('site')))

        if self.getMetadata('storyUrl'):
            metadata.appendChild(
                newTag(contentdom,
                       "dc:identifier",
                       attrs={"opf:scheme": "URL"},
                       text=self.getMetadata('storyUrl')))
            metadata.appendChild(
                newTag(contentdom,
                       "dc:source",
                       text=self.getMetadata('storyUrl')))

        ## end of metadata, create manifest.
        items = []  # list of (id, href, type, title) tuples(all strings)
        itemrefs = []  # list of strings -- idrefs from .opfs' spines
        items.append(("ncx", "toc.ncx", "application/x-dtbncx+xml",
                      None))  ## we'll generate the toc.ncx file,
        ## but it needs to be in the items manifest.

        guide = None
        coverIO = None

        coverimgid = "image0000"
        if not self.story.cover and self.story.oldcover:
            logger.debug(
                "writer_epub: no new cover, has old cover, write image.")
            (oldcoverhtmlhref, oldcoverhtmltype, oldcoverhtmldata,
             oldcoverimghref, oldcoverimgtype,
             oldcoverimgdata) = self.story.oldcover
            outputepub.writestr(oldcoverhtmlhref, oldcoverhtmldata)
            outputepub.writestr(oldcoverimghref, oldcoverimgdata)

            coverimgid = "image0"
            items.append((coverimgid, oldcoverimghref, oldcoverimgtype, None))
            items.append(("cover", oldcoverhtmlhref, oldcoverhtmltype, None))
            itemrefs.append("cover")
            metadata.appendChild(
                newTag(contentdom, "meta", {
                    "content": "image0",
                    "name": "cover"
                }))
            guide = newTag(contentdom, "guide")
            guide.appendChild(
                newTag(contentdom,
                       "reference",
                       attrs={
                           "type": "cover",
                           "title": "Cover",
                           "href": oldcoverhtmlhref
                       }))

        if self.getConfig('include_images'):
            imgcount = 0
            for imgmap in self.story.getImgUrls():
                imgfile = "OEBPS/" + imgmap['newsrc']
                outputepub.writestr(imgfile, imgmap['data'])
                items.append(
                    ("image%04d" % imgcount, imgfile, imgmap['mime'], None))
                imgcount += 1
                if 'cover' in imgfile:
                    # make sure coverimgid is set to the cover, not
                    # just the first image.
                    coverimgid = items[-1][0]

        items.append(("style", "OEBPS/stylesheet.css", "text/css", None))

        if self.story.cover:
            # Note that the id of the cover xhtml *must* be 'cover'
            # for it to work on Nook.
            items.append(
                ("cover", "OEBPS/cover.xhtml", "application/xhtml+xml", None))
            itemrefs.append("cover")
            #
            # <meta name="cover" content="cover.jpg"/>
            metadata.appendChild(
                newTag(contentdom, "meta", {
                    "content": coverimgid,
                    "name": "cover"
                }))
            # cover stuff for later:
            # at end of <package>:
            # <guide>
            # <reference type="cover" title="Cover" href="Text/cover.xhtml"/>
            # </guide>
            guide = newTag(contentdom, "guide")
            guide.appendChild(
                newTag(contentdom,
                       "reference",
                       attrs={
                           "type": "cover",
                           "title": "Cover",
                           "href": "OEBPS/cover.xhtml"
                       }))

            if self.hasConfig("cover_content"):
                COVER = string.Template(self.getConfig("cover_content"))
            else:
                COVER = self.EPUB_COVER
            coverIO = StringIO.StringIO()
            coverIO.write(
                COVER.substitute(
                    dict(self.story.getAllMetadata().items() +
                         {'coverimg': self.story.cover}.items())))

        if self.getConfig("include_titlepage"):
            items.append(("title_page", "OEBPS/title_page.xhtml",
                          "application/xhtml+xml", "Title Page"))
            itemrefs.append("title_page")
        if len(self.story.getChapters()) > 1 and self.getConfig(
                "include_tocpage") and not self.metaonly:
            items.append(("toc_page", "OEBPS/toc_page.xhtml",
                          "application/xhtml+xml", "Table of Contents"))
            itemrefs.append("toc_page")

        ## save where to insert logpage.
        logpage_indices = (len(items), len(itemrefs))

        dologpage = ( self.getConfig("include_logpage") == "smart" and \
                          (self.story.logfile or self.story.getMetadataRaw("status") == "In-Progress") )  \
                     or self.getConfig("include_logpage") == "true"

        ## collect chapter urls and file names for internalize_text_links option.
        chapurlmap = {}
        for index, chap in enumerate(self.story.getChapters(fortoc=True)):
            if chap['html']:
                i = index + 1
                items.append(("file%s" % chap['index04'],
                              "OEBPS/file%s.xhtml" % chap['index04'],
                              "application/xhtml+xml", chap['title']))
                itemrefs.append("file%s" % chap['index04'])
                chapurlmap[chap['url']] = "file%s.xhtml" % chap[
                    'index04']  # url -> relative epub file name.

        if dologpage:
            if self.getConfig("logpage_at_end") == "true":
                ## insert logpage after chapters.
                logpage_indices = (len(items), len(itemrefs))
            items.insert(logpage_indices[0],
                         ("log_page", "OEBPS/log_page.xhtml",
                          "application/xhtml+xml", "Update Log"))
            itemrefs.insert(logpage_indices[1], "log_page")

        manifest = contentdom.createElement("manifest")
        package.appendChild(manifest)
        for item in items:
            (id, href, type, title) = item
            manifest.appendChild(
                newTag(contentdom,
                       "item",
                       attrs={
                           'id': id,
                           'href': href,
                           'media-type': type
                       }))

        spine = newTag(contentdom, "spine", attrs={"toc": "ncx"})
        package.appendChild(spine)
        for itemref in itemrefs:
            spine.appendChild(
                newTag(contentdom,
                       "itemref",
                       attrs={
                           "idref": itemref,
                           "linear": "yes"
                       }))
        # guide only exists if there's a cover.
        if guide:
            package.appendChild(guide)

        # write content.opf to zip.
        contentxml = contentdom.toxml(encoding='utf-8')

        # tweak for brain damaged Nook STR.  Nook insists on name before content.
        contentxml = contentxml.replace(
            '<meta content="%s" name="cover"/>' % coverimgid,
            '<meta name="cover" content="%s"/>' % coverimgid)
        outputepub.writestr("content.opf", contentxml)

        contentdom.unlink()
        del contentdom

        ## create toc.ncx file
        tocncxdom = getDOMImplementation().createDocument(None, "ncx", None)
        ncx = tocncxdom.documentElement
        ncx.setAttribute("version", "2005-1")
        ncx.setAttribute("xmlns", "http://www.daisy.org/z3986/2005/ncx/")
        head = tocncxdom.createElement("head")
        ncx.appendChild(head)
        head.appendChild(
            newTag(tocncxdom,
                   "meta",
                   attrs={
                       "name": "dtb:uid",
                       "content": uniqueid
                   }))
        head.appendChild(
            newTag(tocncxdom,
                   "meta",
                   attrs={
                       "name": "dtb:depth",
                       "content": "1"
                   }))
        head.appendChild(
            newTag(tocncxdom,
                   "meta",
                   attrs={
                       "name": "dtb:totalPageCount",
                       "content": "0"
                   }))
        head.appendChild(
            newTag(tocncxdom,
                   "meta",
                   attrs={
                       "name": "dtb:maxPageNumber",
                       "content": "0"
                   }))

        docTitle = tocncxdom.createElement("docTitle")
        docTitle.appendChild(
            newTag(tocncxdom, "text", text=self.getMetadata('title')))
        ncx.appendChild(docTitle)

        tocnavMap = tocncxdom.createElement("navMap")
        ncx.appendChild(tocnavMap)

        # <navPoint id="<id>" playOrder="<risingnumberfrom0>">
        #   <navLabel>
        #     <text><chapter title></text>
        #   </navLabel>
        #   <content src="<chapterfile>"/>
        # </navPoint>
        index = 0
        for item in items:
            (id, href, type, title) = item
            # only items to be skipped, cover.xhtml, images, toc.ncx, stylesheet.css, should have no title.
            if title:
                navPoint = newTag(tocncxdom,
                                  "navPoint",
                                  attrs={
                                      'id': id,
                                      'playOrder': unicode(index)
                                  })
                tocnavMap.appendChild(navPoint)
                navLabel = newTag(tocncxdom, "navLabel")
                navPoint.appendChild(navLabel)
                ## the xml library will re-escape as needed.
                navLabel.appendChild(
                    newTag(tocncxdom, "text", text=stripHTML(title)))
                navPoint.appendChild(
                    newTag(tocncxdom, "content", attrs={"src": href}))
                index = index + 1

        # write toc.ncx to zip file
        outputepub.writestr("toc.ncx", tocncxdom.toxml(encoding='utf-8'))
        tocncxdom.unlink()
        del tocncxdom

        # write stylesheet.css file.
        outputepub.writestr(
            "OEBPS/stylesheet.css",
            self.EPUB_CSS.substitute(self.story.getAllMetadata()))

        # write title page.
        if self.getConfig("titlepage_use_table"):
            TITLE_PAGE_START = self.EPUB_TABLE_TITLE_PAGE_START
            TITLE_ENTRY = self.EPUB_TABLE_TITLE_ENTRY
            WIDE_TITLE_ENTRY = self.EPUB_TABLE_TITLE_WIDE_ENTRY
            NO_TITLE_ENTRY = self.EPUB_TABLE_NO_TITLE_ENTRY
            TITLE_PAGE_END = self.EPUB_TABLE_TITLE_PAGE_END
        else:
            TITLE_PAGE_START = self.EPUB_TITLE_PAGE_START
            TITLE_ENTRY = self.EPUB_TITLE_ENTRY
            WIDE_TITLE_ENTRY = self.EPUB_TITLE_ENTRY  # same, only wide in tables.
            NO_TITLE_ENTRY = self.EPUB_NO_TITLE_ENTRY
            TITLE_PAGE_END = self.EPUB_TITLE_PAGE_END

        if coverIO:
            outputepub.writestr("OEBPS/cover.xhtml", coverIO.getvalue())
            coverIO.close()

        titlepageIO = StringIO.StringIO()
        self.writeTitlePage(out=titlepageIO,
                            START=TITLE_PAGE_START,
                            ENTRY=TITLE_ENTRY,
                            WIDE_ENTRY=WIDE_TITLE_ENTRY,
                            END=TITLE_PAGE_END,
                            NO_TITLE_ENTRY=NO_TITLE_ENTRY)
        if titlepageIO.getvalue():  # will be false if no title page.
            outputepub.writestr("OEBPS/title_page.xhtml",
                                titlepageIO.getvalue())
        titlepageIO.close()

        # write toc page.
        tocpageIO = StringIO.StringIO()
        self.writeTOCPage(tocpageIO, self.EPUB_TOC_PAGE_START,
                          self.EPUB_TOC_ENTRY, self.EPUB_TOC_PAGE_END)
        if tocpageIO.getvalue():  # will be false if no toc page.
            outputepub.writestr("OEBPS/toc_page.xhtml", tocpageIO.getvalue())
        tocpageIO.close()

        if dologpage:
            # write log page.
            logpageIO = StringIO.StringIO()
            self.writeLogPage(logpageIO)
            outputepub.writestr("OEBPS/log_page.xhtml", logpageIO.getvalue())
            logpageIO.close()

        if self.hasConfig('chapter_start'):
            CHAPTER_START = string.Template(self.getConfig("chapter_start"))
        else:
            CHAPTER_START = self.EPUB_CHAPTER_START

        if self.hasConfig('chapter_end'):
            CHAPTER_END = string.Template(self.getConfig("chapter_end"))
        else:
            CHAPTER_END = self.EPUB_CHAPTER_END

        for index, chap in enumerate(
                self.story.getChapters()):  # (url,title,html)
            if chap['html']:
                chap_data = chap['html']
                if self.getConfig('internalize_text_links'):
                    soup = bs4.BeautifulSoup(chap['html'], 'html5lib')
                    changed = False
                    for alink in soup.find_all('a'):
                        if alink.has_attr(
                                'href') and alink['href'] in chapurlmap:
                            alink['href'] = chapurlmap[alink['href']]
                            changed = True
                    if changed:
                        chap_data = unicode(soup)
                        # Don't want html, head or body tags in
                        # chapter html--bs4 insists on adding them.
                        chap_data = re.sub(r"</?(html|head|body)[^>]*>\r?\n?",
                                           "", chap_data)

                #logger.debug('Writing chapter text for: %s' % chap.title)
                chap['url'] = removeEntities(chap['url'])
                chap['chapter'] = removeEntities(chap['chapter'])
                chap['title'] = removeEntities(chap['title'])
                chap['origchapter'] = removeEntities(chap['origtitle'])
                chap['tocchapter'] = removeEntities(chap['toctitle'])
                # escape double quotes in all vals.
                for k, v in chap.items():
                    if isinstance(v, basestring):
                        chap[k] = v.replace('"', '&quot;')
                fullhtml = CHAPTER_START.substitute(chap) + \
                    chap_data.strip() + \
                    CHAPTER_END.substitute(chap)
                # strip to avoid ever growing numbers of newlines.
                # ffnet(& maybe others) gives the whole chapter text
                # as one line.  This causes problems for nook(at
                # least) when the chapter size starts getting big
                # (200k+)
                fullhtml = re.sub(r'(</p>|<br ?/>)\n*', r'\1\n', fullhtml)

                outputepub.writestr("OEBPS/file%s.xhtml" % chap['index04'],
                                    fullhtml.encode('utf-8'))
                del fullhtml

        if self.story.calibrebookmark:
            outputepub.writestr("META-INF/calibre_bookmarks.txt",
                                self.story.calibrebookmark)

        # declares all the files created by Windows.  otherwise, when
        # it runs in appengine, windows unzips the files as 000 perms.
        for zf in outputepub.filelist:
            zf.create_system = 0
        outputepub.close()
        out.write(zipio.getvalue())
        zipio.close()
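# A hedged, self-contained sketch of the epub-skeleton trick described above: the mimetype entry
# must be the first member and stored uncompressed, so the archive is closed and re-opened:
from zipfile import ZipFile, ZIP_STORED, ZIP_DEFLATED

with ZipFile("minimal.epub", "w", compression=ZIP_STORED) as z:
    z.writestr("mimetype", "application/epub+zip")
with ZipFile("minimal.epub", "a", compression=ZIP_DEFLATED) as z:
    z.writestr("META-INF/container.xml",
               '<?xml version="1.0"?>'
               '<container version="1.0" xmlns="urn:oasis:names:tc:opendocument:xmlns:container">'
               '<rootfiles><rootfile full-path="content.opf" media-type="application/oebps-package+xml"/>'
               '</rootfiles></container>')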
Example #8
def unzipIPSW(fname):

    if os.path.exists("custom.ipsw"):
        os.remove("custom.ipsw")

    print("Starting IPSW unzipping")
    outputFolder = os.getcwd()
    newpath = fname.rstrip()
    fname = str(newpath)
    testFile = os.path.exists(fname)

    while not testFile or not fname.endswith(".ipsw"):
        print(
            "Invalid filepath/filename.\nPlease try again with a valid filepath/filename."
        )
        fname = input(
            "Enter the path to the IPSW file (Or drag and drop the IPSW into this window):\n"
        )
        newpath = fname.rstrip()
        fname = str(newpath)
        testFile = os.path.exists(fname)
    else:
        #Will now continue with new valid file
        print("Continuing...")
    if testFile and fname.endswith(".ipsw"):
        if os.path.exists("resources/restoreFiles/igetnonce"):
            shutil.move("resources/restoreFiles/igetnonce", "igetnonce")
        if os.path.exists("resources/restoreFiles/tsschecker"):
            shutil.move("resources/restoreFiles/tsschecker", "tsschecker")
        if os.path.exists("resources/restoreFiles/futurerestore"):
            shutil.move("resources/restoreFiles/futurerestore",
                        "futurerestore")
        if os.path.exists("resources/restoreFiles/irecovery"):
            shutil.move("resources/restoreFiles/irecovery", "irecovery")
        print("IPSW found at given path...")
        print("Cleaning up old files...")
        done = True
        removeFiles(done)
        print("Unzipping..")
        with ZipFile(fname, 'r') as zip_ref:
            zip_ref.extractall(outputFolder)

        source = ("Firmware/dfu/")
        dest1 = os.getcwd()

        files = os.listdir(source)

        for f in files:
            shutil.move(source + f, dest1)
        devicemodel = str(localdevice.getmodel())
        t = localdevice.pick3264(devicemodel, fname)
        if t == 32:
            createCustomIPSW32(fname)
        elif t == 64:
            pwndfumode()
            createCustomIPSW64(fname, devicemodel)
        else:
            exit(2)

    else:
        print('\033[91m' + "ERROR: Not valid filepath...")
        print("ERROR: Try again" + '\033[0m')
    def test_ctypes_binary_creation(self, toolchain_variant):
        """Create a python_binary() with all native toolchain variants, and test the result."""
        with temporary_dir() as tmp_dir:
            pants_run = self.run_pants(command=['binary', self._binary_target],
                                       config={
                                           GLOBAL_SCOPE_CONFIG_SECTION: {
                                               'pants_distdir': tmp_dir,
                                           },
                                           'native-build-step': {
                                               'toolchain_variant':
                                               toolchain_variant.value,
                                           },
                                       })

            self.assert_success(pants_run)

            # Check that we have selected the appropriate compilers for our selected toolchain variant,
            # for both C and C++ compilation.
            # TODO(#6866): don't parse info logs for testing! There is a TODO in test_cpp_compile.py
            # in the native backend testing to traverse the PATH to find the selected compiler.
            compiler_names_to_check = toolchain_variant.resolve_for_enum_variant(
                {
                    'gnu': ['gcc', 'g++'],
                    'llvm': ['clang', 'clang++'],
                })
            for compiler_name in compiler_names_to_check:
                self.assertIn(
                    "selected compiler exe name: '{}'".format(compiler_name),
                    pants_run.stdout_data)

            # All of our toolchains currently use the C++ compiler's filename as argv[0] for the linker,
            # so there is only one name to check.
            linker_names_to_check = toolchain_variant.resolve_for_enum_variant(
                {
                    'gnu': ['g++'],
                    'llvm': ['clang++'],
                })
            for linker_name in linker_names_to_check:
                self.assertIn(
                    "selected linker exe name: '{}'".format(linker_name),
                    pants_run.stdout_data)

            # Check for the pex and for the wheel produced for our python_dist().
            pex = os.path.join(tmp_dir, 'bin.pex')
            self.assertTrue(is_executable(pex))

            # The + is because we append the target's fingerprint to the version. We test this version
            # string in test_build_local_python_distributions.py.
            wheel_glob = os.path.join(tmp_dir, 'ctypes_test-0.0.1+*.whl')
            wheel_dist_with_path = assert_single_element(glob.glob(wheel_glob))
            wheel_dist = re.sub(
                '^{}{}'.format(re.escape(tmp_dir), os.path.sep), '',
                wheel_dist_with_path)

            dist_name, dist_version, wheel_platform = name_and_platform(
                wheel_dist)
            self.assertEqual(dist_name, 'ctypes_test')
            contains_current_platform = Platform.current.resolve_for_enum_variant(
                {
                    'darwin': wheel_platform.startswith('macosx'),
                    'linux': wheel_platform.startswith('linux'),
                })
            self.assertTrue(contains_current_platform)

            # Verify that the wheel contains our shared libraries.
            wheel_files = ZipFile(wheel_dist_with_path).namelist()

            dist_versioned_name = '{}-{}.data'.format(dist_name, dist_version)
            for shared_lib_filename in [
                    'libasdf-c_ctypes.so', 'libasdf-cpp_ctypes.so'
            ]:
                full_path_in_wheel = os.path.join(dist_versioned_name, 'data',
                                                  shared_lib_filename)
                self.assertIn(full_path_in_wheel, wheel_files)

            # Execute the binary and ensure its output is correct.
            binary_run_output = invoke_pex_for_output(pex)
            self.assertEqual(b'x=3, f(x)=17\n', binary_run_output)
Example #10
0
def exec_process():
    if not isdir(consts.descargas):
        return {'ok': False, 'message': 'No existe la ruta con los archivos que se deben procesar'}, 500
    
    # List the downloaded compressed archives
    archivos_comprimidos = read_file(consts.descargas, consts.extension_comprimidos, consts.nombre_comprimido_devoluciones)

    if len(archivos_comprimidos) > 0:
        for archivo_comprimido in archivos_comprimidos:
            # Decompress the archive
            archivo_zip = ZipFile(archivo_comprimido)
            try:
                archivo_zip.extractall(consts.descargas)
                # print('Extrayendo el archivo ' + archivo_comprimido)
            except Exception as e:
                print('Error al extraer el archivo ' + archivo_comprimido)
                print('Detalle del error' + str(e))
                pass
            archivo_zip.close()
            # Once extracted, the compressed archive can be deleted
            remove(archivo_comprimido)
            # print('Eliminando el archivo ' + archivo_comprimido)

    # List the files again, looking for the returns .txt files
    archivos_txt = read_file(consts.descargas, consts.extension_txt, consts.nombre_txt)

    if len(archivos_txt) > 0:
        for archivo_txt in archivos_txt:
            # print('Archivo: ' + archivo_txt)
            # Read the file
            archivo_texto = open(archivo_txt, 'r')
            contenido = archivo_texto.read()
            archivo_texto.seek(0)
            # Check whether the file has already been processed
            if search_file(archivo_txt.split(sep)[-1]):
                print('El archivo ' + archivo_txt.split(sep)[-1] + ' ya fue procesado')
            # Check whether the file is a Joselo empanadas returns file
            elif contenido.count(consts.proveedor) > 0 and contenido.count(consts.codigo_proveedor) > 0:
                # Read line by line to extract the values
                lineas = archivo_texto.readlines()
                numero_linea = 0
                linea_local = -99
                ciudad = ''
                local = ''
                fecha = ''
                numero_bandejas = 0
                for linea in lineas:
                    if linea.count(consts.etiqueta_local) == 1:
                        # The store is on the line after the one containing etiqueta_local = 'Tda/Alm/CDI:'
                        linea_local = numero_linea + 1
                    if numero_linea == linea_local:
                        # This is the store line; the store name starts at the beginning of the line
                        local = linea[0:linea.index(consts.codigo_proveedor)].strip()
                    if linea.count(consts.pais) == 1:
                        # This line contains the city, e.g. PORTOVIEJO - ECUADOR
                        ciudad = linea[0:linea.index(consts.pais) - 3].strip()
                    if linea.count(consts.etiqueta_fecha_elaboracion) == 1:
                        # This line contains the date
                        fecha = linea[linea.index(consts.etiqueta_fecha_elaboracion) + len(consts.etiqueta_fecha_elaboracion):len(
                            linea) - 1].strip()
                    if linea.count(consts.etiqueta_total) == 1:
                        # This line contains the total value of the return
                        valor = float(linea[linea.index(consts.etiqueta_total) + len(consts.etiqueta_total):len(linea) - 1].strip())
                        # Check that the value is a multiple of one of the tray prices
                        for precio in consts.precios:
                            if round((valor % precio), consts.decimales_a_considerar) == 0 or consts.precios.count(round((valor % precio), consts.decimales_a_considerar)) > 0:
                                numero_bandejas = int(valor / precio)

                    numero_linea = numero_linea + 1
                
                if numero_bandejas == 0:
                    print('El valor del archivo: ' + archivo_txt + ' no es correcto')
                else:
                    # Insert the file's values and the processed filename into the database
                    insert_filename(archivo_txt.split(sep)[-1])
                    # Split the date into day, month, and year
                    insert_devolucion(ciudad, local, int(fecha.split('/')[2]), mes(fecha.split('/')[1]), int(fecha.split('/')[0]), numero_bandejas)
                    print('Datos del archivo ' + archivo_txt.split(sep)[-1] + ':')
                    print(ciudad)
                    print(local)
                    print(fecha)
                    print(str(numero_bandejas) + ' bandejas\n')
            # Once the values have been extracted, the .txt file can be deleted
            remove(archivo_txt)
            archivo_texto.close()
    return {'ok': True, 'message': 'Proceso finalizado correctamente', 'archivos_procesados': archivos_txt}
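
# The (dict, status_code) return shape above suggests exec_process() backs an
# HTTP endpoint; a minimal sketch of wiring it into a Flask route (the app
# object, URL, and view name are assumptions, not part of this snippet):
from flask import Flask, jsonify

app = Flask(__name__)

@app.route('/procesar-devoluciones', methods=['POST'])
def procesar_devoluciones():
    resultado = exec_process()
    if isinstance(resultado, tuple):      # error path returns (body, status)
        cuerpo, estado = resultado
        return jsonify(cuerpo), estado
    return jsonify(resultado)             # success path returns just the body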
Example #11
0
def createCustomIPSW64(fname, devicemodel):
    print("Starting iBSS/iBEC patching")
    patch_folder = Path("resources/patches/")
    phoneibec = patch_folder / "ibec5s.patch"
    phoneibss = patch_folder / "ibss5s.patch"
    ipadminiibec = patch_folder / "ibec_ipad4b.patch"
    ipadminiibss = patch_folder / "ibss_ipad4b.patch"
    ipadairibec = patch_folder / "ibec_ipad4.patch"
    ipadairibss = patch_folder / "ibss_ipad4.patch"
    if "iPhone" in fname and "10.3.3" in fname:
        print("Looks like you are downgrading an iPhone 5s to 10.3.3!")
        bsdiff4.file_patch_inplace("iBEC.iphone6.RELEASE.im4p", phoneibec)
        bsdiff4.file_patch_inplace("iBSS.iphone6.RELEASE.im4p", phoneibss)
        device = "iPhone5s"
    elif "iPad" in fname and "10.3.3" in fname:
        if devicemodel == "iPad4,1" or devicemodel == "iPad4,2" or devicemodel == "iPad4,3":
            print("Looks like you are downgrading an iPad Air to 10.3.3!")
            bsdiff4.file_patch_inplace("iBEC.ipad4.RELEASE.im4p", ipadairibec)
            bsdiff4.file_patch_inplace("iBSS.ipad4.RELEASE.im4p", ipadairibss)
            device = "iPadAir"
        elif devicemodel == "iPad4,4" or devicemodel == "iPad4,5":
            print("Looks like you are downgrading an iPad Mini 2 to 10.3.3!")
            bsdiff4.file_patch_inplace("iBEC.ipad4b.RELEASE.im4p",
                                       ipadminiibec)
            bsdiff4.file_patch_inplace("iBSS.ipad4b.RELEASE.im4p",
                                       ipadminiibss)
            device = "iPadMini"
        else:
            print(
                "ERROR: Unknown input. Exiting purely because you can't read and that's sad..."
            )
            print("ERROR: Exiting...")
            exit(1)
    print("Patched iBSS/iBEC")
    print("About to re-build IPSW")

    if device == "iPhone5s":
        shutil.move("iBEC.iphone6.RELEASE.im4p", "Firmware/dfu/")
        shutil.move("iBSS.iphone6.RELEASE.im4p", "Firmware/dfu/")
        shutil.move("Firmware/Mav7Mav8-7.60.00.Release.bbfw",
                    "resources/restoreFiles/baseband.bbfw")
        if devicemodel == "iPhone6,1":
            shutil.move("Firmware/all_flash/sep-firmware.n51.RELEASE.im4p",
                        "resources/restoreFiles/sep.im4p")
        elif devicemodel == "iPhone6,2":
            shutil.move("Firmware/all_flash/sep-firmware.n53.RELEASE.im4p",
                        "resources/restoreFiles/sep.im4p")
        touch("Firmware/usr/local/standalone/blankfile")
        with ZipFile('custom.ipsw', 'w') as zipObj2:
            zipObj2.write('Restore.plist')
            zipObj2.write('kernelcache.release.iphone8b')
            zipObj2.write('kernelcache.release.iphone6')
            zipObj2.write('BuildManifest.plist')
            zipObj2.write('058-75381-062.dmg')
            zipObj2.write('058-74940-063.dmg')
            zipObj2.write('058-74917-062.dmg')
            zipObj2.write('._058-74917-062.dmg')
            for folderName, subfolders, filenames in os.walk("Firmware"):
                for filename in filenames:
                    filePath = os.path.join(folderName, filename)
                    zipObj2.write(filePath)
        restore64(devicemodel)

    elif device == "iPadAir" or device == "iPadMini":
        if devicemodel == "iPad4,1" or devicemodel == "iPad4,2" or devicemodel == "iPad4,3":
            shutil.move("iBEC.ipad4.RELEASE.im4p", "Firmware/dfu/")
            shutil.move("iBSS.ipad4.RELEASE.im4p", "Firmware/dfu/")
            if devicemodel == "iPad4,1":
                shutil.move("Firmware/all_flash/sep-firmware.j71.RELEASE.im4p",
                            "resources/restoreFiles/sep.im4p")
            elif devicemodel == "iPad4,2":
                shutil.move("Firmware/all_flash/sep-firmware.j72.RELEASE.im4p",
                            "resources/restoreFiles/sep.im4p")
                shutil.move("Firmware/Mav7Mav8-7.60.00.Release.bbfw",
                            "resources/restoreFiles/baseband.bbfw")
            elif devicemodel == "iPad4,3":
                shutil.move("Firmware/all_flash/sep-firmware.j73.RELEASE.im4p",
                            "resources/restoreFiles/sep.im4p")
                shutil.move("Firmware/Mav7Mav8-7.60.00.Release.bbfw",
                            "resources/restoreFiles/baseband.bbfw")
        elif devicemodel == "iPad4,4" or devicemodel == "iPad4,5":
            shutil.move("iBEC.ipad4b.RELEASE.im4p", "Firmware/dfu/")
            shutil.move("iBSS.ipad4b.RELEASE.im4p", "Firmware/dfu/")
            if devicemodel == "iPad4,4":
                shutil.move("Firmware/all_flash/sep-firmware.j85.RELEASE.im4p",
                            "resources/restoreFiles/sep.im4p")
            elif devicemodel == "iPad4,5":
                shutil.move("Firmware/all_flash/sep-firmware.j86.RELEASE.im4p",
                            "resources/restoreFiles/sep.im4p")
                shutil.move("Firmware/Mav7Mav8-7.60.00.Release.bbfw",
                            "resources/restoreFiles/baseband.bbfw")
        touch("Firmware/usr/local/standalone/blankfile")

        with ZipFile('custom.ipsw', 'w') as zipObj2:
            zipObj2.write('Restore.plist')
            zipObj2.write('kernelcache.release.ipad4')
            zipObj2.write('kernelcache.release.ipad4b')
            zipObj2.write('BuildManifest.plist')
            zipObj2.write('058-75381-062.dmg')
            zipObj2.write('058-75094-062.dmg')
            zipObj2.write('058-74940-063.dmg')
            zipObj2.write('._058-75094-062.dmg')
            for folderName, subfolders, filenames in os.walk("Firmware"):
                for filename in filenames:
                    filePath = os.path.join(folderName, filename)
                    zipObj2.write(filePath)
        restore64(devicemodel)
    else:
        print('\033[91m' + "something broke lmao" + '\033[0m')
        exit(1)
Example #12
0
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental.preprocessing import StringLookup
"""
## Prepare the data

### Download and prepare the DataFrames

First, let's download the movielens data.

The downloaded folder will contain three data files: `users.dat`, `movies.dat`,
and `ratings.dat`.
"""

urlretrieve("http://files.grouplens.org/datasets/movielens/ml-1m.zip",
            "movielens.zip")
ZipFile("movielens.zip", "r").extractall()
"""
Then, we load the data into pandas DataFrames with their proper column names.
"""

users = pd.read_csv(
    "ml-1m/users.dat",
    sep="::",
    names=["user_id", "sex", "age_group", "occupation", "zip_code"],
)

ratings = pd.read_csv(
    "ml-1m/ratings.dat",
    sep="::",
    names=["user_id", "movie_id", "rating", "unix_timestamp"],
)
def hathi_limited_view_package_dirs(tmpdir_factory):
    test_dir = tmpdir_factory.mktemp("hathi_limited", numbered=True)
    sample_package_names = {
        "uiuc.40": [
            (
                "40.mets.xml",
                (
                    "40",
                    [
                        "40.mets.xml"
                    ] +
                    [f"{str(a).zfill(7)}.txt" for a in range(282)] +
                    [f"{str(a).zfill(7)}.jp2" for a in range(282)] +
                    [f"{str(a).zfill(7)}.xml" for a in range(282)]
                )
            )
        ],
        "uiuc.40834v1": [
            (
                "40834v1.mets.xml",
                (
                    "40834v1",
                    [
                        "40834v1.mets.xml"
                    ] +
                    [f"{str(a).zfill(7)}.txt" for a in range(256)] +
                    [f"{str(a).zfill(7)}.tif" for a in range(256)] +
                    [f"{str(a).zfill(7)}.xml" for a in range(256)]
                )
            )
        ],
        "uiuc.5285248v1924": [
            (
                "5285248v1924.mets.xml",
                (
                    "5285248v1924",
                    [
                        "5285248v1924.mets.xml"
                    ] +
                    [f"{str(a).zfill(7)}.txt" for a in range(282)] +
                    [f"{str(a).zfill(7)}.jp2" for a in range(282)] +
                    [f"{str(a).zfill(7)}.xml" for a in range(282)]
                )
            )
        ]
    }

    # eg: 5285248v1924/
    for pkg_name, pkg_data in sample_package_names.items():
        pkg_dir = test_dir.mkdir(pkg_name)

        tmp_dir = test_dir.mkdir(f"build_dir-{pkg_name}")
        for mets_file_filename, archive_data in pkg_data:
            # Add any files to the package
            pkg_dir.join(mets_file_filename).write("")
            bib_id, zip_content = archive_data

            # eg: 5285248v1924/5285248v1924.zip
            with ZipFile(pkg_dir.join(f"{bib_id}.zip"), 'w') as myzip:
                build_package_dir = tmp_dir.mkdir(bib_id)
                for zipped_file in zip_content:
                    generated_file = build_package_dir.join(zipped_file)
                    generated_file.write("")

                    arcname = os.path.join(bib_id, zipped_file)
                    myzip.write(generated_file, arcname=arcname)

    return test_dir
Example #14
0
    def test(self):
        tdata = TestBase.get_tst_data()
        rd = tdata.get_rd('cancm4_tas')

        output_formats = [
            constants.OutputFormatName.CSV_SHAPEFILE,
            constants.OutputFormatName.CSV, constants.OutputFormatName.NETCDF,
            constants.OutputFormatName.SHAPEFILE,
            constants.OutputFormatName.OCGIS
        ]
        _with_auxiliary_files = [True, False]
        for output_format, with_auxiliary_files in itertools.product(
                output_formats, _with_auxiliary_files):
            dir_output = tempfile.mkdtemp()
            try:
                ocgis.env.DIR_OUTPUT = dir_output

                ops = ocgis.OcgOperations(dataset=rd,
                                          snippet=True,
                                          output_format=output_format,
                                          geom='state_boundaries',
                                          select_ugid=[23],
                                          prefix=output_format + '_data')
                ret_path = ops.execute()

                fmtd_ret = format_return(
                    ret_path, ops, with_auxiliary_files=with_auxiliary_files)

                assert (os.path.exists(fmtd_ret))
                if output_format in [
                        constants.OutputFormatName.CSV,
                        constants.OutputFormatName.NETCDF
                ] and with_auxiliary_files is False:
                    assert (fmtd_ret.endswith(output_format))
                else:
                    assert (is_zipfile(fmtd_ret))
                    zipf = ZipFile(fmtd_ret, 'r')
                    try:
                        namelist = zipf.namelist()
                        assert (len(namelist) > 0)
                        if output_format in [
                                constants.OutputFormatName.CSV_SHAPEFILE
                        ]:
                            test = [
                                re.match('shp/.+'.format(output_format), name)
                                != None for name in namelist
                            ]
                            assert (any(test))
                        else:
                            test = [
                                re.match('shp/.+'.format(output_format),
                                         name) == None for name in namelist
                            ]
                            assert (all(test))
                    finally:
                        zipf.close()
            # numpy formats are not implemented
            except NotImplementedError:
                assert (output_format == constants.OutputFormatName.OCGIS)
            finally:
                ocgis.env.reset()
                shutil.rmtree(dir_output)
Example #15
0
# 10 Monkey classification with custom DNN Architecture

"""
Set the operating-system variable 'KAGGLE_USERNAME' to your Kaggle username (here: theroyakash).
You can download your 'KAGGLE_KEY' from Kaggle's account settings.
"""
import os
os.environ['KAGGLE_USERNAME'] = "******"
os.environ['KAGGLE_KEY'] = "##########CONFIDENTIAL##########"

# Run this command on the command line: !kaggle datasets download -d slothkong/10-monkey-species
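
# As an alternative to the shell command above, the same dataset can be fetched
# from Python with the official Kaggle client. This is a minimal sketch,
# assuming the kaggle package is installed, the credentials set above are
# valid, and the dataset_download_files call is available:
from kaggle.api.kaggle_api_extended import KaggleApi

api = KaggleApi()
api.authenticate()
api.dataset_download_files('slothkong/10-monkey-species', path='.', unzip=False)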


from zipfile import ZipFile

with ZipFile('10-monkey-species.zip', 'r') as zipObj:
    # Extract all the contents of the zip file into the current directory
    zipObj.extractall()


import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, GlobalAveragePooling2D, BatchNormalization
from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation, Concatenate, Lambda
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import regularizers, activations
Example #16
0
    if os_type == "Windows":
        print('Running on Windows system')
        discord_lstorage = usr_home + '\\AppData\\Roaming\\discord\\Local Storage\\'
        discord_sstorage = usr_home + '\\AppData\\Roaming\\discord\\Session Storage\\'
        archive_location = usr_home + '\\AppData\\Local\\Temp\\' + zip_name

    elif os_type == "Linux":
        print('Running on Linux system')
        discord_lstorage = usr_home + '/.config/discord/Local Storage/'
        discord_sstorage = usr_home + '/.config/discord/Session Storage'
        archive_location = '/tmp/' + zip_name

    loot_1 = get_all_file_paths(discord_lstorage)
    loot_2 = get_all_file_paths(discord_sstorage)

    with ZipFile(archive_location, 'w') as zip:
        # writing each file one by one
        for file in loot_1:
            zip.write(file)
        for file in loot_2:
            zip.write(file)

    # FTP
    ftp = FTP()
    ftp.set_debuglevel(2)
    #ftp.connect(<SERVER>, <PORT>)
    #ftp.login(<USER>, <PASSWORD>)
    #ftp.cwd(<REMOTE FTP DIRECTORY>)

    # Sending file on FTP server
    print(ftp.dir())
Example #17
0
def read_graphs(stream, format = None,
                full = False, normalize = False, reify = False,
                frameworks = None, prefix = None, text = None, filter = None,
                trace = 0, strict = 0, quiet = False, robust = False,
                alignment = None, anchors = None, pretty = False,
                id = None, n = None, i = None):

  name = getattr(stream, "name", "");
  if name.endswith(".zip"):
    with ZipFile(name) as zip:
      stream = None;
      for entry in zip.namelist():
        if entry.endswith(".mrp"):
          if stream is not None:
            print("read_graphs(): multiple MRP entries in ‘{}’; exit."
                  "".format(name), file = sys.stderr);
            sys.exit(1);
          stream = zip.open(entry);
      if stream is None:
        print("read_graphs(): missing MRP entry in ‘{}’; exit."
              "".format(name), file = sys.stderr);
        sys.exit(1);

  generator = None;
  if format in {"amr", "camr"}:
    generator \
      = codec.amr.read(stream, full = full, reify = reify,
                       text = text, camr = format == "camr",
                       alignment = alignment, quiet = quiet, trace = trace);
  elif format in {"ccd", "dm", "pas", "psd"}:
    generator = codec.sdp.read(stream, framework = format, text = text);
  elif format == "eds":
    generator = codec.eds.read(stream, reify = reify, text = text);
  elif format == "mrp":
    generator = codec.mrp.read(stream, text = text, robust = robust);
  elif format == "pmb":
    generator = codec.pmb.read(stream, full = full,
                               reify = reify, text = text,
                               trace = trace, strict = strict);
  elif format == "treex":
    generator = codec.treex.read(stream)
  elif format == "ucca":
    generator = codec.ucca.read(stream, text = text, prefix = prefix);
  elif format == "conllu" or format == "ud":
    generator = codec.conllu.read(stream, framework = format, text = text,
                                  anchors = anchors, trace = trace);
  elif format == "eud":
    generator = codec.conllu.read(stream, framework=format, text=text,
                                  anchors=anchors, trace=trace, enhanced_graph=True);
  else:
    print("read_graphs(): invalid input codec {}; exit."
          "".format(format), file = sys.stderr);
    sys.exit(1);

  if generator is None:
    return None, None;

  #
  # (for now) break out of the generators, for downstream simplicity
  #
  graphs = [];
  overlays = [];
  j = 0;
  while n is None or n < 1 or j < n:
    try:
      graph, overlay = next(generator);
      if frameworks is not None and graph.framework not in frameworks: continue;
      if filter is not None and graph.id not in filter: continue;
      if id is not None:
        if graph.id == id:
          graphs.append(graph); overlays.append(overlay);
      elif i is not None and i >= 0:
        if j == i:
          graphs.append(graph); overlays.append(overlay);
          break;
      else:
        graphs.append(graph); overlays.append(overlay);
      j += 1;
    except StopIteration:
      break;
    except Exception as error:
      print(error, file = sys.stderr);
      pass;

  if pretty:
    for graph in graphs: graph.prettify(trace);
  if normalize:
    for graph in graphs: graph.normalize(normalize, trace);

  return graphs, overlays;
Example #18
0
import plotly.graph_objs as go
import plotly.express as px
from flask_babel import gettext
from plotly.subplots import make_subplots

import numpy as np
import altair as alt

from datetime import datetime

import geopandas

url = "https://statbel.fgov.be/sites/default/files/files/opendata/deathday/DEMO_DEATH_OPEN.zip"
content = requests.get(url)
zf = ZipFile(BytesIO(content.content))

# find the first matching .txt file in the zip:
match = [s for s in zf.namelist() if ".txt" in s][0]
# the first line of the file contains a string - that line should be ignored, hence skiprows

mydateparser = lambda x: datetime.strptime(x, "%d/%m/%Y")

df = pandas.read_csv(zf.open(match),
                     parse_dates=['DT_DATE'],
                     date_parser=mydateparser,
                     low_memory=False,
                     sep=";")

df.to_csv("../static/csv/mortality_statbel.csv", index=False)
Example #19
0
def _handlerContent(req, args, repositoryDir):
    # Documents, extracted from archive -- or a simple default page
    #_logTmp("Got request for %s." % req.uri)
    try:
        suburi = req.uri.split(".py/", 1)
        if len(suburi)>1:
            if "/" in suburi[1]:   # we need to extract .oao contents
                (oao, path) = suburi[1].split("/", 1)
                path = re.sub(r"[\\/]+", "/", path)
                path = re.sub(r"^/", "", path)
                try:
                    zipPath = os.path.join(repositoryDir, oao)
                    global _zipCache
                    if zipPath not in _zipCache:
                        _zipCache[zipPath] = ZipFile(open(zipPath, 'rb'))
                    pack = _zipCache[zipPath]
                    fileName = path.split("/")[-1]
                    
                    filelist = pack.namelist()
                    if path not in filelist and "icon" in args:
                        req.internal_redirect("/"+"/".join(suburi[0].split("/")[:-1])+"/Unknown.png")
                        return apache.OK
                    elif path not in filelist and "doc" in args:
                        import orange_headfoot
                        return orange_headfoot._handleStatic(req, "<head><title>Orange - Documentation not found</title></head><body>Sorry, the documentation you look for is not available.</body>")
                    if fileName=="" or path not in filelist:
                        if path[-1]=="/": path=path[:-1]
                        indexPages = ["main.htm", "main.html", "index.html", "index.htm", "default.html", "default.htm"]
                        for indexPage in indexPages:
                            if path+"/"+indexPage in filelist:
                                path = path+"/"+indexPage
                                break
                        if path not in filelist:
                            return apache.HTTP_NOT_FOUND
                        fileName = path.split("/")[-1]
                    
                    type = mimetypes.guess_type(fileName)[0]
                    if not type:
                        return apache.NO_CONTENT
                    content = pack.read(path)
                    open('/tmp/addons.tmp', 'a').write("%s: %s\n" % (path, type))
                    if type.startswith("text/html"):
                        try:
                            import orange_headfoot
                            return orange_headfoot._handleStatic(req, content)
                        except Exception as e:
                            pass
                    req.content_type = type
                    req.write(content)
                    return apache.OK
                except Exception as e:
                    return apache.INTERNAL_ERROR
            else:
                return apache.DECLINED
        else:
#            content = """<!doctype html>
#            <html>
#              <head>
#                <title>Orange Add-on Repository</title>
#              </head>
#              <body><h1>Orange Add-on Repository %s</h1>
#                <p>This is an <a href="http://www.ailab.si/orange">Orange</a> add-on repository. Would you like to <a href="upload.html">upload</a> new add-ons?</p>
#              </body>
#            </html>""" % req.uri
#            try: 
#                import orange_headfoot
#                orange_headfoot._handleStatic(req, content)
#            except:
#                req.content_type = 'text/html; charset=utf-8'
#                req.write(content)
#                return apache.OK
            util.redirect(req, ".")
    finally:
        pass           
Example #20
0
def datasource(lat, lon, source_dir):
    """ Return a gdal datasource for a NED 10m lat, lon corner.
    
        If it doesn't already exist locally in source_dir, grab a new one.
    """
    #
    # Create a URL - tdds3.cr.usgs.gov looks to be a redirect from
    # http://gisdata.usgs.gov/TDDS/DownloadFile.php?TYPE=ned3f_zip&FNAME=nxxwxx.zip
    #
    # FIXME for southern/western hemispheres
    fmt = 'http://tdds3.cr.usgs.gov/Ortho9/ned/ned_13/float/n%02dw%03d.zip'
    url = fmt % (abs(lat), abs(lon))

    #
    # Create a local filepath
    #
    s, host, path, p, q, f = urlparse(url)

    local_dir = md5(url).hexdigest()[:3]
    local_dir = join(source_dir, local_dir)

    local_base = join(local_dir, basename(path)[:-4])
    local_path = local_base + '.flt'
    local_none = local_base + '.404'

    #
    # Check if the file exists locally
    #
    if exists(local_path):
        return gdal.Open(local_path, gdal.GA_ReadOnly)

    if exists(local_none):
        return None

    if not exists(local_dir):
        makedirs(local_dir)
        chmod(local_dir, 0777)

    assert isdir(local_dir)

    #
    # Grab a fresh remote copy
    #
    print >> stderr, 'Retrieving', url, 'in DEM.NED10m.datasource().'

    conn = HTTPConnection(host, 80)
    conn.request('GET', path)
    resp = conn.getresponse()

    if resp.status == 404:
        # we're probably outside the coverage area
        print >> open(local_none, 'w'), url
        return None

    assert resp.status == 200, (resp.status, resp.read())

    try:
        dirpath = mkdtemp(prefix='ned10m-')
        zippath = join(dirpath, 'dem.zip')

        zipfile = open(zippath, 'w')
        zipfile.write(resp.read())
        zipfile.close()

        zipfile = ZipFile(zippath)

        for name in zipfile.namelist():
            if fnmatch(name,
                       '*/*/float*.???') and name[-4:] in ('.hdr', '.flt',
                                                           '.prj'):
                local_file = local_base + name[-4:]

            elif fnmatch(name,
                         '*/float*_13.???') and name[-4:] in ('.hdr', '.flt',
                                                              '.prj'):
                local_file = local_base + name[-4:]

            elif fnmatch(name, '*/float*_13'):
                local_file = local_base + '.flt'

            else:
                # don't recognize the contents of this zip file
                continue

            zipfile.extract(name, dirpath)
            move(join(dirpath, name), local_file)

            if local_file.endswith('.hdr'):
                # GDAL needs some extra hints to understand the raw float data
                hdr_file = open(local_file, 'a')
                print >> hdr_file, 'nbits 32'
                print >> hdr_file, 'pixeltype float'

        #
        # The file better exist locally now
        #
        return gdal.Open(local_path, gdal.GA_ReadOnly)

    finally:
        rmtree(dirpath)
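
# A minimal usage sketch for datasource() above, assuming GDAL's Python
# bindings are available; the corner coordinates and cache directory are
# illustrative only. The returned dataset (or None outside coverage) can be
# read into an array for further processing.
dem = datasource(37, -123, '/tmp/ned10m-cache')
if dem is not None:
    elevations = dem.ReadAsArray()  # 2-D array of float elevations
    rows, cols = elevations.shape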
def open_zip(datafile):
    with ZipFile('{0}.zip'.format(datafile), 'r') as myzip:
        myzip.extractall()
Example #22
0
    def update_frame(self, data, text, url, mime, pbresp=None):
        self.setEnabled(False)  # prevent from taking focus

        if 'image' in mime:
            self.setContent(QByteArray(data), mime, QUrl(url))

        elif 'html' in mime:
            if '/embed' in url:
                text = text.replace('<head>',
                                    '<head><script>opener=1</script>')
            self.setHtml(text, QUrl(url))

        elif 'json' in mime:
            if text.startswith(")]}'\n"):
                text = text[5:]
            if text.endswith('/*""*/'):
                text = text[:-6]

            text = text.replace('[,',
                                '[null,').replace(',]', ',null]').replace(
                                    ',,', ',null,').replace(',,', ',null,')

            try:
                text = dumps(loads(text), indent=4).encode('utf8')
            except Exception:
                text = text.encode('utf8')

            self.setContent(QByteArray(text), 'text/plain', QUrl(url))

        elif 'protobuf' in mime:
            data = self.parse_protobuf(data, pbresp)
            self.setContent(QByteArray(data), 'text/plain', QUrl(url))

        elif 'kmz' in mime:
            with ZipFile(BytesIO(data)) as fd:
                if fd.namelist() == ['doc.kml']:
                    self.setContent(
                        QByteArray(
                            parseString(fd.read('doc.kml')).toprettyxml(
                                indent='    ').encode('utf8')), 'text/plain',
                        QUrl(url))
                else:
                    self.setContent(
                        QByteArray('\n'.join(fd.namelist()).encode('utf8')),
                        'text/plain', QUrl(url))

        elif data.startswith(b'XHR1'):
            data = BytesIO(data[4:])
            out = b''

            while True:
                header = data.read(6)
                if not header:
                    break
                size, index = unpack('>IBx', header)

                dec = bytes([i ^ 0x9b for i in data.read(size - 2)])
                if dec.startswith(b'\x78\x9c'):
                    dec = decompress(dec)

                out += b'%d %s\n' % (index, b'-' * 15)
                out += self.parse_protobuf(dec, pbresp)

            self.setContent(QByteArray(out), 'text/plain', QUrl(url))

        elif 'text/' in mime:
            self.setContent(QByteArray(data), 'text/plain', QUrl(url))

        else:
            for key in (0x9b, 0x5f):
                dec = bytes([i ^ key for i in data])

                try:
                    dec = decompress(dec, -15)
                except Exception:
                    try:
                        dec = decompress(dec)
                    except Exception:
                        pass

                dec = self.parse_protobuf(dec, pbresp)
                if dec:
                    break

            if not dec:
                dec = run(['hexdump', '-C'], input=data, stdout=PIPE).stdout

            self.setContent(QByteArray(dec[:500000]), 'text/plain', QUrl(url))

        self.setEnabled(True)
    author={Wong, Alex and Safa Cicek and Soatto, Stefano},
    booktitle={Advances in neural information processing systems},
    year={2020}
}
'''
import os, gdown
from zipfile import ZipFile

MONODEPTH2_STEREO_MODEL_URL = 'https://drive.google.com/uc?id=1ArW1Tr9-Clukepy0_olWw8AHMbigOXTH'

MONODEPTH2_MODELS_DIRPATH = os.path.join('pretrained_models', 'monodepth2')
MONODEPTH2_STEREO_MODEL_FILENAME = 'stereo_640x192.zip'
MONODEPTH2_STEREO_MODEL_FILEPATH = os.path.join(
    MONODEPTH2_MODELS_DIRPATH, MONODEPTH2_STEREO_MODEL_FILENAME)

if not os.path.exists(MONODEPTH2_MODELS_DIRPATH):
    os.makedirs(MONODEPTH2_MODELS_DIRPATH)

if not os.path.exists(MONODEPTH2_STEREO_MODEL_FILEPATH):
    print('Downloading monodepth2 stereo 640x192 model to {}'.format(
        MONODEPTH2_STEREO_MODEL_FILEPATH))
    gdown.download(MONODEPTH2_STEREO_MODEL_URL,
                   MONODEPTH2_STEREO_MODEL_FILEPATH,
                   quiet=False)
else:
    print('Found monodepth2 stereo 640x192 model in {}'.format(
        MONODEPTH2_STEREO_MODEL_FILEPATH))

with ZipFile(MONODEPTH2_STEREO_MODEL_FILEPATH, 'r') as zip_file:
    zip_file.extractall(MONODEPTH2_MODELS_DIRPATH)
def is_kzip(fname):
    try:
        ZipFile(fname)
        return True
    except:
        return False
Example #25
0
def create_workflow(drops, cwl_filename, buffer):
    """
    Create a CWL workflow from a given Physical Graph Template

    A CWL workflow consists of multiple files. A single file describing the
    workflow, and multiple files each describing one step in the workflow. All
    the files are combined into one zip file, so that a single file can be
    downloaded by the user.

    NOTE: CWL only supports workflow steps that are bash shell applications
          Non-BashShellApp nodes are unable to be implemented in CWL
    """

    # search the drops for non-BashShellApp drops,
    # if found, the graph cannot be translated into CWL
    for index, node in enumerate(drops):
        dataType = node.get('dt', '')
        if dataType not in SUPPORTED_CATEGORIES:
            raise Exception('Node {0} has an unsupported category: {1}'.format(
                index, dataType))

    # create list for command line tool description files
    step_files = []

    # create the workflow
    cwl_workflow = cwlgen.Workflow('', label='', doc='', cwl_version='v1.0')

    # create files dictionary
    files = {}

    # look for input and output files in the pg_spec
    for index, node in enumerate(drops):
        command = node.get('command', None)
        dataType = node.get('dt', None)
        outputId = node.get('oid', None)
        outputs = node.get('outputs', [])

        if len(outputs) > 0:
            files[outputs[0]] = "step" + str(index) + "/output_file_0"

    # add steps to the workflow
    for index, node in enumerate(drops):
        dataType = node.get('dt', '')

        if dataType == 'BashShellApp':
            name = node.get('nm', '')
            inputs = node.get('inputs', [])
            outputs = node.get('outputs', [])

            # create command line tool description
            filename = "step" + str(index) + ".cwl"
            contents = create_command_line_tool(node)

            # add contents of command line tool description to list of step files
            step_files.append({"filename": filename, "contents": contents})

            # create step
            step = cwlgen.WorkflowStep("step" + str(index), run=filename)

            # add input to step
            for index, input in enumerate(inputs):
                step.inputs.append(
                    cwlgen.WorkflowStepInput('input_file_' + str(index),
                                             source=files[input]))

            # add output to step
            for index, output in enumerate(outputs):
                step.out.append(
                    cwlgen.WorkflowStepOutput('output_file_' + str(index)))

            # add step to workflow
            cwl_workflow.steps.append(step)

    # put workflow and command line tool description files all together in a zip
    zipObj = ZipFile(buffer, 'w')
    for step_file in step_files:
        zipObj.writestr(step_file["filename"], six.b(step_file["contents"]))
    zipObj.writestr(cwl_filename, six.b(cwl_workflow.export_string()))
    zipObj.close()
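
# A minimal usage sketch for create_workflow() above, assuming the drops list
# follows the physical-graph-template shape the function expects ('dt', 'nm',
# 'command', 'inputs', 'outputs' keys) and that the module-level
# SUPPORTED_CATEGORIES includes 'BashShellApp'. The node contents and output
# path are illustrative only.
from io import BytesIO

buffer = BytesIO()
example_drops = [
    {'dt': 'BashShellApp', 'nm': 'hello', 'command': 'echo hello',
     'inputs': [], 'outputs': ['out_0']},
]
create_workflow(example_drops, 'workflow.cwl', buffer)
with open('cwl_workflow.zip', 'wb') as fh:
    fh.write(buffer.getvalue())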
Example #26
0
    def __enter__(self):
        self.open_zip = ZipFile(self.filename, self.mode)
        return self
Example #27
0
        EMBEDDINGS[word] = vector

with open("utils/glove.twitter.27B.25d.2.txt", 'r', encoding='utf8') as f:
    for line in f:
        values = line.split()
        word = values[0]
        vector = values[1:]
        EMBEDDINGS[word] = vector

with open("utils/glove.twitter.27B.25d.3.txt", 'r', encoding='utf8') as f:
    for line in f:
        values = line.split()
        word = values[0]
        vector = values[1:]
        EMBEDDINGS[word] = vector

# word_list = EMBEDDINGS.keys()

# with open ('glove.txt', 'a', encoding='utf-8') as out_file:
#     out_file.write('\n'.join(word_list))

file_path = "utils/glove.zip/glove.txt"
if ".zip/" in file_path:
    archive_path = os.path.abspath(file_path)
    split = archive_path.split(".zip/")
    archive_path = split[0] + ".zip"
    path_inside = split[1]
    archive = ZipFile(archive_path, "r")
    WORD_LIST = archive.read(path_inside).decode("utf8").split("\n")

WORD_LIST = [x.strip() for x in WORD_LIST]
Example #28
0
        print("MBSpeech dataset folder already exists")
        sys.exit(0)
    else:
        bible_books = ['01_Genesis', '02_Exodus', '03_Leviticus']
        for bible_book_name in bible_books:
            bible_book_file_name = '%s.zip' % bible_book_name
            bible_book_file_path = os.path.join(datasets_path,
                                                bible_book_file_name)
            if not os.path.isfile(bible_book_file_path):
                url = "https://s3.us-east-2.amazonaws.com/bible.davarpartners.com/Mongolian/" + bible_book_file_name
                download_file(url, bible_book_file_path)
            else:
                print("'%s' already exists" % bible_book_file_name)

            print("extracting '%s'..." % bible_book_file_name)
            zipfile = ZipFile(bible_book_file_path)
            zipfile.extractall(datasets_path)

    dataset_csv_file_path = os.path.join(datasets_path,
                                         '%s-csv.zip' % dataset_name)
    dataset_csv_extracted_path = os.path.join(datasets_path,
                                              '%s-csv' % dataset_name)
    if not os.path.isfile(dataset_csv_file_path):
        url = "https://www.dropbox.com/s/dafueq0w278lbz6/%s-csv.zip?dl=1" % dataset_name
        download_file(url, dataset_csv_file_path)
    else:
        print("'%s' already exists" % dataset_csv_file_path)

    print("extracting '%s'..." % dataset_csv_file_path)
    zipfile = ZipFile(dataset_csv_file_path)
    zipfile.extractall(datasets_path)
Example #29
0
def unzip(filepath, dest):
    zipfile = ZipFile(filepath)
    zipfile.extractall(dest)
    zipfile.close()
# Step-3: give authorization to KaggleApi
from kaggle.api.kaggle_api_extended import KaggleApi
api = KaggleApi()
api.authenticate()

!chmod 600 ~/.kaggle/kaggle.json

# Step-4: Download datasets from kaggle using API
# !kaggle datasets download -d ashishjangra27/face-mask-12k-images-dataset
!kaggle datasets download -d prithwirajmitra/covid-face-mask-detection-dataset

"""# **2. Load Dataset**"""

# unzip the dataset
from zipfile import ZipFile
zf = ZipFile('/content/covid-face-mask-detection-dataset.zip')
zf.extractall('/content/sample_data') #save files in selected folder
zf.close()

"""# **3. Import Libraries**"""

import os
import cv2
import numpy as np
import random
import keras

from imutils import paths
import matplotlib.pyplot as plt
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array