Example #1
import pathlib
import tarfile
import urllib.request


def main():
    root = pathlib.Path(__file__).parent / "visda17"
    root.mkdir(exist_ok=True)

    files = [
        "train.tar",
        "validation.tar",
        "test.tar",
    ]

    for f in files:
        print(f"[*] Downloading {f}...")
        # urlretrieve returns the local file path and the response headers
        archive, _ = urllib.request.urlretrieve(
            f"http://csr.bu.edu/ftp/visda17/clf/{f}", (root / f).as_posix()
        )
        print(f"[*] Extracting {f}...")
        # the tarfile module has no top-level extract(); open the archive first
        with tarfile.open(archive) as tar:
            tar.extractall(root)
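A hedged aside, assuming Python 3.12 or newer: tar archives can contain absolute or ".." member paths, so the standard library's extraction filter is worth passing when the archive is not fully trusted. A minimal sketch of the extraction step above with that filter:

# Assumes Python 3.12+, where TarFile.extractall() accepts the `filter` argument.
with tarfile.open(archive) as tar:
    # "data" rejects absolute paths, parent-directory escapes and special files
    tar.extractall(root, filter="data")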
Example #2

import os
import tarfile


def extract_tar(tar, dest='.', strip_level=0):
    """
    Extracts a tar file to dest, optionally stripping leading path components.

    :param tar: open tarfile.TarFile object
    :param dest: destination folder
    :param strip_level: number of leading path components to remove from each member name
    :return: None
    """
    for member in tar.getmembers():
        if member.isreg():
            # tar member names always use "/" as the separator, regardless of OS
            name_split = member.name.split("/")[strip_level:]
            if not name_split:
                raise ValueError(f'Cannot remove {strip_level}'
                                 f' levels from filename: {member.name}')
            member.name = os.path.join(*name_split)
            print(f'Extracting: {member.name}')
            tar.extract(member, dest)
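A brief usage sketch (the archive name and its internal layout are hypothetical): strip the single top-level directory so the members land directly under the destination.

# Hypothetical archive whose members are stored as "dataset-v1/images/...";
# strip_level=1 drops the "dataset-v1/" prefix on extraction.
with tarfile.open("dataset-v1.tar.gz") as tar:
    extract_tar(tar, dest="data", strip_level=1)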
Example #3
# keep only the files whose name contains "0.C01" (the channel-0 images)
ch0_list = list(filter(lambda x: "0.C01" in x, ch_list))

# GPU configuration: allocate GPU memory on demand instead of grabbing it all up front
configuration = tf.compat.v1.ConfigProto()
configuration.gpu_options.allow_growth = True
# configuration.gpu_options.visible_device_list = "0"

session = tf.compat.v1.Session(config=configuration)

# register the session with the Keras backend
tf.compat.v1.keras.backend.set_session(session)

# load a reference image to determine the image dimensions

ref_C01 = input_dir + ch0_list[0]
# extract the reference image from the input archive (an open TarFile assumed
# to have been created elsewhere in the script, e.g. tar = tarfile.open(...))
tar.extract(ch0_list[0], input_dir)
ref_png = png_dir + ch0_list[0].split("/")[-1][:-4] + ".png"

ref_im = utils.preprocess.bfconvert(ref_C01, ref_png)

dim1 = ref_im.shape[0]
dim2 = ref_im.shape[1]

nuclei_model = utils.model_builder.get_model_3_class(dim1, dim2, input_channels)
nuclei_model.load_weights(nuclei_model_file)

cell_model = utils.model_builder.get_model_3_class(dim1, dim2, 1)
cell_model.load_weights(cell_model_file)


lysosome_model = utils.model_builder.get_model_3_class(dim1, dim2, 1)
Example #4

# List of the strings that are used to add the correct label to each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')

# number of classes to be identified
NUM_CLASSES = 90

# Download the pretrained model archive and extract the frozen inference graph
urllib.request.urlretrieve(download_url + model_tar, model_tar)

with tarfile.open(model_tar) as tar:
    for member in tar.getmembers():
        file_name = os.path.basename(member.name)
        if 'frozen_inference_graph.pb' in file_name:
            tar.extract(member, os.getcwd())

# ## Loading the TensorFlow model into memory

detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(path_to_model, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

# ## Loading the label map
# Label maps map indices to category names

label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
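A hedged follow-up, assuming the same label_map_util helpers from the TensorFlow Object Detection API: the loaded label map is usually converted into a category index mapping class ids to display names, which is what the detection loop uses to label boxes.

# Assumes object_detection.utils.label_map_util from the TF Object Detection API.
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# e.g. category_index[1] -> {'id': 1, 'name': 'person'} for the MSCOCO label map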
Example #5
import bz2
import gzip
import os
import shutil
import subprocess
import tarfile
# 'ipatch' (libInstPatch bindings) and 'InstDB' are project-specific modules
# assumed to be importable from the surrounding application.

def extract_archive (importFile, origFile, extractDir):
    """
    Extracts an archive to a directory, or just copies the file if it is a raw
    instrument file.

    importFile: Source file to extract (if a raw instrument file, it is just copied)
    origFile: Original file name (used if the archive does not contain file names:
              gzip, bzip2 and sfArk); should not contain path elements
    extractDir: Destination extraction directory
    Returns: True if the archive file type is supported, False if not
    """

    # Check if libInstPatch can identify the file
    file = ipatch.File ()
    file.open (importFile, "r")
    fileType = file.identify ()

    # Is it an uncompressed patch file?
    if fileType in PatchTypes:
        # copy raw file into extract directory (using its original name)
        shutil.copy (importFile, extractDir + os.sep + origFile)

    # Is it a CRAM file?
    elif fileType == ipatch.CramFile.__gtype__:
        file = file.convert_type (fileType) # Convert to IpatchFile sub type
        conv = ipatch.CramDecoderConverter () # CRAM decoder converter
        conv.set_property ("path", extractDir) # Set extract directory
        conv.set_property ("strip-paths", True) # Strip paths
        conv.add_input (file)       # Add CRAM file as input
        conv.convert ()             # Decode all files to extractDir

    elif tarfile.is_tarfile (importFile): # Is it tar, tar/gzip or tar/bzip2?
        tar = tarfile.open (importFile, "r")

        for name in tar.getnames (): # Extract each file to extractDir
            tar.extract (name, extractDir)

        tar.close ()

    else:                   # Not a tar file, what is it? - Use 'file' utility
        fd = subprocess.Popen([InstDB.FILE_CMD, '-b', importFile], stdout=subprocess.PIPE).stdout
        out = fd.read ().decode ()  # 'file' output is bytes; decode for the string comparisons below

        if out[:5] == "gzip ":
            fd = gzip.open (importFile)

            fname = extractDir + os.sep + strip_file_ext (origFile, "gz")
            outfd = open (fname, "wb")  # write the decompressed data as bytes

            while 1:
                data = fd.read (COPYBUFSIZE)
                if not data: break
                outfd.write (data)

            outfd.close ()
            fd.close ()
        elif out[:6] == "bzip2 ":
            fd = bz2.BZ2File (importFile)

            fname = extractDir + os.sep + strip_file_ext (origFile, "bz2")
            outfd = open (fname, "wb")  # write the decompressed data as bytes

            while 1:
                data = fd.read (COPYBUFSIZE)
                if not data: break
                outfd.write (data)

            outfd.close ()
            fd.close ()
        elif out[:4] == "Zip ":
            retval = subprocess.call ([InstDB.UNZIP_CMD, "-d", extractDir, "-j",
                                      importFile])
            if retval > 2:
                raise InstDB.ImportError ("Unzip of archive file failed (%d)" % retval)
        elif out[:4] == "RAR ":
            retval = subprocess.call ([InstDB.UNRAR_CMD, "e", importFile,
                                      extractDir])
            if retval != 0:
                raise InstDB.ImportError ("Unrar of archive file failed (%d)" % retval)
        elif out[:6] == "sfArk ":
            # sfarkxtc utility always treats output file name as relative path
            fname = os.path.basename (extractDir) + os.sep \
                + strip_file_ext (origFile, "sfark") + ".sf2"
            retval = subprocess.call ([InstDB.SFARKXTC_CMD, importFile, fname])
            if retval != 0:
                raise InstDB.ImportError ("sfarkxtc failed to extract file (%d)" % retval)
        else:
            return False                # Unknown archive file

    return True
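A brief usage sketch (the file names and the temporary directory are assumptions; InstDB and ipatch come from the surrounding project):

import tempfile

extract_dir = tempfile.mkdtemp(prefix="instdb_")          # scratch extraction directory
if not extract_archive("/tmp/uploads/NicePiano.sfark", "NicePiano.sfark", extract_dir):
    print("Unsupported archive type")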
Example #6
    def readData(self, station):
        dataPath = os.path.expanduser("~/Downloads/Weather/ghcnd_all.tar.gz")
        with tarfile.open(dataPath) as alltar:
            if station:
                # getmember()/extract() are methods of the open TarFile, not the tarfile module
                stationFile = alltar.getmember(station + ".dly")
                alltar.extract(stationFile, "/tmp")
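TarFile.getmember() raises KeyError when the exact member path is not known in advance, so here is a hedged standalone sketch (the paths and station id are assumptions) that searches the member list instead:

import os
import tarfile

station = "USW00094728"   # hypothetical GHCN station id
data_path = os.path.expanduser("~/Downloads/Weather/ghcnd_all.tar.gz")

with tarfile.open(data_path) as alltar:
    # match by suffix so a possible "ghcnd_all/" prefix inside the archive does not matter
    for member in alltar.getmembers():
        if member.name.endswith(station + ".dly"):
            alltar.extract(member, "/tmp")
            break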
Example #7
import os


def extractfile(tar, filename, hashtxt):
    """Extract filename from the open TarFile 'tar' unless a hash-tagged copy already exists."""
    filenamechk = filename + "_" + hashtxt
    if not os.path.isfile(filenamechk):
        tar.extract(filename)             # extracts into the current working directory
        os.rename(filename, filenamechk)
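A short usage sketch (the archive name, member name, and hash source are hypothetical): tag the extracted copy with the archive's content hash so a re-run skips the extraction when nothing has changed.

import hashlib
import tarfile

with open("bundle.tar", "rb") as f:                 # hypothetical archive
    digest = hashlib.sha256(f.read()).hexdigest()[:12]

with tarfile.open("bundle.tar") as tar:
    extractfile(tar, "payload.bin", digest)         # hypothetical member name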
Example #8
import os
import sys
import tarfile
from uuid import uuid4

# ZRTIFI_ONTOLOGY is a module-level constant (the ontology namespace URI) defined elsewhere.

if __name__ == "__main__":
    step_id = str(uuid4())
    dir = os.path.abspath(os.path.join(sys.argv[1], os.pardir))
    file = sys.argv[1]
    if os.path.exists(file):
        if file.endswith(".tar"):
            tfile = tarfile.open(file)
            print("<> <%sstep> <#tar_step_%s> ." % (ZRTIFI_ONTOLOGY, step_id))
            print("<#tar_step_%s> <%sprocess> \"tar\" ." % (step_id, ZRTIFI_ONTOLOGY))
            for filename in tfile.getnames():
                if ".." in filename:
                    print("<#tar_step_%s> <%signored> \"%s\" . " % (step_id, ZRTIFI_ONTOLOGY, filename))
                else:
                    targ_dir = os.path.abspath(os.path.join(dir + os.sep + filename, os.pardir))
                    if not os.path.exists(targ_dir):
                        os.makedirs(targ_dir)
                    # extract into 'dir' so the member ends up at dir/<filename>
                    tfile.extract(filename, dir)
                    file_id = str(uuid4())
                    print("<#file_%s> <http://www.zrtifi.org/internal#next> <sniff> ." % file_id)
                    print("<#file_%s> <http://www.zrtifi.org/internal#nextTarget> <file:%s> ." % (file_id, dir + os.sep + filename))
                    print("<> <%scontains> <#file_%s> ." % (ZRTIFI_ONTOLOGY, file_id))
                    print("<#file_%s> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/ns/dcat#Distribution> ." % file_id)
            print("<#tar_step_%s> <%sstatus> <%ssuccess> ." % (step_id, ZRTIFI_ONTOLOGY, ZRTIFI_ONTOLOGY))
        else:
            print("<#tar_step_%s> <%serror> \"file does not end in .tar\"@en ." % (step_id, ZRTIFI_ONTOLOGY))
            print("<#tar_step_%s> <%sstatus> <%sfailed> ." % (step_id, ZRTIFI_ONTOLOGY, ZRTIFI_ONTOLOGY))
    else:
        print("<#tar_step_%s> <%serror> \"file does not exist\"@en ." % (step_id, ZRTIFI_ONTOLOGY))
        print("<#tar_step_%s> <%sstatus> <%sfailed> ." % (step_id, ZRTIFI_ONTOLOGY, ZRTIFI_ONTOLOGY))