Example #1
    def from_pretrained(self):
        checkpoint_path = module_path + "/{}.pt".format(self.model_name)
        if not os.path.exists(checkpoint_path):
            checkpoint_url = f'https://drive.google.com/uc?id={model_urls[self.model_name]}'
            gdown.cached_download(checkpoint_url, checkpoint_path)
        self.model.load_state_dict(torch.load(checkpoint_path, map_location=self.device))
        return self
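A usage sketch: the surrounding class is not shown in the excerpt, so ModelWrapper, module_path, and model_urls below are hypothetical stand-ins for the names the method assumes.

model_urls = {"resnet50": "<google-drive-file-id>"}  # hypothetical id table
wrapper = ModelWrapper(model_name="resnet50", device="cpu")  # hypothetical class
wrapper.from_pretrained()  # downloads the .pt once, then loads the local copy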
Example #2
def setup_repository():
    # Downloading, extracting models.
    models_url = 'https://drive.google.com/uc?id=1QJZWF9CzgOiYzjzsRSu2LOkrzi2S6j_U'
    models_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               'resources', 'models')
    os.makedirs(models_path, exist_ok=True)
    md5 = '8920cc50fee3505e958307fa11088c0d'
    models_archive_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'models.zip')
    gdown.cached_download(url=models_url, path=models_archive_path, md5=md5)
    gdown.extractall(path=models_archive_path, to=models_path)
    os.remove(models_archive_path)

    # Setting up the data folder with runtime_config.ini file
    data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             'resources', 'data')
    os.makedirs(data_path, exist_ok=True)
    runtime_config_path = os.path.join(data_path, 'runtime_config.ini')
    if os.path.exists(runtime_config_path):
        os.remove(runtime_config_path)
    with open(runtime_config_path, 'w') as pfile:
        pfile.write("[Predictions]\n")
        pfile.write("non_overlapping=true\n")
        pfile.write("reconstruction_method=probabilities #probabilities, thresholding\n")
        pfile.write("reconstruction_order=resample_first #resample_first, resample_second\n")
        pfile.write("probability_threshold=0.4\n")
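The INI file written here can be read back with the standard-library configparser; a minimal sketch (note the inline "#" comments require inline_comment_prefixes, otherwise they end up inside the parsed values):

import configparser

config = configparser.ConfigParser(inline_comment_prefixes=('#',))
config.read('resources/data/runtime_config.ini')  # path written by setup_repository()
threshold = config.getfloat('Predictions', 'probability_threshold')  # 0.4
method = config.get('Predictions', 'reconstruction_method')          # 'probabilities'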
Example #3
def download_and_load_model(model_files) -> RecursiveScriptModule:
    """

    Downloads and torch.jit.load the model from google drive, the downloaded model is saved in /tmp
        since in heroku we get /tmp to save all our stuff, if the app is not running in production
        the model must be saved in load storage, hence the model is directly loaded

    Args:
        model_files: the dict containing the model information

    Returns:
        (RecursiveScriptModule): the loaded torch.jit model
    """
    if "PRODUCTION" in os.environ:
        logger.info(
            f"=> Downloading Model {model_files['model_file']} from {model_files['model_url']}"
        )

        # heroku gives you `/tmp` to store files, which can be cached
        model_path: Path = Path("/tmp") / f"{model_files['model_file']}.pt"
        if not model_path.exists():
            gdown.cached_download(url=model_files["model_url"], path=model_path)

        logger.info(f"=> Loading {model_files['model_file']} from download_cache")
        model: RecursiveScriptModule = torch.jit.load(str(model_path))
    else:
        logger.info(f"=> Loading {model_files['model_file']} from Local")
        model = torch.jit.load(
            str((Path("models") / (model_files["model_file"] + ".pt")))
        )

    return model
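A usage sketch; the dict shape is inferred from the keys the function reads, and the file id is a hypothetical placeholder:

model_files = {
    "model_file": "sentiment_classifier",  # hypothetical name
    "model_url": "https://drive.google.com/uc?id=<file-id>",  # hypothetical id
}
model = download_and_load_model(model_files)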
Example #4
    def creator(path):
        train_path = gdown.cached_download(train_url)
        dev_path = gdown.cached_download(dev_url)
        test_path = gdown.cached_download(test_url)

        dataset = {}
        for split in ("train", "dev", "test"):
            data_path = {
                "train": train_path,
                "dev": dev_path,
                "test": test_path
            }[split]
            with io.open(data_path, "rt", encoding="utf-8") as f:
                data = [json.loads(line) for line in f.readlines()]
            temp = []
            for x in data:
                answer_key = x["answerKey"] if split != "test" else ""
                options = {
                    choice["label"]: choice["text"]
                    for choice in x["question"]["choices"]
                }
                stem = x["question"]["stem"]
                temp.append({
                    "id": x["id"],
                    "answer_key": answer_key,
                    "options": options,
                    "stem": stem
                })
            dataset[split] = temp

        with io.open(path, "wb") as f:
            pickle.dump(dataset, f)
        return dataset
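This creator follows a build-once, cache-as-pickle pattern: it assembles the dataset, dumps it to path, and returns it. A minimal sketch of the cache logic such a creator assumes (load_cached and the cache path are hypothetical):

import os
import pickle

def load_cached(path, creator):
    # Rebuild via creator() only when no pickle has been cached yet.
    if os.path.exists(path):
        with open(path, "rb") as f:
            return pickle.load(f)
    return creator(path)

dataset = load_cached("/tmp/dataset.pkl", creator)  # hypothetical cache path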
Example #5
def setup_repository():
    # Downloading, extracting models.
    models_url = 'https://drive.google.com/uc?id=1DBIl8JyXEo6YdM9uNyo3vrv5T2WsYSXT'
    models_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               'resources', 'models')
    os.makedirs(models_path, exist_ok=True)
    md5 = '434775bebd64910e01f4198eab251666'
    models_archive_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'models.zip')
    gdown.cached_download(url=models_url, path=models_archive_path, md5=md5)
    gdown.extractall(path=models_archive_path, to=models_path)
    os.remove(models_archive_path)

    # Setting up the data folder with runtime_config.ini file
    data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             'resources', 'data')
    os.makedirs(data_path, exist_ok=True)
    runtime_config_path = os.path.join(data_path, 'runtime_config.ini')
    if os.path.exists(runtime_config_path):
        os.remove(runtime_config_path)
    with open(runtime_config_path, 'w') as pfile:
        pfile.write("[Predictions]\n")
        pfile.write("non_overlapping=true\n")
        pfile.write("reconstruction_method=probabilities #probabilities, thresholding\n")
        pfile.write("reconstruction_order=resample_first #resample_first, resample_second\n")
        pfile.write("probability_threshold=0.4\n")
Example #6
def get_bag_file(bag_id):
    if bag_id == "dynamic.desk":
        bag_file = gdown.cached_download(
            url="https://drive.google.com/uc?id=1vaPyJERDNFY7W8VUBT3JvPNXHpZ6Bqun",  # NOQA
            md5="5bfb6eb7f80773dd2b8a6b16a93823e9",
        )
    elif bag_id == "static.robot":
        bag_file = gdown.cached_download(
            url="https://drive.google.com/uc?id=1DwSQLhrtciKCO566R2TqdPc81QOi7tlN",  # NOQA
            md5="ec155c5eacab3d90e72d7b1286c7aff3",
        )
    elif bag_id == "static.desk":
        bag_file = gdown.cached_download(
            url="https://drive.google.com/uc?id=1mArVTWl2f0Uws_mRoCtDzttV2s6AahZa",  # NOQA
            md5="a70a792577447a414c8ac7fe5f4aa316",
        )
    elif bag_id == "static.desk.topdown":
        bag_file = gdown.cached_download(
            url="https://drive.google.com/uc?id=1UxUg4IozQvQNrCALXkzk23v3fBjpFCqZ",  # NOQA
            md5="3625c11cd130a06557f38b8ff390882e",
        )
    else:
        raise ValueError(f"Unknown bag_id: {bag_id}")
    return bag_file
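A usage sketch; with no explicit path, gdown.cached_download stores the file in its default cache directory (~/.cache/gdown) and returns that location, so repeated calls reuse the same download:

bag_file = get_bag_file("static.desk")
print(bag_file)  # path inside ~/.cache/gdown, verified against the md5 above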
Example #7
def model_init_app(reset=False):
    model_path = current_app.config["MODEL_DIRECTORY"]

    # check if model path exists
    if not os.path.exists(model_path):

        # check if parent directory exists
        parent_dir = os.path.dirname(model_path)
        if not os.path.exists(parent_dir):
            os.mkdir(parent_dir)
            current_app.logger.info(f"Created parent dir {parent_dir}")

        # download and unzip to correct path
        url = 'https://drive.google.com/uc?id=1XwjMGcYIg2qwEKHsC7uSmZayHvnEFhyg'
        output = os.path.join(parent_dir, 'warpgan_pretrained.zip')

        current_app.logger.debug("Downloading pretrained model")
        gdown.cached_download(url,
                              output,
                              quiet=False,
                              postprocess=gdown.extractall)
        current_app.logger.debug("Download complete")

        os.remove(output)  # remove downloaded zip after extraction
    else:
        current_app.logger.info("Pretrained model found - loading local file")

    if reset:
        pass  # placeholder: reset handling is not implemented in this excerpt

    network = WarpGAN()
    network.load_model(model_path)

    return network
Example #8
    def download(self):
        gdown.cached_download(
            url="https://drive.google.com/uc?id=1l0ki7dX1WxcmV5Tfm41FPW-yk-wKUfne",  # NOQA
            path=self.root_dir + ".zip",
            postprocess=gdown.extractall,
        )
Example #9
def load_dataset():
    """Load and return the dataset.

    Returns
    -------
    dataset : tf.data.Dataset
        Parsed examples read from the downloaded GZIP JSON-lines file.
    """
    destination = os.path.join(DATA_DIR, 'dataset.jsons.gz')
    gdown.cached_download(dataset_url,
                          destination,
                          md5='a8f860b2dc400e14d4e7083775c1308a',
                          quiet=False)

    if not os.path.isfile(destination):
        # Fallback: fetch directly if the cached download did not produce the file.
        urllib.request.urlretrieve(dataset_url, destination)

    source_file = tf.constant([destination])

    dataset = tf.data.TextLineDataset(filenames=source_file,
                                      compression_type='GZIP')
    dataset = dataset.map(
        lambda x: tf.py_function(_serialize, [x], [tf.string]))
    dataset = dataset.map(_parse)

    return dataset
Example #10
def pr2_urdfpath():
    gdown.cached_download(
        url='https://drive.google.com/uc?id=1zy4C665o6efPko7eMk4XBdHbvgFfdC-6',
        path=osp.join(download_dir, 'pr2_description.tar.gz'),
        md5='e4fb915accdb3568a5524c92e9c35c9a',
        postprocess=gdown.extractall,
    )
    return osp.join(download_dir, 'pr2_description/pr2.urdf')
Example #11
def panda_urdfpath():
    gdown.cached_download(
        url='https://drive.google.com/uc?id=1h6ib9jpEUNa1xB2DNrnRQtqpSD2Rj9bz',
        path=osp.join(download_dir, 'franka_description.tar.gz'),
        md5='3de5bd15262b519e3beb88f1422032ac',
        postprocess=gdown.extractall,
    )
    return osp.join(download_dir, 'franka_description/panda.urdf')
Example #12
def main():
    # Share link: https://drive.google.com/file/d/1zo4BkS8wqqbc7gxYQMedhqCiQUb51LAp/view?usp=sharing
    url = "https://drive.google.com/uc?id=0B9P1L--7Wd2vNm9zMTJWOGxobkU"
    output = "20150428_collected_images.tgz"
    gdown.download(url, output, quiet=False)
    md5 = "fa837a88f0c40c513d975104edf3da17"
    gdown.cached_download(url, output, md5=md5, postprocess=gdown.extractall)
    gdown.download(url)  # no output path: saves under the remote filename
Example #13
def contrib_file(download_dir):
    """Download the contributon file
    """
    contrib_file = download_dir / 'contrib.deeplift.h5'
    url = 'https://drive.google.com/uc?id=1-70VlFvcOCwwt4SrEXoqkaXyBQPnlQGZ'
    md5 = '56e456f0d1aeffc9d3fcdfead0520c17'
    gdown.cached_download(url, str(contrib_file), md5=md5)
    return contrib_file
Example #14
def fetch_urdfpath():
    gdown.cached_download(
        url='https://drive.google.com/uc?id=1y7Jc3QoVW6J072CrSNupfKpyLp4NNxuH',
        path=osp.join(download_dir, 'fetch_description.tar.gz'),
        md5='fbe29ab5f3d029d165a625175b43a265',
        postprocess=gdown.extractall,
    )
    return osp.join(download_dir, 'fetch_description/fetch.urdf')
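A note on the shared pattern in Examples #10, #11, and #14: gdown.cached_download verifies the archive against its md5, postprocess=gdown.extractall unpacks it next to the archive, and the helper returns a path inside the extracted tree:

urdf_path = fetch_urdfpath()  # downloads and extracts only on the first call
print(urdf_path)              # <download_dir>/fetch_description/fetch.urdf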
Example #15
def gdrive_download(url, folder, filename):
    cache_dir = os.path.join(os.getenv("HOME"), ".cache", "torchfly", folder)
    os.makedirs(cache_dir, exist_ok=True)

    filepath = os.path.join(cache_dir, filename)
    gdown.cached_download(url, filepath, quiet=False)

    return filepath
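A usage sketch; the folder and filename are caller-chosen, and the file id is a hypothetical placeholder:

path = gdrive_download(
    "https://drive.google.com/uc?id=<file-id>",  # hypothetical id
    folder="gpt2", filename="model.pth")
# -> ~/.cache/torchfly/gpt2/model.pth, downloaded only on the first call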
Example #16
def download(url, path=None, cached=True):
    # NOTE: the `cached` flag is accepted but never used in this excerpt.
    import gdown
    # kitti_url = 'https://drive.google.com/uc?id=1QHvE8oHlHqXB97RHlulLuWCmleE0wZ8B'
    # kitti_out = 'kitti.tar.bz2'
    gdown.cached_download(url,
                          path=path,
                          quiet=False,
                          proxy=False,
                          postprocess=gdown.extractall)
Example #17
    def download(cls) -> None:
        url: str = "https://drive.google.com/uc?id=1if4VoEXNx9W3XCn0Y7Fp15B4GpcYbyYi"  # NOQA
        md5 = None  # 'c9122e177a766a9691cab13c5cda41a9'
        gdown.cached_download(
            url=url,
            path=cls._root_dir + ".zip",
            md5=md5,
            postprocess=gdown.extractall,
        )
Example #18
def bunny_objpath():
    target_path = osp.join(download_dir, 'mesh', 'bunny.obj')
    gdown.cached_download(
        url='https://drive.google.com/uc?id=18aAYzBglAGaSwes6oAENcufD8KC5XDph',
        path=target_path,
        md5='19bd31bde1fcf5242a8a82ed4ac03c72',
        quiet=True,
    )
    return target_path
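A usage sketch; loading the OBJ is shown here with trimesh, assuming it is installed (any OBJ loader would do):

import trimesh  # assumed to be installed

mesh = trimesh.load(bunny_objpath())
print(mesh.vertices.shape)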
Example #19
    def download(self):
        logger.info(
            f"=> Downloading GDrive file {self.file_name} from {self.gdrive_url}"
        )

        file_path: Path = get_temp_folder() / f"{self.file_name}"
        # cached_download already skips existing files; the guard just avoids the call.
        if not file_path.exists():
            gdown.cached_download(url=self.gdrive_url, path=file_path)

        return file_path
Example #20
def download_ckpt(data_dir, name, download_info):
    if os.path.exists(join(data_dir, 'checkpoints', name)):
        print('\nINFO: {} model already downloaded.'.format(name))
    else:
        print('\nINFO: Downloading {} model...'.format(name))
        url = 'https://drive.google.com/uc?id=' + download_info['url']
        zip_output = join(data_dir, 'checkpoints', name + '.zip')
        md5 = download_info['md5']
        gdown.cached_download(url, zip_output, md5=md5, postprocess=gdown.extractall, quiet=True)
        os.remove(zip_output)
Example #21
    def _download_unity(self):
        url = "https://drive.google.com/uc?id=" + APP_GDRIVE_ID[platform]
        os.makedirs("binary", exist_ok=True)
        # zip_path = os.path.join("binary", APP_FILE_NAME[platform])
        zip_path = APP_FILE_NAME[platform]
        if os.path.exists(zip_path):
            logger.info("%s is already downloaded.", zip_path)
        else:
            logger.info("Downloading Unity app from %s", url)
            gdown.cached_download(url, zip_path, postprocess=gdown.extractall)
Example #22
    def download(cls) -> None:
        url: str = "https://drive.google.com/uc?id=1PKEJ8SVIVLukvmeIBexp6_XmJetHoOf2"  # NOQA
        md5: str = "540c37435e4a16546850a83690b2db9b"

        gdown.cached_download(
            url=url,
            path=cls._root_dir + ".zip",
            md5=md5,
            postprocess=gdown.extractall,
        )
Example #23
def _SSD_VGG_VOC_weights():
    import gdown

    url = 'https://drive.google.com/uc?id=1GP6i9lcEqK3mOgr7soxruLhWpITxShge'
    output = Path.home() / '.anchors-jax/ssd-coco.jax'
    output.parent.mkdir(exist_ok=True, parents=True)
    gdown.cached_download(url, str(output))

    params = pickle.load(output.open('rb'))

    return _split_bb_ssd_params(params)
Example #24
    def get_path(cls, quiet=True):
        """
        Returns the mnist folder, downloading the mnist data if needed.
        """
        if cls._path is None:
            url = 'https://drive.google.com/uc?id=1im9_agM-hHl8dKQU5uWh054rScjPINtz'  # jeff hykin's google drive copy of mnist
            output = f'{temp_folder_path}/mnist.zip'
            gdown.cached_download(url, output, postprocess=gdown.extractall, quiet=quiet)
            cls._path = temp_folder_path + "/mnist"
        return cls._path
Example #25
    def creator(path):
        dataset = {}
        for split in ('train', 'dev', 'test'):
            en_path = gdown.cached_download(en_url.format(split))
            ja_path = gdown.cached_download(ja_url.format(split))
            with io.open(en_path, 'rt') as en, io.open(ja_path, 'rt') as ja:
                dataset[split] = [(x.rstrip(os.linesep), y.rstrip(os.linesep))
                                  for x, y in zip(en, ja)]

        with io.open(path, 'wb') as f:
            pickle.dump(dataset, f)
        return dataset
Example #26
def setup_repository():
    # Downloading, extracting models.
    models_url = 'https://drive.google.com/uc?id=1ga08d8QQfAHOgTSKiPpIN7f_owuicNUA'
    models_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               'resources', 'models')
    os.makedirs(models_path, exist_ok=True)
    md5 = '55c66e000de9077e483635029f740901'
    models_archive_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'models.zip')
    gdown.cached_download(url=models_url, path=models_archive_path, md5=md5)
    gdown.extractall(path=models_archive_path, to=models_path)
    os.remove(models_archive_path)
Example #27
    def __init__(self):
        root_dir = morefusion.utils.get_data_path(
            "wkentaro/morefusion/ycb_video/real_data/20191212_163242.566559922"
        )
        gdown.cached_download(
            url="https://drive.google.com/uc?id=1llWN7MOLzJZnaRDD4XGSmRWAFBtP3P9z",  # NOQA
            md5="a773bb947377811b2b66ab9bc17f4d8d",
            path=root_dir + ".zip",
        )
        self._dataset = morefusion.datasets.MyRealRGBDPoseEstimationDataset(
            root_dir=root_dir,
        )
Example #28
    def prepare_data(self):
        data_dir = join(self.root_dir, self.DIR_NAME)
        if os.path.exists(data_dir) and os.path.isdir(data_dir):
            if get_dir_md5hash(data_dir) == self.DIR_HASH:
                return True
            raise ValueError('Wrong checksum, delete %s dir' % self.root_dir)
        gdown.cached_download(
            'https://drive.google.com/uc?id=1-yZki-hyVcHKWB4VfqAUaAfBazPyemqN',
            os.path.join(self.root_dir, 'acdc.tar.gz'),
            md5=self.TAR_HASH,
            postprocess=gdown.extractall)
        return True
Example #29
    def _download_dataset(self) -> None:
        base_url = self.module.base_gdrive_url
        all_name_url_md5 = [(item[0], base_url + item[1], item[2])
                            for item in self.module.name_gdriveid_md5]

        for name, gdrive_url, md5 in all_name_url_md5:
            if self.verbose:
                print("Downloading " + name + "...")

            filepath = self.root / name
            # cached_download both downloads and md5-verifies the file; a separate
            # plain gdown.download call here would fetch every archive twice.
            gdown.cached_download(gdrive_url, str(filepath), md5=md5)

            self._extract_archive(filepath, remove_archive=True)
Example #30
def load_state_dict_from_url(url, name):
    # mostly code from torch.hub
    torch_home = _get_torch_home()
    model_dir = os.path.join(torch_home, "checkpoints")

    os.makedirs(model_dir, exist_ok=True)

    cached_file = os.path.join(model_dir, f"{name}.pth")
    gdown.cached_download(url, path=cached_file, quiet=False)

    state_dict = torch.load(cached_file)["state_dict"]
    # Strip the "module." prefix left by nn.DataParallel checkpoints.
    state_dict = OrderedDict(
        {key[7:]: value for key, value in state_dict.items()})
    return state_dict
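A usage sketch; the file id, checkpoint name, and model are hypothetical placeholders:

state_dict = load_state_dict_from_url(
    "https://drive.google.com/uc?id=<file-id>",  # hypothetical id
    name="resnet50_custom")
model.load_state_dict(state_dict)  # model: a matching torch.nn.Module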