    def _load_from_original_file_all_amazon_datasets(self,
                                                     URM_path,
                                                     metadata_path=None,
                                                     reviews_path=None):
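        """Build a Dataset object from the raw Amazon files.

        URM_path:       comma-separated interaction file (no header), including timestamps.
        metadata_path:  optional item metadata file, loaded as "ICM_metadata".
        reviews_path:   optional review file, loaded as "ICM_reviews".

        The temporary metadata and review files are deleted once loading is done.
        """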
        # Load data from original

        self._print("loading URM")
        URM_all, URM_timestamp, self.item_original_ID_to_index, self.user_original_ID_to_index = load_CSV_into_SparseBuilder(
            URM_path, separator=",", header=False, timestamp=True)

        loaded_URM_dict = {"URM_all": URM_all, "URM_timestamp": URM_timestamp}

        loaded_ICM_dict = {}
        loaded_ICM_mapper_dict = {}

        if metadata_path is not None:
            self._print("loading metadata")
            ICM_metadata, tokenToFeatureMapper_ICM_metadata, _ = self._loadMetadata(
                metadata_path, if_new_item="ignore")

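            # Prune uninformative features: keep only those occurring at least
            # 5 times and in no more than 30% of the items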
            ICM_metadata, _, tokenToFeatureMapper_ICM_metadata = remove_features(
                ICM_metadata,
                min_occurrence=5,
                max_percentage_occurrence=0.30,
                reconcile_mapper=tokenToFeatureMapper_ICM_metadata)

            loaded_ICM_dict["ICM_metadata"] = ICM_metadata
            loaded_ICM_mapper_dict["ICM_metadata"] = tokenToFeatureMapper_ICM_metadata

        if reviews_path is not None:
            self._print("loading reviews")
            ICM_reviews, tokenToFeatureMapper_ICM_reviews, _ = self._loadReviews(
                reviews_path, if_new_item="ignore")

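            # Apply the same frequency-based pruning used for the metadata features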
            ICM_reviews, _, tokenToFeatureMapper_ICM_reviews = remove_features(
                ICM_reviews,
                min_occurrence=5,
                max_percentage_occurrence=0.30,
                reconcile_mapper=tokenToFeatureMapper_ICM_reviews)

            loaded_ICM_dict["ICM_reviews"] = ICM_reviews
            loaded_ICM_mapper_dict["ICM_reviews"] = tokenToFeatureMapper_ICM_reviews

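        # Bundle the URM, the optional ICMs and their feature mappers into a
        # single Dataset; this reader provides no user features (UCM)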
        loaded_dataset = Dataset(
            dataset_name=self._get_dataset_name(),
            URM_dictionary=loaded_URM_dict,
            ICM_dictionary=loaded_ICM_dict,
            ICM_feature_mapper_dictionary=loaded_ICM_mapper_dict,
            UCM_dictionary=None,
            UCM_feature_mapper_dictionary=None,
            user_original_ID_to_index=self.user_original_ID_to_index,
            item_original_ID_to_index=self.item_original_ID_to_index,
            is_implicit=self.IS_IMPLICIT,
        )

        # Clean temp files
        self._print("cleaning temporary files")

        if metadata_path is not None:
            os.remove(metadata_path)

        if reviews_path is not None:
            os.remove(reviews_path)

        self._print("loading complete")

        return loaded_dataset
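
    # Minimal usage sketch (hypothetical reader subclass and file names,
    # shown purely for illustration):
    #
    #   reader = AmazonBooksReader()    # hypothetical concrete reader
    #   dataset = reader._load_from_original_file_all_amazon_datasets(
    #       URM_path="ratings_Books.csv",
    #       metadata_path="meta_Books.json",
    #       reviews_path="reviews_Books.json")
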
    def _load_from_original_file(self):
        # Load data from original

        zipFile_path = self.DATASET_SPLIT_ROOT_FOLDER + self.DATASET_SUBFOLDER

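        # Reuse the local copy of the zip if present, otherwise download it first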
        try:

            dataFile = zipfile.ZipFile(zipFile_path + "ml-100k.zip")

        except (FileNotFoundError, zipfile.BadZipFile):

            self._print("Unable to fild data zip file. Downloading...")

            download_from_URL(self.DATASET_URL, zipFile_path, "ml-100k.zip")

            dataFile = zipfile.ZipFile(zipFile_path + "ml-100k.zip")

        URM_path = dataFile.extract("ml-100k/u.data",
                                    path=zipFile_path + "decompressed/")

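        # Each u.data row is "user \t item \t rating \t timestamp", with no header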
        URM_all, URM_timestamp, item_original_ID_to_index, user_original_ID_to_index = load_CSV_into_SparseBuilder(
            URM_path, separator="\t", header=False, timestamp=True)

        loaded_URM_dict = {"URM_all": URM_all, "URM_timestamp": URM_timestamp}

        loaded_dataset = Dataset(
            dataset_name=self._get_dataset_name(),
            URM_dictionary=loaded_URM_dict,
            ICM_dictionary=None,
            ICM_feature_mapper_dictionary=None,
            UCM_dictionary=None,
            UCM_feature_mapper_dictionary=None,
            user_original_ID_to_index=user_original_ID_to_index,
            item_original_ID_to_index=item_original_ID_to_index,
            is_implicit=self.IS_IMPLICIT,
        )

        self._print("cleaning temporary files")

        shutil.rmtree(zipFile_path + "decompressed", ignore_errors=True)

        self._print("loading complete")

        return loaded_dataset
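
    # Typical entry point, assuming the surrounding DataReader API in which the
    # public load_data() method invokes _load_from_original_file():
    #
    #   dataset = Movielens100KReader().load_data()
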
    def _load_from_original_file(self):
        # Load data from original

        zipFile_path = self.DATASET_SPLIT_ROOT_FOLDER + self.DATASET_SUBFOLDER

        try:

            dataFile = zipfile.ZipFile(zipFile_path + "hetrec2011-movielens-2k-v2.zip")

        except (FileNotFoundError, zipfile.BadZipFile):

            self._print("Unable to find data zip file. Downloading...")

            download_from_URL(self.DATASET_URL, zipFile_path, "hetrec2011-movielens-2k-v2.zip")

            dataFile = zipfile.ZipFile(zipFile_path + "hetrec2011-movielens-2k-v2.zip")

        URM_path = dataFile.extract("user_ratedmovies.dat",
                                    path=zipFile_path + "decompressed/")

        # Unlike u.data above, user_ratedmovies.dat begins with a header row
        URM_all, item_original_ID_to_index, user_original_ID_to_index = load_CSV_into_SparseBuilder(
            URM_path, separator="\t", header=True)

        loaded_URM_dict = {"URM_all": URM_all}

        loaded_dataset = Dataset(
            dataset_name=self._get_dataset_name(),
            URM_dictionary=loaded_URM_dict,
            ICM_dictionary=None,
            ICM_feature_mapper_dictionary=None,
            UCM_dictionary=None,
            UCM_feature_mapper_dictionary=None,
            user_original_ID_to_index=user_original_ID_to_index,
            item_original_ID_to_index=item_original_ID_to_index,
            is_implicit=self.IS_IMPLICIT,
        )

        self._print("cleaning temporary files")

        shutil.rmtree(zipFile_path + "decompressed", ignore_errors=True)

        self._print("loading complete")

        return loaded_dataset