def __init__(self, sparkContext: SparkContext, InputLocation: str, Offset: int, splitter: FileDataSplitter, carryInputData: bool, partitions: int, newLevel: StorageLevel):
    """
    Build a SpatialRDD backed by its JVM counterpart.

    :param sparkContext: SparkContext, the spark context
    :param InputLocation: str, location of the file to load
    :param Offset: int, offset passed through to the JVM loader
        (presumably the geometry column offset — confirm against the JVM API)
    :param splitter: FileDataSplitter, file data splitter used to split the data
    :param carryInputData: bool, whether non-geometry attributes are carried along
    :param partitions: int, the number of partitions
    :param newLevel: StorageLevel, storage level for the underlying RDD
    """
    super().__init__(sparkContext)
    # Translate the Python-side splitter and storage level into JVM objects
    # before handing everything to the JVM SpatialRDD constructor.
    splitter_jvm = FileSplitterJvm(self._jvm, splitter).jvm_instance
    storage_level_jvm = JvmStorageLevel(self._jvm, newLevel).jvm_instance
    self._srdd = self._jvm_spatial_rdd(
        self._jsc,
        InputLocation,
        Offset,
        splitter_jvm,
        carryInputData,
        partitions,
        storage_level_jvm
    )
def __init__(self, sparkContext: SparkContext, InputLocation: str, splitter: FileDataSplitter, carryInputData: bool, newLevel: StorageLevel, sourceEpsgCRSCode: str, targetEpsgCode: str):
    """
    Build a SpatialRDD and reproject it from a source CRS to a target CRS.

    :param sparkContext: SparkContext, the spark context
    :param InputLocation: str, location of the file to load
    :param splitter: FileDataSplitter, file data splitter used to split the data
    :param carryInputData: bool, whether non-geometry attributes are carried along
    :param newLevel: StorageLevel, storage level for the underlying RDD
    :param sourceEpsgCRSCode: str, EPSG code the input data is in, e.g. "epsg:4326"
    :param targetEpsgCode: str, EPSG code to transform the SpatialRDD to
    """
    super().__init__(sparkContext)
    # Wrap Python-side enums in their JVM counterparts before the JVM call.
    storage_level_jvm = JvmStorageLevel(self._jvm, newLevel).jvm_instance
    splitter_jvm = FileSplitterJvm(self._jvm, splitter).jvm_instance
    self._srdd = self._jvm_spatial_rdd(
        self._jsc,
        InputLocation,
        splitter_jvm,
        carryInputData,
        storage_level_jvm,
        sourceEpsgCRSCode,
        targetEpsgCode
    )
def __init__(self, sparkContext: SparkContext, InputLocation: str, startOffset: int, endOffset: int, splitter: FileDataSplitter, carryInputData: bool, newLevel: StorageLevel, sourceEpsgCRSCode: str, targetEpsgCode: str):
    """
    Build a SpatialRDD from a slice of each record and reproject it between CRSs.

    :param sparkContext: SparkContext instance
    :param InputLocation: str, location of the file to load
    :param startOffset: int, starting offset
    :param endOffset: int, ending offset
    :param splitter: FileDataSplitter, data file splitter
    :param carryInputData: bool, if the spatial rdd should keep non geometry attributes
    :param newLevel: StorageLevel, storage level for the underlying RDD
    :param sourceEpsgCRSCode: str, EPSG code the loaded file is in, e.g. "epsg:4326" (WGS84)
    :param targetEpsgCode: str, EPSG code to transform the SpatialRDD to
    """
    super().__init__(sparkContext)
    # Convert the Python-side splitter and storage level into JVM objects.
    jvm_file_splitter = FileSplitterJvm(self._jvm, splitter).jvm_instance
    jvm_storage_level = JvmStorageLevel(self._jvm, newLevel).jvm_instance
    self._srdd = self._jvm_spatial_rdd(
        self._jsc,
        InputLocation,
        startOffset,
        endOffset,
        jvm_file_splitter,
        carryInputData,
        jvm_storage_level,
        sourceEpsgCRSCode,
        targetEpsgCode
    )
def __init__(self, sparkContext: SparkContext, InputLocation: str, splitter: FileDataSplitter, carryInputData: bool):
    """
    Build a SpatialRDD from a file, with no explicit partitioning or storage level.

    :param sparkContext: SparkContext instance
    :param InputLocation: str, location of the file to load
    :param splitter: FileDataSplitter, data file splitter
    :param carryInputData: bool, if the spatial rdd should keep non geometry attributes
    """
    super().__init__(sparkContext)
    # Hand the JVM-wrapped splitter straight to the JVM SpatialRDD constructor.
    file_splitter = FileSplitterJvm(self._jvm, splitter)
    self._srdd = self._jvm_spatial_rdd(
        self._jsc,
        InputLocation,
        file_splitter.jvm_instance,
        carryInputData
    )
def __init__(self, sparkContext: SparkContext, InputLocation: str, splitter: FileDataSplitter, carryInputData: bool):
    """
    Build a SpatialRDD from a file, with no explicit partitioning or storage level.

    :param sparkContext: SparkContext, the spark context
    :param InputLocation: str, location of the file to load
    :param splitter: FileDataSplitter, file data splitter used to split the data
    :param carryInputData: bool, whether non-geometry attributes are carried along
    """
    super().__init__(sparkContext)
    # Resolve the JVM splitter object once, then construct the JVM SpatialRDD.
    jvm_splitter_instance = FileSplitterJvm(self._jvm, splitter).jvm_instance
    self._srdd = self._jvm_spatial_rdd(
        self._jsc,
        InputLocation,
        jvm_splitter_instance,
        carryInputData
    )
def __init__(self, sparkContext: SparkContext, InputLocation: str, Offset: int, splitter: FileDataSplitter, carryInputData: bool, partitions: int):
    """
    Build a partitioned SpatialRDD backed by its JVM counterpart.

    :param sparkContext: SparkContext instance
    :param InputLocation: str, location of the file to load
    :param Offset: int, point offset
    :param splitter: FileDataSplitter, data file splitter
    :param carryInputData: bool, if the spatial rdd should keep non geometry attributes
    :param partitions: int, number of partitions
    """
    super().__init__(sparkContext)
    jvm_splitter = FileSplitterJvm(self._jvm, splitter).jvm_instance
    # Use self._jsc (set by super().__init__) rather than reaching into
    # sparkContext._jsc directly — every sibling constructor does the same,
    # and it keeps all JVM access going through this wrapper's own state.
    self._srdd = self._jvm_spatial_rdd(
        self._jsc,
        InputLocation,
        Offset,
        jvm_splitter,
        carryInputData,
        partitions
    )
def __init__(self, sparkContext: SparkContext, InputLocation: str, splitter: FileDataSplitter, carryInputData: bool, partitions: int, newLevel: StorageLevel):
    """
    Build a partitioned SpatialRDD persisted at the given storage level.

    :param sparkContext: SparkContext instance
    :param InputLocation: str, location of the file to load
    :param splitter: FileDataSplitter, data file splitter
    :param carryInputData: bool, if the spatial rdd should keep non geometry attributes
    :param partitions: int, number of partitions
    :param newLevel: StorageLevel, storage level for the underlying RDD
    """
    super().__init__(sparkContext)
    # JVM-side wrappers for the splitter enum and the storage level.
    storage_jvm = JvmStorageLevel(self._jvm, newLevel).jvm_instance
    splitter_jvm = FileSplitterJvm(self._jvm, splitter).jvm_instance
    self._srdd = self._jvm_spatial_rdd(
        self._jsc,
        InputLocation,
        splitter_jvm,
        carryInputData,
        partitions,
        storage_jvm
    )