def read_from_text(cls, path, delimiter, nrows, verbose):
    """
    Load RDD from a text file
    """
    # TODO handle nrows, verbose
    cls._entry(path=path, delimiter=delimiter, nrows=nrows)
    sc = CommonSparkContext.spark_context()
    if delimiter is None:
        rdd = sc.textFile(path)
        res = rdd.map(lambda line: line.encode('utf-8'))
    else:
        conf = {'textinputformat.record.delimiter': delimiter}
        rdd = sc.newAPIHadoopFile(
            path,
            "org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
            "org.apache.hadoop.io.Text",
            "org.apache.hadoop.io.Text",
            conf=conf)

        def fixup_line(line):
            return str(line).replace('\n', ' ').strip()

        res = rdd.values().map(lambda line: fixup_line(line))
    lineage = Lineage.init_array_lineage(path)
    return XArrayImpl(res, str, lineage)
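# Usage sketch (illustrative only; assumes read_from_text is exposed as a
# classmethod on XArrayImpl, and the paths below are hypothetical):
#
#   # One element per line (default newline-delimited records).
#   lines = XArrayImpl.read_from_text('hdfs:///logs/app.log',
#                                     delimiter=None, nrows=None, verbose=False)
#
#   # Records separated by a custom delimiter; embedded newlines inside a
#   # record are flattened to spaces by fixup_line.
#   recs = XArrayImpl.read_from_text('hdfs:///logs/app.log',
#                                    delimiter='\x01', nrows=None, verbose=False)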
def _rv_frame(rdd, col_names=None, col_types=None, lineage=None):
    """
    Return a new XFrameImpl containing the rdd, column names, and element types
    """
    # noinspection PyUnresolvedReferences
    lineage = lineage or Lineage.init_frame_lineage(Lineage.RDD, col_names)
    return xframes.xframe_impl.XFrameImpl(rdd, col_names, col_types, lineage)
def load_from_const(cls, value, size):
    """
    Load RDD from const value.
    """
    cls._entry(value=value, size=size)
    values = [value for _ in xrange(0, size)]
    sc = CommonSparkContext.spark_context()
    return cls(XRdd(sc.parallelize(values)),
               type(value),
               Lineage.init_array_lineage(Lineage.CONST))
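# Usage sketch (illustrative only; assumes load_from_const is a classmethod on
# XArrayImpl): the element type is taken from the value itself.
#
#   ones = XArrayImpl.load_from_const(1.0, 5)   # five float elements, CONST lineage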
def load_from_iterable(cls, values, dtype, ignore_cast_failure):
    """
    Load RDD from values given by iterable.

    Note
    ----
    Values must not only be iterable, but must also support len and __getitem__.

    Modifies the existing RDD: does not return a new XArray.
    """
    cls._entry(dtype=dtype, ignore_cast_failure=ignore_cast_failure)
    dtype = dtype or None
    sc = CommonSparkContext.spark_context()
    try:
        if len(values) == 0:
            dtype = dtype or infer_type_of_list(values[0:100])
            return XArrayImpl(XRdd(sc.parallelize([])), dtype)
    except TypeError:
        # get here if values does not support len or __getitem__
        pass

    if dtype is None:
        # try iterating and see if we get something
        cpy = copy.copy(values)
        for val in cpy:
            dtype = infer_type_of_list([val])
            break
    if dtype is None:
        raise TypeError('Cannot determine types.')

    # noinspection PyShadowingNames
    def do_cast(x, dtype, ignore_cast_failure):
        if is_missing(x):
            return x
        if isinstance(x, str) and dtype is datetime.datetime:
            return date_parser.parse(x)
        if isinstance(x, dtype):
            return x
        try:
            return dtype(x)
        except (ValueError, TypeError):
            # TODO: this does not seem to catch as it should
            return None if ignore_cast_failure else ValueError

    raw_rdd = XRdd(sc.parallelize(values))
    rdd = raw_rdd.map(lambda x: do_cast(x, dtype, ignore_cast_failure))
    if not ignore_cast_failure:
        errs = len(rdd.filter(lambda x: x is ValueError).take(1)) == 1
        if errs:
            raise ValueError
    return cls(rdd, dtype, Lineage.init_array_lineage(Lineage.PROGRAM))
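# Casting sketch (illustrative only; assumes load_from_iterable is a classmethod
# on XArrayImpl): each element goes through do_cast, so values already of dtype
# are kept, strings are parsed when dtype is datetime.datetime, and everything
# else is coerced with dtype(x).
#
#   xs = XArrayImpl.load_from_iterable(['1', '2', 'oops'], int,
#                                      ignore_cast_failure=True)   # -> 1, 2, None
#   XArrayImpl.load_from_iterable(['1', 'oops'], int,
#                                 ignore_cast_failure=False)       # intended to raise
#                                                                  # ValueError (see TODO)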
def create_sequential_xarray(size, start, reverse):
    """
    Create RDD with sequential integer values of given size and starting pos.
    """
    if not reverse:
        stop = start + size
        step = 1
    else:
        stop = start - size
        step = -1
    sc = CommonSparkContext.spark_context()
    rdd = XRdd(sc.parallelize(range(start, stop, step)))
    return XArrayImpl(rdd, int, Lineage.init_array_lineage(Lineage.RANGE))
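# Usage sketch (illustrative only): the sequence counts up from start when
# reverse is False and down from start when reverse is True.
#
#   create_sequential_xarray(4, 10, False)   # values 10, 11, 12, 13
#   create_sequential_xarray(4, 10, True)    # values 10, 9, 8, 7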
def __init__(self, rdd=None, elem_type=None, lineage=None):
    # The RDD holds all the data for the XArray.
    # The rows must be of a single type.
    # Types permitted include int, long, float, string, list, and dict.
    # We record the element type here.
    self._entry(elem_type=elem_type)
    if rdd is None:
        sc = CommonSparkContext.spark_context()
        rdd = XRdd(sc.parallelize([]))
    super(XArrayImpl, self).__init__(rdd)
    self.elem_type = elem_type
    self.lineage = lineage or Lineage.init_array_lineage(Lineage.EMPTY)
    self.materialized = False
    self.iter_pos = 0
def load_autodetect(cls, path, dtype):
    """
    Load from the given path.

    This can be anything that spark will read from: local file or HDFS file.
    It can also be a directory, and spark will read and concatenate them all.
    """
    # Read the file as string
    # Examine the first 100 lines, and cast if necessary to int, float, or datetime
    cls._entry(path=path, dtype=dtype)
    # If the path is a directory, then look for sarray-data file in the directory.
    # If the path is a file, look for that file.
    # Use type inference to determine the element type.
    # Passed-in dtype is always str and is ignored.
    lineage = Lineage.init_array_lineage(path)
    sc = CommonSparkContext.spark_context()
    if os.path.isdir(path):
        res = XRdd(sc.pickleFile(path))
        metadata_path = os.path.join(path, '_metadata')
        with fileio.open_file(metadata_path) as f:
            dtype = pickle.load(f)
        lineage_path = os.path.join(path, '_lineage')
        if fileio.exists(lineage_path):
            lineage = Lineage.load(lineage_path)
    else:
        res = XRdd(sc.textFile(path, use_unicode=False))
        dtype = infer_type(res)
        if dtype != str:
            if dtype in (list, dict):
                res = res.map(lambda x: ast.literal_eval(x))
            elif dtype is datetime.datetime:
                res = res.map(lambda x: date_parser.parse(x))
            else:
                res = res.map(lambda x: dtype(x))
    return cls(res, dtype, lineage)
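# Usage sketch (illustrative only; paths are hypothetical): a directory written
# by an earlier save is read back as a pickle file with the dtype stored in its
# _metadata file, while a plain text file is read as strings and its element
# type inferred from the data.
#
#   saved = XArrayImpl.load_autodetect('data/ages.xarray', dtype=str)  # directory case
#   fresh = XArrayImpl.load_autodetect('data/ages.csv', dtype=str)     # text-file case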
def copy_range(self, start, step, stop):
    """
    Returns an RDD consisting of the values between start and stop, counting by step.
    """
    self._entry(start=start, step=step, stop=stop)

    # noinspection PyShadowingNames
    def select_row(x, start, step, stop):
        if x < start or x >= stop:
            return False
        return (x - start) % step == 0

    pairs = self._rdd.zipWithIndex()
    res = pairs.filter(lambda x: select_row(x[1], start, step, stop)).map(lambda x: x[0])
    lineage = Lineage.init_array_lineage(Lineage.RANGE)
    return self._rv(res, lineage=lineage)
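# Selection sketch (illustrative only): select_row keeps the element whose
# zipWithIndex position is start, start + step, start + 2 * step, ... and is
# strictly below stop.
#
#   # On a 10-element array, copy_range(2, 3, 9) keeps positions 2, 5, and 8.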
def from_rdd(cls, rdd, dtype, lineage=None):
    if lineage:
        return cls(rdd, dtype, lineage=Lineage.from_dict(lineage))
    return cls(rdd, dtype, Lineage.init_array_lineage(Lineage.RDD))
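# Usage sketch (illustrative only; assumes from_rdd is a classmethod on XArrayImpl,
# and some_rdd / saved_lineage_dict are hypothetical): wraps an existing RDD, either
# restoring a previously saved lineage dict or starting a fresh RDD lineage.
#
#   arr = XArrayImpl.from_rdd(some_rdd, int)                              # fresh lineage
#   arr = XArrayImpl.from_rdd(some_rdd, int, lineage=saved_lineage_dict)  # restored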