import os
import ast
import copy
import pickle

# spark_context, XRdd, XArrayImpl, infer_type, infer_type_of_list, is_missing,
# and has_hdfs are assumed to be provided by the surrounding module's imports.


def load_autodetect(cls, path, dtype):
    """
    Load from the given path.

    This can be anything that spark will read from: a local file or an HDFS
    file.  It can also be a directory, in which case spark will read and
    concatenate all the files it contains.
    """
    # Read the file as strings; examine the first 100 lines and cast to
    # int or float if necessary.
    cls._entry(path, dtype)
    # If the path is a directory, look for the sarray-data file in it.
    # If the path is a file, look for that file.
    # Use type inference to determine the element type.
    # The passed-in dtype is always str and is ignored.
    sc = spark_context()
    if os.path.isdir(path):
        res = XRdd(sc.pickleFile(path))
        metadata_path = os.path.join(path, '_metadata')
        with open(metadata_path) as f:
            dtype = pickle.load(f)
    else:
        res = XRdd(sc.textFile(path, use_unicode=False))
        dtype = infer_type(res)

    if dtype != str:
        if dtype in (list, dict):
            res = res.map(lambda x: ast.literal_eval(x))
        else:
            res = res.map(lambda x: dtype(x))
    cls._exit()
    return cls(res, dtype)
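
# A minimal usage sketch for load_autodetect.  This assumes the method is
# exposed as a classmethod on XArrayImpl and that spark_context() can build a
# SparkContext; the paths below are hypothetical.
#
#   # Load a plain text file; the element type is inferred from the contents.
#   arr = XArrayImpl.load_autodetect('/tmp/values.txt', str)
#
#   # Load a directory written by a previous save; the _metadata pickle in
#   # the directory supplies the element type.
#   arr = XArrayImpl.load_autodetect('/tmp/saved-xarray', str)
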
def load_from_const(cls, value, size):
    """
    Load RDD from a constant value, repeated `size` times.
    """
    cls._entry(value, size)
    values = [value for _ in xrange(0, size)]
    sc = spark_context()
    cls._exit()
    return cls(XRdd(sc.parallelize(values)), type(value))
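
# Usage sketch (assuming XArrayImpl is the owning class): builds an RDD of
# identical values whose element type is taken from the constant itself.
#
#   arr = XArrayImpl.load_from_const(1.5, 3)
#   # elements: [1.5, 1.5, 1.5], dtype: float
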
def create_sequential_xarray(size, start, reverse):
    """
    Create RDD with sequential integer values of given size and starting pos.
    """
    if not reverse:
        stop = start + size
        step = 1
    else:
        stop = start - size
        step = -1
    sc = spark_context()
    rdd = XRdd(sc.parallelize(range(start, stop, step)))
    return XArrayImpl(rdd, int)
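
# Usage sketch: the forward case yields start, start+1, ..., start+size-1;
# with reverse=True it counts down from start instead.
#
#   asc = create_sequential_xarray(5, 10, False)   # 10, 11, 12, 13, 14
#   desc = create_sequential_xarray(5, 10, True)   # 10, 9, 8, 7, 6
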
def load_from_iterable(cls, values, dtype, ignore_cast_failure):
    """
    Load RDD from values given by an iterable.

    Note
    ----
    Values must not only be iterable, but must also support len and
    __getitem__.

    Returns a new instance wrapping a freshly parallelized RDD.
    """
    cls._entry(values, dtype, ignore_cast_failure)
    dtype = dtype or None
    sc = spark_context()
    try:
        if len(values) == 0:
            cls._exit()
            return XArrayImpl(XRdd(sc.parallelize([])), dtype)
        dtype = dtype or infer_type_of_list(values[0:100])
    except TypeError:
        # get here if values does not support len or __getitem__
        pass

    if dtype is None:
        # try iterating and see if we get something
        cpy = copy.copy(values)
        for val in cpy:
            dtype = infer_type_of_list([val])
            break
    if dtype is None:
        raise TypeError('Cannot determine types.')

    def do_cast(x, dtype, ignore_cast_failure):
        if is_missing(x):
            return x
        if type(x) == dtype:
            return x
        try:
            return dtype(x)
        except (ValueError, TypeError):
            # TODO: this does not seem to catch as it should
            return None if ignore_cast_failure else ValueError

    raw_rdd = XRdd(sc.parallelize(values))
    rdd = raw_rdd.map(lambda x: do_cast(x, dtype, ignore_cast_failure))
    if not ignore_cast_failure:
        errs = len(rdd.filter(lambda x: x is ValueError).take(1)) == 1
        if errs:
            raise ValueError
    cls._exit()
    return cls(rdd, dtype)
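
# Usage sketch, assuming XArrayImpl is the owning class.  With an explicit
# dtype each element passes through do_cast; with ignore_cast_failure=True a
# failed cast becomes None instead of raising ValueError.
#
#   arr = XArrayImpl.load_from_iterable(['1', '2', 'x'], int, True)
#   # elements: [1, 2, None]
#
#   arr = XArrayImpl.load_from_iterable(['1', '2', 'x'], int, False)
#   # raises ValueError on the uncastable 'x'
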
def make_internal_url(url):
    """
    Takes a user input url string and translates it into a url relative to
    the server process.

    - URL to a local location begins with "local://" or has no "*://"
      modifier.  If the server is local, returns the absolute path of the
      url.  For example: "local:///tmp/foo" -> "/tmp/foo" and "./foo" ->
      os.path.abspath("./foo").  If the server is not local, raises
      ValueError.
    - URL to a server location begins with "remote://".  Returns the
      absolute path after the "remote://" modifier.  For example:
      "remote:///tmp/foo" -> "/tmp/foo".
    - URL to an s3 location begins with "s3://".  Returns the s3 URL with
      credentials filled in using xpatterns.aws.get_aws_credential().
      For example: "s3://mybucket/foo" ->
      "s3://$AWS_ACCESS_KEY_ID:$AWS_SECRET_ACCESS_KEY:mybucket/foo".
    - URLs to other remote locations, e.g. http://, remain as-is.
    - Expands ~ to $HOME.

    Parameters
    ----------
    url : str
        A URL (as described above).

    Raises
    ------
    ValueError
        If a bad url is provided.
    """
    if not url:
        raise ValueError('Invalid url: %s' % url)

    # Try to split the url into (protocol, path).
    urlsplit = url.split("://")
    if len(urlsplit) == 2:
        protocol, path = urlsplit
        if not path:
            raise ValueError('Invalid url: %s' % url)
        if protocol in ['http', 'https']:
            # protocol is a remote url not on the server, just return
            return url
        elif protocol == 'hdfs':
            if not has_hdfs():
                raise ValueError('HDFS URL is not supported because Hadoop was not found. '
                                 'Please make hadoop available from PATH or set the '
                                 'environment variable HADOOP_HOME and try again.')
            else:
                return url
        elif protocol == 's3':
            if len(path.split(":")) == 3:
                # s3 url already contains a secret key/id pair, just return
                return url
            else:
                # s3 url does not contain a secret key/id pair; query the
                # environment variables
                # k, v = get_credentials()
                # return 's3n://' + k + ':' + v + '@' + path
                return 's3n://' + path
        elif protocol == 'remote':
            # url for files on the server
            path_on_server = path
        elif protocol == 'local':
            # url for files on the local client; check whether we are
            # connected to a local server by asking the spark context
            sc = spark_context()
            if sc.master.startswith('local'):
                path_on_server = path
            else:
                raise ValueError('Cannot use local URL when connecting to a remote server.')
        else:
            raise ValueError('Invalid url protocol {}. Supported url protocols are: '
                             'remote://, local://, s3://, https:// and hdfs://'.format(protocol))
    elif len(urlsplit) == 1:
        # expand ~ to $HOME
        url = os.path.expanduser(url)
        # a url with no protocol refers to a file on the local client
        path_on_server = url
    else:
        raise ValueError('Invalid url: {}.'.format(url))

    if path_on_server:
        return os.path.abspath(os.path.expanduser(path_on_server))
    else:
        raise ValueError('Invalid url: {}.'.format(url))
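
# Usage sketch with hypothetical inputs, showing how each protocol is
# translated by the code above:
#
#   make_internal_url('./foo')                  # -> os.path.abspath(os.path.expanduser('./foo'))
#   make_internal_url('remote:///tmp/foo')      # -> '/tmp/foo'
#   make_internal_url('s3://mybucket/foo')      # -> 's3n://mybucket/foo'
#   make_internal_url('http://example.com/x')   # returned unchanged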