def _apply_basic_agg(self, agg_type, sort_results=False):
    """
    Parameters
    ----------
    agg_type : str
        The aggregation function to run.
    sort_results : bool, default False
        If True, sort the result by the group keys.
    """
    result = DataFrame()
    add_col_values = True

    ctx = ffi.new('gdf_context*')
    ctx.flag_sorted = 0
    ctx.flag_method = self._method
    ctx.flag_distinct = 0

    val_columns = self._val_columns
    val_columns_out = self._val_columns

    result = self._apply_agg(agg_type, result, add_col_values,
                             ctx, val_columns, val_columns_out,
                             sort_result=sort_results)

    # If a Groupby has one index column and one value column
    # and as_index is set, return a Series instead of a df
    if isinstance(val_columns, (str, Number)) and self._as_index:
        result_series = result[val_columns]
        idx = index.as_index(result[self._by[0]])
        if self.level == 0:
            idx.name = self._original_index_name
        else:
            idx.name = self._by[0]
        result_series = result_series.set_index(idx)
        return result_series

    # TODO: Do MultiIndex here
    if self._as_index:
        idx = index.as_index(result[self._by[0]])
        idx.name = self._by[0]
        result.drop_column(idx.name)
        if self.level == 0:
            idx.name = self._original_index_name
        else:
            idx.name = self._by[0]
        result = result.set_index(idx)

    nvtx_range_pop()
    return result
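
# A hedged usage sketch of the Series-vs-DataFrame behavior above, assuming
# this method backs the public groupby reductions (cudf.DataFrame.groupby);
# the data and column names are illustrative, not from the source.
def _example_basic_agg():
    import numpy as np
    import cudf

    df = cudf.DataFrame()
    df['key'] = np.array([0, 0, 1])
    df['val'] = np.array([1.0, 2.0, 3.0])
    # With a single grouping column, a single value column, and
    # as_index=True, the branch above returns a Series indexed by 'key'
    # rather than a one-column DataFrame.
    return df.groupby('key').max()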
def test_nonmatching_index_setitem(nrows):
    np.random.seed(0)

    gdf = DataFrame()
    gdf['a'] = np.random.randint(2147483647, size=nrows)
    gdf['b'] = np.random.randint(2147483647, size=nrows)
    gdf = gdf.set_index('b')

    test_values = np.random.randint(2147483647, size=nrows)
    gdf['c'] = test_values
    # Setitem on a frame with a non-default index must keep the values in
    # positional order and adopt the frame's existing index.
    assert len(test_values) == len(gdf['c'])
    assert gdf['c'].to_pandas().equals(
        Series(test_values).set_index(gdf._index).to_pandas())
def apply_multiindex_or_single_index(self, result):
    if len(result) == 0:
        final_result = DataFrame()
        for col in result.columns:
            if col not in self._by:
                final_result[col] = result[col]
        if len(self._by) == 1 or len(final_result.columns) == 0:
            dtype = 'float64' if len(self._by) == 1 else 'object'
            name = self._by[0] if len(self._by) == 1 else None
            from cudf.dataframe.index import GenericIndex
            index = GenericIndex(Series([], dtype=dtype))
            index.name = name
            final_result.index = index
        else:
            mi = MultiIndex(source_data=result[self._by])
            mi.names = self._by
            final_result.index = mi
        if len(final_result.columns) == 1 and hasattr(self, "_gotattr"):
            final_series = Series([], name=final_result.columns[0])
            final_series.index = final_result.index
            return final_series
        return final_result

    if len(self._by) == 1:
        from cudf.dataframe import index
        idx = index.as_index(result[self._by[0]])
        idx.name = self._by[0]
        result = result.drop(idx.name)
        if idx.name == self._LEVEL_0_INDEX_NAME:
            idx.name = self._original_index_name
        result = result.set_index(idx)
        return result
    else:
        multi_index = MultiIndex(source_data=result[self._by])
        final_result = DataFrame()
        for col in result.columns:
            if col not in self._by:
                final_result[col] = result[col]
        if len(final_result.columns) == 1 and hasattr(self, "_gotattr"):
            final_series = Series(final_result[final_result.columns[0]])
            final_series.name = final_result.columns[0]
            final_series.index = multi_index
            return final_series
        return final_result.set_index(multi_index)
def apply_multiindex_or_single_index(self, result):
    if len(result) == 0:
        final_result = DataFrame()
        for col in result.columns:
            if col not in self._by:
                final_result[col] = result[col]
        if len(self._by) == 1 or len(final_result.columns) == 0:
            if len(self._by) == 1:
                dtype = self._df[self._by[0]].dtype
            else:
                dtype = 'object'
            name = self._by[0] if len(self._by) == 1 else None
            from cudf.dataframe.index import GenericIndex
            index = GenericIndex(Series([], dtype=dtype))
            index.name = name
            final_result.index = index
        else:
            mi = MultiIndex(source_data=result[self._by])
            mi.names = self._by
            final_result.index = mi
        return final_result

    if len(self._by) == 1:
        from cudf.dataframe import index
        idx = index.as_index(result[self._by[0]])
        name = self._by[0]
        if isinstance(name, str):
            # Value columns are mangled as 'cudfvalcol+<name>' to avoid
            # clashing with grouping columns; unmangle for the index name.
            name = self._by[0].split('+')
            if name[0] == 'cudfvalcol':
                idx.name = name[1]
            else:
                idx.name = name[0]
        result = result.drop(self._by[0])
        for col in result.columns:
            if isinstance(col, str):
                colnames = col.split('+')
                if colnames[0] == 'cudfvalcol':
                    result[colnames[1]] = result[col]
                    result = result.drop(col)
        if idx.name == _LEVEL_0_INDEX_NAME:
            idx.name = self._original_index_name
        result = result.set_index(idx)
        return result
    else:
        for col in result.columns:
            if isinstance(col, str):
                colnames = col.split('+')
                if colnames[0] == 'cudfvalcol':
                    result[colnames[1]] = result[col]
                    result = result.drop(col)
        new_by = []
        for by in self._by:
            if isinstance(by, str):
                splitby = by.split('+')
                if splitby[0] == 'cudfvalcol':
                    new_by.append(splitby[1])
                else:
                    new_by.append(splitby[0])
            else:
                new_by.append(by)
        self._by = new_by
        multi_index = MultiIndex(source_data=result[self._by])
        final_result = DataFrame()
        for col in result.columns:
            if col not in self._by:
                final_result[col] = result[col]
        if len(final_result.columns) > 0:
            return final_result.set_index(multi_index)
        else:
            return result.set_index(multi_index)
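
# A minimal sketch of the 'cudfvalcol+<name>' unmangling performed above:
# value columns are tagged with a reserved prefix so they cannot collide
# with grouping columns, and the tag is stripped before results are
# returned. The helper name is illustrative, not from the source.
def _unmangle(col):
    parts = col.split('+')
    return parts[1] if parts[0] == 'cudfvalcol' else parts[0]

assert _unmangle('cudfvalcol+val') == 'val'
assert _unmangle('key') == 'key'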
def apply_multiindex_or_single_index(self, result):
    if len(result) == 0:
        final_result = DataFrame()
        for col in result.columns:
            if col not in self._by:
                final_result[col] = result[col]
        if len(self._by) == 1 or len(final_result.columns) == 0:
            dtype = 'float64' if len(self._by) == 1 else 'object'
            name = self._by[0] if len(self._by) == 1 else None
            from cudf.dataframe.index import GenericIndex
            index = GenericIndex(Series([], dtype=dtype))
            index.name = name
            final_result.index = index
        else:
            levels = []
            codes = []
            names = []
            for by in self._by:
                levels.append([])
                codes.append([])
                names.append(by)
            mi = MultiIndex(levels, codes)
            mi.names = names
            final_result.index = mi
        if len(final_result.columns) == 1 and hasattr(self, "_gotattr"):
            final_series = Series([], name=final_result.columns[0])
            final_series.index = final_result.index
            return final_series
        return final_result

    if len(self._by) == 1:
        from cudf.dataframe import index
        idx = index.as_index(result[self._by[0]])
        idx.name = self._by[0]
        result = result.drop(idx.name)
        if idx.name == self._LEVEL_0_INDEX_NAME:
            idx.name = self._original_index_name
        result = result.set_index(idx)
        return result
    else:
        levels = []
        codes = DataFrame()
        names = []
        # Note: This is an O(N^2) solution using gpu masking
        # to compute new codes for the MultiIndex. There may be
        # a faster solution that could be executed on gpu at the same
        # time the groupby is calculated.
        for by in self._by:
            level = result[by].unique()
            replaced = result[by].replace(level, range(len(level)))
            levels.append(level)
            codes[by] = Series(replaced, dtype="int32")
            names.append(by)
        multi_index = MultiIndex(levels=levels, codes=codes, names=names)
        final_result = DataFrame()
        for col in result.columns:
            if col not in self._by:
                final_result[col] = result[col]
        if len(final_result.columns) == 1 and hasattr(self, "_gotattr"):
            final_series = Series(final_result[final_result.columns[0]])
            final_series.name = final_result.columns[0]
            final_series.index = multi_index
            return final_series
        return final_result.set_index(multi_index)
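
# A CPU-only sketch of the unique-then-replace code computation used above;
# the GPU version calls Series.unique()/Series.replace(), and its unique
# ordering may differ from the first-seen ordering shown here.
def _compute_codes(values):
    level = list(dict.fromkeys(values))          # uniques, first-seen order
    lookup = {v: i for i, v in enumerate(level)}
    codes = [lookup[v] for v in values]          # value -> position in level
    return level, codes

level, codes = _compute_codes(['a', 'b', 'a', 'c'])
assert level == ['a', 'b', 'c']
assert codes == [0, 1, 0, 2]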
def read_csv(filepath_or_buffer, lineterminator='\n',
             quotechar='"', quoting=0, doublequote=True,
             header='infer', mangle_dupe_cols=True, usecols=None,
             sep=',', delimiter=None, delim_whitespace=False,
             skipinitialspace=False, names=None, dtype=None,
             skipfooter=0, skiprows=0, dayfirst=False, compression='infer',
             thousands=None, decimal='.', true_values=None,
             false_values=None, nrows=None, byte_range=None,
             skip_blank_lines=True, comment=None, na_values=None,
             keep_default_na=True, na_filter=True, prefix=None,
             index_col=None):
    """
    Load and parse a CSV file into a DataFrame

    Parameters
    ----------
    filepath_or_buffer : str
        Path of file to be read or a file-like object containing the file.
    sep : char, default ','
        Delimiter to be used.
    delimiter : char, default None
        Alternative argument name for sep.
    delim_whitespace : bool, default False
        Determines whether to use whitespace as delimiter.
    lineterminator : char, default '\\n'
        Character to indicate end of line.
    skipinitialspace : bool, default False
        Skip spaces after delimiter.
    names : list of str, default None
        List of column names to be used.
    dtype : list of str or dict of {col: dtype}, default None
        List of data types in the same order of the column names
        or a dictionary with column_name:dtype (pandas style).
    quotechar : char, default '"'
        Character to indicate start and end of quote item.
    quoting : str or int, default 0
        Controls quoting behavior. Set to one of
        0 (csv.QUOTE_MINIMAL), 1 (csv.QUOTE_ALL),
        2 (csv.QUOTE_NONNUMERIC) or 3 (csv.QUOTE_NONE).
        Quoting is enabled with all values except 3.
    doublequote : bool, default True
        When quoting is enabled, indicates whether to interpret two
        consecutive quotechar inside fields as single quotechar.
    header : int, default 'infer'
        Row number to use as the column names. Default behavior is to infer
        the column names: if no names are passed, header=0;
        if column names are passed explicitly, header=None.
    usecols : list of int or str, default None
        Returns subset of the columns given in the list. All elements must be
        either integer indices (column number) or strings that correspond to
        column names.
    mangle_dupe_cols : boolean, default True
        Duplicate columns will be specified as 'X', 'X.1', ..., 'X.N'.
    skiprows : int, default 0
        Number of rows to be skipped from the start of file.
    skipfooter : int, default 0
        Number of rows to be skipped at the bottom of file.
    compression : {'infer', 'gzip', 'zip', None}, default 'infer'
        For on-the-fly decompression of on-disk data. If 'infer', then detect
        compression from the following extensions: '.gz', '.zip' (otherwise
        no decompression). If using 'zip', the ZIP file must contain only one
        data file to be read in, otherwise the first non-zero-sized file will
        be used. Set to None for no decompression.
    decimal : char, default '.'
        Character used as a decimal point.
    thousands : char, default None
        Character used as a thousands delimiter.
    true_values : list, default None
        Values to consider as boolean True.
    false_values : list, default None
        Values to consider as boolean False.
    nrows : int, default None
        If specified, maximum number of rows to read.
    byte_range : list or tuple, default None
        Byte range within the input file to be read. The first number is the
        offset in bytes, the second number is the range size in bytes. Set
        the size to zero to read all data after the offset location. Reads
        the row that starts before or at the end of the range, even if it
        ends after the end of the range.
    skip_blank_lines : bool, default True
        If True, discard and do not parse empty lines.
        If False, interpret empty lines as NaN values.
    comment : char, default None
        Character used as a comments indicator. If found at the beginning of
        a line, the line will be ignored altogether.
    na_values : list, default None
        Values to consider as invalid.
    keep_default_na : bool, default True
        Whether or not to include the default NA values when parsing the
        data.
    na_filter : bool, default True
        Detect missing values (empty strings and the values in na_values).
        Passing False can improve performance.
    prefix : str, default None
        Prefix to add to column numbers when parsing without a header row.
    index_col : int or string, default None
        Column to use as the row labels.

    Returns
    -------
    GPU ``DataFrame`` object.

    Examples
    --------
    Create a test csv file

    >>> import cudf
    >>> filename = 'foo.csv'
    >>> lines = [
    ...   "num1,datetime,text",
    ...   "123,2018-11-13T12:00:00,abc",
    ...   "456,2018-11-14T12:35:01,def",
    ...   "789,2018-11-15T18:02:59,ghi"
    ... ]
    >>> with open(filename, 'w') as fp:
    ...     fp.write('\\n'.join(lines)+'\\n')

    Read the file with ``cudf.read_csv``

    >>> cudf.read_csv(filename)
      num1                datetime text
    0  123 2018-11-13T12:00:00.000 5451
    1  456 2018-11-14T12:35:01.000 5784
    2  789 2018-11-15T18:02:59.000 6117

    See Also
    --------
    .read_csv_strings
    """

    if delim_whitespace:
        if delimiter is not None:
            raise ValueError("cannot set both delimiter and delim_whitespace")
        if sep != ',':
            raise ValueError("cannot set both sep and delim_whitespace")

    # Alias sep -> delimiter.
    if delimiter is None:
        delimiter = sep

    if dtype is not None:
        if isinstance(dtype, collections.abc.Mapping):
            dtype_dict = True
        elif isinstance(dtype, collections.abc.Iterable):
            dtype_dict = False
        else:
            msg = '''dtype must be 'list like' or 'dict' '''
            raise TypeError(msg)
        if names is not None and len(dtype) != len(names):
            msg = '''All column dtypes must be specified.'''
            raise TypeError(msg)

    nvtx_range_push("CUDF_READ_CSV", "purple")

    csv_reader = ffi.new('csv_read_arg*')

    # Populate csv_reader struct
    if is_file_like(filepath_or_buffer):
        if compression == 'infer':
            compression = None
        buffer = filepath_or_buffer.read()
        # check if StringIO is used
        if hasattr(buffer, 'encode'):
            buffer_as_bytes = buffer.encode()
        else:
            buffer_as_bytes = buffer
        buffer_data_holder = ffi.new("char[]", buffer_as_bytes)

        csv_reader.input_data_form = libgdf.HOST_BUFFER
        csv_reader.filepath_or_buffer = buffer_data_holder
        csv_reader.buffer_size = len(buffer_as_bytes)
    else:
        if not os.path.isfile(filepath_or_buffer):
            raise FileNotFoundError(filepath_or_buffer)
        if not os.path.exists(filepath_or_buffer):
            raise FileNotFoundError(filepath_or_buffer)
        file_path = _wrap_string(filepath_or_buffer)

        csv_reader.input_data_form = libgdf.FILE_PATH
        csv_reader.filepath_or_buffer = file_path

    if header == 'infer':
        header = -1
    header_infer = header
    arr_names = []
    arr_dtypes = []
    if names is None:
        if header == -1:
            header_infer = 0
        if header is None:
            header_infer = -1
        csv_reader.names = ffi.NULL
        csv_reader.num_cols = 0
    else:
        if header is None:
            header_infer = -1
        csv_reader.num_cols = len(names)
        for col_name in names:
            arr_names.append(_wrap_string(col_name))
            if dtype is not None:
                if dtype_dict:
                    arr_dtypes.append(_wrap_string(str(dtype[col_name])))
        names_ptr = ffi.new('char*[]', arr_names)
        csv_reader.names = names_ptr

    if dtype is None:
        csv_reader.dtype = ffi.NULL
    else:
        if not dtype_dict:
            for col_dtype in dtype:
                arr_dtypes.append(_wrap_string(str(col_dtype)))
        dtype_ptr = ffi.new('char*[]', arr_dtypes)
        csv_reader.dtype = dtype_ptr

    csv_reader.use_cols_int = ffi.NULL
    csv_reader.use_cols_int_len = 0
    csv_reader.use_cols_char = ffi.NULL
    csv_reader.use_cols_char_len = 0

    if usecols is not None:
        arr_col_names = []
        if all(isinstance(x, int) for x in usecols):
            usecols_ptr = ffi.new('int[]', usecols)
            csv_reader.use_cols_int = usecols_ptr
            csv_reader.use_cols_int_len = len(usecols)
        else:
            for col_name in usecols:
                arr_col_names.append(_wrap_string(col_name))
            col_names_ptr = ffi.new('char*[]', arr_col_names)
            csv_reader.use_cols_char = col_names_ptr
            csv_reader.use_cols_char_len = len(usecols)

    if decimal == delimiter:
        raise ValueError("decimal cannot be the same as delimiter")

    if thousands == delimiter:
        raise ValueError("thousands cannot be the same as delimiter")

    if nrows is not None and skipfooter != 0:
        raise ValueError("cannot use both nrows and skipfooter parameters")

    if byte_range is not None:
        if skipfooter != 0 or skiprows != 0 or nrows is not None:
            raise ValueError("""cannot manually limit rows to be read when
                                using the byte range parameter""")

    arr_true_values = []
    for value in true_values or []:
        arr_true_values.append(_wrap_string(str(value)))
    arr_true_values_ptr = ffi.new('char*[]', arr_true_values)
    csv_reader.true_values = arr_true_values_ptr
    csv_reader.num_true_values = len(arr_true_values)

    arr_false_values = []
    for value in false_values or []:
        arr_false_values.append(_wrap_string(str(value)))
    false_values_ptr = ffi.new('char*[]', arr_false_values)
    csv_reader.false_values = false_values_ptr
    csv_reader.num_false_values = len(arr_false_values)

    arr_na_values = []
    for value in na_values or []:
        arr_na_values.append(_wrap_string(str(value)))
    arr_na_values_ptr = ffi.new('char*[]', arr_na_values)
    csv_reader.na_values = arr_na_values_ptr
    csv_reader.num_na_values = len(arr_na_values)

    compression_bytes = _wrap_string(compression)
    prefix_bytes = _wrap_string(prefix)

    csv_reader.delimiter = delimiter.encode()
    csv_reader.lineterminator = lineterminator.encode()
    csv_reader.quotechar = quotechar.encode()
    csv_reader.quoting = _quoting_enum[quoting]
    csv_reader.doublequote = doublequote
    csv_reader.delim_whitespace = delim_whitespace
    csv_reader.skipinitialspace = skipinitialspace
    csv_reader.dayfirst = dayfirst
    csv_reader.header = header_infer
    csv_reader.skiprows = skiprows
    csv_reader.skipfooter = skipfooter
    csv_reader.mangle_dupe_cols = mangle_dupe_cols
    csv_reader.windowslinetermination = False
    csv_reader.compression = compression_bytes
    csv_reader.decimal = decimal.encode()
    csv_reader.thousands = thousands.encode() if thousands else b'\0'
    csv_reader.nrows = nrows if nrows is not None else -1
    if byte_range is not None:
        csv_reader.byte_range_offset = byte_range[0]
        csv_reader.byte_range_size = byte_range[1]
    else:
        csv_reader.byte_range_offset = 0
        csv_reader.byte_range_size = 0
    csv_reader.skip_blank_lines = skip_blank_lines
    csv_reader.comment = comment.encode() if comment else b'\0'
    csv_reader.keep_default_na = keep_default_na
    csv_reader.na_filter = na_filter
    csv_reader.prefix = prefix_bytes

    # Call read_csv
    libgdf.read_csv(csv_reader)

    out = csv_reader.data
    if out == ffi.NULL:
        raise ValueError("Failed to parse CSV")

    # Extract parsed columns
    outcols = []
    new_names = []
    for i in range(csv_reader.num_cols_out):
        newcol = Column.from_cffi_view(out[i])
        new_names.append(ffi.string(out[i].col_name).decode())
        if newcol.dtype == np.dtype('datetime64[ms]'):
            outcols.append(newcol.view(DatetimeColumn,
                                       dtype='datetime64[ms]'))
        else:
            outcols.append(newcol.view(NumericalColumn, dtype=newcol.dtype))

    # Build dataframe
    df = DataFrame()
    # if names is not None and header_infer is -1:
    for k, v in zip(new_names, outcols):
        df[k] = v

    # Set index if the index_col parameter is passed
    if index_col is not None and index_col is not False:
        if isinstance(index_col, int):
            df = df.set_index(df.columns[index_col])
        else:
            df = df.set_index(index_col)

    nvtx_range_pop()
    return df
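
# A hedged usage sketch of the options wired up above; 'foo.csv' matches the
# docstring example, and the byte_range values and column names are
# illustrative.
def _example_read_csv():
    import cudf

    # Full read; header and dtypes are inferred from the file.
    df = cudf.read_csv('foo.csv')
    # Partial read: a byte_range generally starts mid-file, so the header is
    # suppressed and names are passed explicitly. A size of 0 would read
    # everything after the offset.
    chunk = cudf.read_csv('foo.csv', byte_range=(0, 1024), header=None,
                          names=['num1', 'datetime', 'text'])
    return df, chunk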
def agg(self, args):
    """
    Invoke aggregation functions on the groups.

    Parameters
    ----------
    args : dict, list, str, callable
        - str
            The aggregate function name.
        - list
            List of *str* of the aggregate function.
        - dict
            key-value pairs of source column name and list of
            aggregate functions as *str*.

    Returns
    -------
    result : DataFrame

    Notes
    -----
    Since multi-indexes aren't supported, aggregation results are returned
    in columns using the naming scheme of `aggregation_columnname`.
    """
    result = DataFrame()
    add_col_values = True

    ctx = ffi.new('gdf_context*')
    ctx.flag_sorted = 0
    ctx.flag_method = self._method
    ctx.flag_distinct = 0

    sort_result = True

    # TODO: Use MultiColumn here instead of use_prefix
    # use_prefix enables old functionality - prefixing column
    # groupby names since we don't support MultiColumn quite yet
    use_prefix = len(self._val_columns) > 1 or len(args) > 1

    if not isinstance(args, str) and isinstance(
            args, collections.abc.Sequence):
        for agg_type in args:
            val_columns_out = [
                agg_type + '_' + val for val in self._val_columns
            ]
            if not use_prefix:
                val_columns_out = self._val_columns
            result = self._apply_agg(agg_type, result, add_col_values,
                                     ctx, self._val_columns, val_columns_out,
                                     sort_result=sort_result)
            add_col_values = False  # we only want to add them once
        # TODO: Do multiindex here
        if self._as_index and len(self._by) == 1:
            idx = index.as_index(result[self._by[0]])
            idx.name = self._by[0]
            result = result.set_index(idx)
            result.drop_column(idx.name)
    elif isinstance(args, collections.abc.Mapping):
        if len(args.keys()) == 1:
            if len(list(args.values())[0]) == 1:
                sort_result = False
        for val, agg_type in args.items():
            if not isinstance(agg_type, str) and \
                    isinstance(agg_type, collections.abc.Sequence):
                for sub_agg_type in agg_type:
                    val_columns_out = [sub_agg_type + '_' + val]
                    if not use_prefix:
                        val_columns_out = self._val_columns
                    result = self._apply_agg(sub_agg_type, result,
                                             add_col_values, ctx, [val],
                                             val_columns_out,
                                             sort_result=sort_result)
            elif isinstance(agg_type, str):
                val_columns_out = [agg_type + '_' + val]
                if not use_prefix:
                    val_columns_out = self._val_columns
                result = self._apply_agg(agg_type, result,
                                         add_col_values, ctx, [val],
                                         val_columns_out,
                                         sort_result=sort_result)
            add_col_values = False  # we only want to add them once
        # TODO: Do multiindex here
        if self._as_index and len(self._by) == 1:
            idx = index.as_index(result[self._by[0]])
            idx.name = self._by[0]
            result = result.set_index(idx)
            result.drop_column(idx.name)
    else:
        result = self.agg([args])

    nvtx_range_pop()
    return result
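
# A hedged sketch of the three accepted `args` forms and the
# `aggregation_columnname` naming scheme described in the Notes; the data
# and column names are illustrative.
def _example_agg():
    import numpy as np
    import cudf

    df = cudf.DataFrame()
    df['key'] = np.array([0, 0, 1, 1])
    df['val'] = np.array([1.0, 2.0, 3.0, 4.0])
    gb = df.groupby('key')
    a = gb.agg('max')                      # str: a single function
    b = gb.agg(['min', 'max'])             # list: yields 'min_val', 'max_val'
    c = gb.agg({'val': ['count', 'sum']})  # dict: per-column function lists
    return a, b, c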