Example #1
def assert_finite(v):
    try:
        _assert_all_finite(v)
        return v
    except ValueError:
        print('Concatenated vectors contained invalid value: %s' % v)
        return None
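A minimal usage sketch of the pattern above (assumption: `_assert_all_finite` is the private helper in `sklearn.utils.validation`, so the import path may differ between versions):

import numpy as np
from sklearn.utils.validation import _assert_all_finite  # private helper; import path assumed

clean = np.array([1.0, 2.0, 3.0])
dirty = np.array([1.0, np.nan, np.inf])

_assert_all_finite(clean)        # passes silently
try:
    _assert_all_finite(dirty)    # NaN/inf trigger a ValueError
except ValueError as exc:
    print(exc)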
Example #2
File: data.py Project: dssg/drain
def infinite_columns(df):
    columns = []
    for c in df.columns:
        try:
            _assert_all_finite(df[c])
        except ValueError:
            columns.append(c)
    return columns
Example #4
def is_finite(v):
    v1 = np.array([v])
    try:
        _assert_all_finite(v1)
        return True
    except ValueError:
        print("Value %s was not finite" % v)
        return False
Example #5
def vectorize_transcript(cnt_vec, file):
    with open(file, 'r') as f:
        X = transform(cnt_vec, f.readlines())
    try:
        X = X.sum(axis=0, dtype=np.float64).getA()[0]
        _assert_all_finite(X)
        return X
    except ValueError:
        print('Transcript contained invalid value: %s' % X)
        return None
Example #6
    def predict(self, target=None, factors=None, prediction_len=None):
        """
        Prediction with the method auto-selected based on rnn_arch
        :param factors: np.array
        :param target: np.array
        :param prediction_len: int
        :return: np.array of predictions
        """

        # Some tests
        assert target is not None
        assert prediction_len is not None
        self.prediction_len = prediction_len

        if factors is not None:
            assert factors.shape[0] == self.n_lags
            assert factors.shape[1] == self.n_features - 1
        else:
            _assert_all_finite(target)
            assert len(target) == self.n_lags

        # Prepare input
        if isinstance(target, pd.Series):
            target = target.values
        input_df = target.reshape(-1, 1) if factors is None else np.hstack(
            (factors, target.reshape(-1, 1)))

        logger.info(f'[Prediction] Start predict by {self.strategy} strategy')
        # if Multi input Multi-out prediction
        if self.strategy == "MiMo":
            predicted = self._mimo_pred(input_df)
        # if Direct prediction
        elif self.strategy == "Direct":
            predicted = self._direct_pred(input_df)
        # if Recursive prediction
        elif self.strategy == "Recursive":
            predicted = self._recursive_pred(
                input_df, prediction_len=self.prediction_len)
        # if DirRec prediction
        elif self.strategy == 'DirRec':
            predicted = self._dirrec_pred(input_df)
        # if DirMo prediction
        elif self.strategy == 'DirMo':
            predicted = self._dirmo_pred(input_df)
        else:
            logger.critical(
                f'Use strategy from ["Direct", "Recursive", "MiMo", "DirRec", "DirMo"]'
            )
            raise AssertionError(
                f'Use strategy from ["Direct", "Recursive", "MiMo", "DirRec", "DirMo"]'
            )

        logger.info(f'[Prediction] End predict by {self.strategy} strategy')
        return predicted.flatten()
Example #7
    def convert_word2Xy(self, model_file, is_model_bin, use_basic_label,
                        use_rand_vec):
        """
		Convert word to topics data set to include embedding vector and processed label

		Parameters
		----------
		model_file : str
			embedding model file name
		is_model_bin : bool
			is embedding model in binary format (True), otherwise (False)
		use_basic_label : bool
			use basic concept as label (True), use specific concept as label (False)
		use_rand_vec : bool
			use random vectors as word vectors (True), use pre-trained vectors as word vectors (False)

		Returns
		-------
		None
		"""
        # preprocess steps
        print("Convert word_topics.tab -> (X,y)")
        if use_basic_label:
            print("Get basic labels from given ones")
        remove_multilabeled = True

        self.get_label_names(remove_multilabeled, use_basic_label)

        # get the number of unique classes and get the count of them
        # load word embeddings
        self.load_emb2domains(model_file, is_model_bin)
        self.create_Xy(use_rand_vec)
        self.words_labels.sort_values(by="topics_id", inplace=True)
        self.words_labels = self.words_labels.astype({
            "word": str,
            "topics": str,
            "topics_id": int
        })

        self.words_labels[[
            "x_" + str(i) for i in range(self.get_emb_num_dim())
        ]] = self.words_labels[[
            "x_" + str(i) for i in range(self.get_emb_num_dim())
        ]].astype(float)
        self.print_words_labels()
        sk_validation._assert_all_finite(self.get_x().values)
Example #8
def get_matching_funds(funds, quarter, year, not_processed):
    print("Getting matching fundamentals.")
    if year == 'no_year_found':
        print("No year found")
        return None, None, None
    match = funds[(funds.period_focus == quarter)
                  & (funds.fiscal_year == year)]
    if match is None or match.empty:
        print("Did not find quarter and year in dataframe:\n%s" % match)
        return None, None, None
    if not are_all_finite(match):
        out = open('bad_tr_ex.out', 'a')
        out.write(df_str(match) + "\n")
        out.close()
        return None, None, None
    match = clean_quarters(match)
    eps, diluted_eps = None, None
    try:
        eps, diluted_eps = match.loc[:, 'eps_basic'].iloc[
            0], match.loc[:, 'eps_diluted'].iloc[0]
        _assert_all_finite(eps)
    except KeyError as e:
        print("Caught key error: %s" % str(e))
        print("match: \n%s" % match)
        not_processed.no_eps.append(funds)
        return None, None, None
    except ValueError:
        not_processed.no_eps.append(match)
        print('eps value was not finite: %s' % eps)
        return None, None, None
    funds = funds.drop(match.index, axis=0)
    print("Returning matching fundamentals:\n%s" % str(match))
    match = drop_label(match, 'eps_basic')
    match = drop_label(match, 'eps_diluted')
    # match = drop_label(match, 'period_focus')

    match['period_focus'] = match['period_focus'].map(lambda x: x.lower().replace("q", ""))
    match['period_focus'] = pd.to_numeric(match['period_focus'],
                                          downcast='integer')
    match = drop_label(match, 'fiscal_year')
    return match, eps, diluted_eps
Example #9
__author__ = 'SEOKHO'


import numpy as np
import pickle
from sklearn.utils import validation

with open("error.np", "rb") as f:
    X = pickle.load(f)

for array in X:
    try:
        validation._assert_all_finite(array)
    except ValueError:
        print(array)
Example #10
def check_fit_params(
    X: TwoDimArrayLikeType,
    y: OneDimArrayLikeType,
    sample_weight: Optional[OneDimArrayLikeType] = None,
    estimator: Optional[BaseEstimator] = None,
    **kwargs: Any
) -> Tuple[TwoDimArrayLikeType, OneDimArrayLikeType, OneDimArrayLikeType]:
    """Check `X`, `y` and `sample_weight`.

    Parameters
    ----------
    X
        Data.

    y
        Target.

    sample_weight
        Weights of data.

    estimator
        Object to use to fit the data.

    **kwargs
        Other keywords passed to `sklearn.utils.check_array`.

    Returns
    -------
    X
        Converted and validated data.

    y
        Converted and validated target.

    sample_weight
        Converted and validated weights of data.
    """
    X = check_X(X, estimator=estimator, **kwargs)

    if not isinstance(y, pd.Series):
        y = column_or_1d(y, warn=True)

    _assert_all_finite(y)

    if is_classifier(estimator):
        check_classification_targets(y)

    if sample_weight is None:
        n_samples = _num_samples(X)
        sample_weight = np.ones(n_samples)

    sample_weight = np.asarray(sample_weight)

    class_weight = getattr(estimator, "class_weight", None)

    if class_weight is not None:
        sample_weight *= compute_sample_weight(class_weight, y)

    check_consistent_length(X, y, sample_weight)

    return X, y, sample_weight
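A hypothetical call sketch for `check_fit_params`, assuming it and the helpers it relies on (`check_X`, `_num_samples`, etc.) are importable from the surrounding module; the weight handling follows the code above:

import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.array([[0.0, 1.0], [1.0, 0.0], [2.0, 1.0], [3.0, 0.0]])
y = np.array([0, 1, 0, 1])

# sample_weight defaults to a vector of ones sized to X; an estimator
# class_weight, if set, would rescale it via compute_sample_weight.
X_chk, y_chk, sw = check_fit_params(X, y, estimator=LogisticRegression())
print(sw)  # [1. 1. 1. 1.]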
Example #11
    def run_NN(self, are_dim_reduced):
        """
		Run k-NN to evaluate word2vec model for wordNet domain labels

		Parameters
		----------
		are_dim_reduced : bool
			embedding dimensions are reduced (True), otherwise (False)

		Returns
		-------
		None
		"""
        print("Run kNN")
        if are_dim_reduced:
            x_min, x_max = np.min(self.X_low, 0), np.max(self.X_low, 0)
            X = (self.X_low - x_min) / (x_max - x_min)
        else:
            x_min, x_max = np.min(self.get_x().values,
                                  0), np.max(self.get_x().values, 0)
            X = (self.get_x().values - x_min) / (x_max - x_min)

        y = self.words_labels["topics_id"].values

        sk_validation._assert_all_finite(X)
        skf = StratifiedKFold(n_splits=5, random_state=self.random_state)

        fold_idx = 0
        test_acc_models = {
            2: [],
            5: [],
            20: [],
            40: []
        }  # for k-NN we can only assess the test acc

        for train_index, test_index in iter(skf.split(X, y)):
            fold_idx += 1
            X_train = X[train_index]
            y_train = y[train_index]
            X_test = X[test_index]
            y_test = y[test_index]

            estimators = {
                nn_num: neighbors.KNeighborsClassifier(nn_num,
                                                       weights='distance')
                for nn_num in [2, 5, 20, 40]
            }
            for index, (nn_num, estimator) in enumerate(estimators.items()):
                estimator.fit(X_train, y_train)
                y_test_pred = estimator.predict(X_test)
                test_accuracy = np.mean(
                    y_test_pred.ravel() == y_test.ravel()) * 100
                test_acc_models[nn_num].append(test_accuracy)

        for nn_num in [2, 5, 20, 40]:
            print("=== {}-NN ===".format(nn_num))
            print("max test acc: {}, min test acc: {}".format(
                max(test_acc_models[nn_num]), min(test_acc_models[nn_num])))
            print("avg test acc: {}".format(
                sum(test_acc_models[nn_num]) /
                float(len(test_acc_models[nn_num]))))
Example #12
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
                copy=False, force_all_finite=True, ensure_2d=True,
                allow_nd=False, ensure_min_samples=1, ensure_min_features=1):
    """Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2D numpy array.
    If the dtype of the array is object, attempt converting to float,
    raising on failure.
    Parameters
    ----------
    array : object
        Input object to check / convert.
    accept_sparse : string, list of string or None (default=None)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc.  None means that sparse matrix input will raise an error.
        If the input is sparse but not in the allowed format, it will be
        converted to the first listed format.
    dtype : string, type or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.
    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.
    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.
    ensure_2d : boolean (default=True)
        Whether to make X at least 2d.
    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.
    ensure_min_samples : int (default=1)
        Make sure that the array has a minimum number of samples in its first
        axis (rows for a 2D array). Setting to 0 disables this check.
    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when the input data has effectively 2
        dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
        disables this check.
    Returns
    -------
    X_converted : object
        The converted and validated X.
    """
    if isinstance(accept_sparse, str):
        accept_sparse = [accept_sparse]

    # store whether originally we wanted numeric dtype
    dtype_numeric = dtype == "numeric"

    if sp.issparse(array):
        if dtype_numeric:
            dtype = None
        array = _ensure_sparse_format(array, accept_sparse, dtype, order,
                                      copy, force_all_finite)
    else:
        if ensure_2d:
            array = np.atleast_2d(array)
        if dtype_numeric:
            if hasattr(array, "dtype") and getattr(array.dtype, "kind", None) == "O":
                # if input is object, convert to float.
                dtype = np.float64
            else:
                dtype = None
        array = np.array(array, dtype=dtype, order=order, copy=copy)
        # make sure we actually converted to numeric:
        if dtype_numeric and array.dtype.kind == "O":
            array = array.astype(np.float64)
        if not allow_nd and array.ndim >= 3:
            raise ValueError("Found array with dim %d. Expected <= 2" %
                             array.ndim)
        if force_all_finite:
            _assert_all_finite(array)

    shape_repr = _shape_repr(array.shape)
    if ensure_min_samples > 0:
        n_samples = _num_samples(array)
        if n_samples < ensure_min_samples:
            raise ValueError("Found array with %d sample(s) (shape=%s) while a"
                             " minimum of %d is required."
                             % (n_samples, shape_repr, ensure_min_samples))

    if ensure_min_features > 0 and array.ndim == 2:
        n_features = array.shape[1]
        if n_features < ensure_min_features:
            raise ValueError("Found array with %d feature(s) (shape=%s) while"
                             " a minimum of %d is required."
                             % (n_features, shape_repr, ensure_min_features))
    return array
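The public `sklearn.utils.check_array` wraps this logic; a short sketch of the finiteness behaviour described in the docstring (exact error messages vary by version, and `force_all_finite` is renamed in newer releases):

import numpy as np
from sklearn.utils import check_array

X = np.array([[1.0, 2.0], [np.nan, 4.0]])

try:
    check_array(X)                              # force_all_finite=True -> ValueError on NaN
except ValueError as exc:
    print(exc)

X_ok = check_array(X, force_all_finite=False)   # NaN/inf are let through
print(X_ok.shape)                               # (2, 2)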
Example #13
def _ensure_sparse_format(
    spmatrix, accept_sparse, dtype, copy, force_all_finite, accept_large_sparse
):
    """Convert a sparse matrix to a given format.

    Checks the sparse format of spmatrix and converts if necessary.

    Parameters
    ----------
    spmatrix : scipy sparse matrix
        Input to validate and convert.

    accept_sparse : string, boolean or list/tuple of strings
        String[s] representing allowed sparse matrix formats ('csc',
        'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). If the input is sparse but
        not in the allowed format, it will be converted to the first listed
        format. True allows the input to be any format. False means
        that a sparse matrix input will raise an error.

    dtype : string, type or None
        Data type of result. If None, the dtype of the input is preserved.

    copy : boolean
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean or 'allow-nan', (default=True)
        Whether to raise an error on np.inf and np.nan in X. The possibilities
        are:

        - True: Force all values of X to be finite.
        - False: accept both np.inf and np.nan in X.
        - 'allow-nan': accept only np.nan values in X. Values cannot be
          infinite.

        .. versionadded:: 0.20
           ``force_all_finite`` accepts the string ``'allow-nan'``.

    Returns
    -------
    spmatrix_converted : scipy sparse matrix.
        Matrix that is ensured to have an allowed type.
    """
    if dtype is None:
        dtype = spmatrix.dtype

    changed_format = False

    if isinstance(accept_sparse, str):
        accept_sparse = [accept_sparse]

    # Indices dtype validation
    _check_large_sparse(spmatrix, accept_large_sparse)

    if accept_sparse is False:
        raise TypeError(
            "A sparse matrix was passed, but dense "
            "data is required. Use X.toarray() to "
            "convert to a dense numpy array."
        )
    elif isinstance(accept_sparse, (list, tuple)):
        if len(accept_sparse) == 0:
            raise ValueError(
                "When providing 'accept_sparse' "
                "as a tuple or list, it must contain at "
                "least one string value."
            )
        # ensure correct sparse format
        if spmatrix.format not in accept_sparse:
            # create new with correct sparse
            spmatrix = spmatrix.asformat(accept_sparse[0])
            changed_format = True
    elif accept_sparse is not True:
        # any other type
        raise ValueError(
            "Parameter 'accept_sparse' should be a string, "
            "boolean or list of strings. You provided "
            "'accept_sparse={}'.".format(accept_sparse)
        )

    if dtype != spmatrix.dtype:
        # convert dtype
        spmatrix = spmatrix.astype(dtype)
    elif copy and not changed_format:
        # force copy
        spmatrix = spmatrix.copy()

    if force_all_finite:
        if not hasattr(spmatrix, "data"):
            warnings.warn(
                "Can't check %s sparse matrix for nan or inf." % spmatrix.format,
                stacklevel=2,
            )
        else:
            _assert_all_finite(spmatrix.data, allow_nan=force_all_finite == "allow-nan")

    return spmatrix
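A brief sketch of the sparse path via the public `check_array`, which delegates to `_ensure_sparse_format`: an accepted format passes through, a non-listed one is converted to the first listed format, and `accept_sparse=False` raises.

import numpy as np
import scipy.sparse as sp
from sklearn.utils import check_array

X = sp.csr_matrix(np.eye(3))

print(check_array(X, accept_sparse='csr').format)    # 'csr' (already accepted)
print(check_array(X, accept_sparse=['csc']).format)  # 'csc' (converted to first listed format)

try:
    check_array(X, accept_sparse=False)              # dense data required -> TypeError
except TypeError as exc:
    print(exc)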
Example #14
def check_array(
    array,
    accept_sparse=False,
    accept_large_sparse=True,
    dtype="numeric",
    order=None,
    copy=False,
    force_all_finite=True,
    ensure_2d=True,
    allow_nd=False,
    ensure_min_samples=1,
    ensure_min_features=1,
    warn_on_dtype=None,
    estimator=None,
    allow_complex=False,
):

    """Input validation on an array, list, sparse matrix or similar.

    By default, the input is checked to be a non-empty 2D array containing
    only finite values. If the dtype of the array is object, attempt
    converting to float, raising on failure.

    Parameters
    ----------
    array : object
        Input object to check / convert.

    accept_sparse : string, boolean or list/tuple of strings (default=False)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. If the input is sparse but not in the allowed format,
        it will be converted to the first listed format. True allows the input
        to be any format. False means that a sparse matrix input will
        raise an error.

    accept_large_sparse : bool (default=True)
        If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
        accept_sparse, accept_large_sparse=False will cause it to be accepted
        only if its indices are stored with a 32-bit dtype.

        .. versionadded:: 0.20

    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.

    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.
        When order is None (default), then if copy=False, nothing is ensured
        about the memory layout of the output array; otherwise (copy=True)
        the memory layout of the returned array is kept as close as possible
        to the original array.

    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean or 'allow-nan', (default=True)
        Whether to raise an error on np.inf and np.nan in array. The
        possibilities are:

        - True: Force all values of array to be finite.
        - False: accept both np.inf and np.nan in array.
        - 'allow-nan': accept only np.nan values in array. Values cannot
          be infinite.

        For object dtyped data, only np.nan is checked and not np.inf.

        .. versionadded:: 0.20
           ``force_all_finite`` accepts the string ``'allow-nan'``.

    ensure_2d : boolean (default=True)
        Whether to raise a value error if array is not 2D.

    allow_nd : boolean (default=False)
        Whether to allow array.ndim > 2.

    ensure_min_samples : int (default=1)
        Make sure that the array has a minimum number of samples in its first
        axis (rows for a 2D array). Setting to 0 disables this check.

    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when the input data has effectively 2
        dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
        disables this check.

    warn_on_dtype : boolean or None, optional (default=None)
        Raise DataConversionWarning if the dtype of the input data structure
        does not match the requested dtype, causing a memory copy.

        .. deprecated:: 0.21
            ``warn_on_dtype`` is deprecated in version 0.21 and will be
            removed in 0.23.

    estimator : str or estimator instance (default=None)
        If passed, include the name of the estimator in warning messages.

    Returns
    -------
    array_converted : object
        The converted and validated array.
    """
    # warn_on_dtype deprecation
    if warn_on_dtype is not None:
        warnings.warn(
            "'warn_on_dtype' is deprecated in version 0.21 and will be "
            "removed in 0.23. Don't set `warn_on_dtype` to remove this "
            "warning.",
            FutureWarning,
            stacklevel=2,
        )

    # store reference to original array to check if copy is needed when
    # function returns
    array_orig = array

    # store whether originally we wanted numeric dtype
    dtype_numeric = isinstance(dtype, str) and dtype == "numeric"

    dtype_orig = getattr(array, "dtype", None)
    if not hasattr(dtype_orig, "kind"):
        # not a data type (e.g. a column named dtype in a pandas DataFrame)
        dtype_orig = None

    # check if the object contains several dtypes (typically a pandas
    # DataFrame), and store them. If not, store None.
    dtypes_orig = None
    if hasattr(array, "dtypes") and hasattr(array.dtypes, "__array__"):
        dtypes_orig = list(array.dtypes)
        # pandas boolean dtype __array__ interface coerces bools to objects
        for i, dtype_iter in enumerate(dtypes_orig):
            if dtype_iter.kind == "b":
                dtypes_orig[i] = np.object

        if all(isinstance(dtype, np.dtype) for dtype in dtypes_orig):
            dtype_orig = np.result_type(*dtypes_orig)

    if dtype_numeric:
        if dtype_orig is not None and dtype_orig.kind == "O":
            # if input is object, convert to float.
            dtype = np.float64
        else:
            dtype = None

    if isinstance(dtype, (list, tuple)):
        if dtype_orig is not None and dtype_orig in dtype:
            # no dtype conversion required
            dtype = None
        else:
            # dtype conversion required. Let's select the first element of the
            # list of accepted types.
            dtype = dtype[0]

    if force_all_finite not in (True, False, "allow-nan"):
        raise ValueError(
            'force_all_finite should be a bool or "allow-nan"'
            ". Got {!r} instead".format(force_all_finite)
        )

    if estimator is not None:
        if isinstance(estimator, str):
            estimator_name = estimator
        else:
            estimator_name = estimator.__class__.__name__
    else:
        estimator_name = "Estimator"
    context = " by %s" % estimator_name if estimator is not None else ""

    if sp.issparse(array):
        if not allow_complex:
            _ensure_no_complex_data(array)
        array = _ensure_sparse_format(
            array,
            accept_sparse=accept_sparse,
            dtype=dtype,
            copy=copy,
            force_all_finite=force_all_finite,
            accept_large_sparse=accept_large_sparse,
        )
    else:
        # If np.array(..) gives ComplexWarning, then we convert the warning
        # to an error. This is needed because specifying a non complex
        # dtype to the function converts complex to real dtype,
        # thereby passing the test made in the lines following the scope
        # of warnings context manager.
        with warnings.catch_warnings():
            try:
                warnings.simplefilter("error", ComplexWarning)
                if dtype is not None and np.dtype(dtype).kind in "iu":
                    # Conversion float -> int should not contain NaN or
                    # inf (numpy#14412). We cannot use casting='safe' because
                    # then conversion float -> int would be disallowed.
                    array = np.asarray(array, order=order)
                    if array.dtype.kind == "f":
                        _assert_all_finite(array, allow_nan=False, msg_dtype=dtype)
                    array = array.astype(dtype, casting="unsafe", copy=False)
                else:
                    array = np.asarray(array, order=order, dtype=dtype)
            except ComplexWarning:
                raise ValueError("Complex data not supported\n" "{}\n".format(array))

        if not allow_complex:
            # It is possible that the np.array(..) gave no warning. This happens
            # when no dtype conversion happened, for example dtype = None. The
            # result is that np.array(..) produces an array of complex dtype
            # and we need to catch and raise exception for such cases.
            _ensure_no_complex_data(array)

        if ensure_2d:
            # If input is scalar raise error
            if array.ndim == 0:
                raise ValueError(
                    "Expected 2D array, got scalar array instead:\narray={}.\n"
                    "Reshape your data either using array.reshape(-1, 1) if "
                    "your data has a single feature or array.reshape(1, -1) "
                    "if it contains a single sample.".format(array)
                )
            # If input is 1D raise error
            if array.ndim == 1:
                raise ValueError(
                    "Expected 2D array, got 1D array instead:\narray={}.\n"
                    "Reshape your data either using array.reshape(-1, 1) if "
                    "your data has a single feature or array.reshape(1, -1) "
                    "if it contains a single sample.".format(array)
                )

        # in the future np.flexible dtypes will be handled like object dtypes
        if dtype_numeric and np.issubdtype(array.dtype, np.flexible):
            warnings.warn(
                "Beginning in version 0.22, arrays of bytes/strings will be "
                "converted to decimal numbers if dtype='numeric'. "
                "It is recommended that you convert the array to "
                "a float dtype before using it in scikit-learn, "
                "for example by using "
                "your_array = your_array.astype(np.float64).",
                FutureWarning,
                stacklevel=2,
            )

        # make sure we actually converted to numeric:
        if dtype_numeric and array.dtype.kind == "O":
            array = array.astype(np.float64)
        if not allow_nd and array.ndim >= 3:
            raise ValueError(
                "Found array with dim %d. %s expected <= 2."
                % (array.ndim, estimator_name)
            )

        if force_all_finite:
            _assert_all_finite(array, allow_nan=force_all_finite == "allow-nan")

    if ensure_min_samples > 0:
        n_samples = _num_samples(array)
        if n_samples < ensure_min_samples:
            raise ValueError(
                "Found array with %d sample(s) (shape=%s) while a"
                " minimum of %d is required%s."
                % (n_samples, array.shape, ensure_min_samples, context)
            )

    if ensure_min_features > 0 and array.ndim == 2:
        n_features = array.shape[1]
        if n_features < ensure_min_features:
            raise ValueError(
                "Found array with %d feature(s) (shape=%s) while"
                " a minimum of %d is required%s."
                % (n_features, array.shape, ensure_min_features, context)
            )

    if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
        msg = "Data with input dtype %s was converted to %s%s." % (
            dtype_orig,
            array.dtype,
            context,
        )
        warnings.warn(msg, DataConversionWarning, stacklevel=2)

    if copy and np.may_share_memory(array, array_orig):
        array = np.array(array, dtype=dtype, order=order)

    if warn_on_dtype and dtypes_orig is not None and {array.dtype} != set(dtypes_orig):
        # if there was at the beginning some other types than the final one
        # (for instance in a DataFrame that can contain several dtypes) then
        # some data must have been converted
        msg = "Data with input dtype %s were all converted to %s%s." % (
            ", ".join(map(str, sorted(set(dtypes_orig)))),
            array.dtype,
            context,
        )
        warnings.warn(msg, DataConversionWarning, stacklevel=3)

    return array
Example #15
def _daal_assert_all_finite(X, allow_nan=False, msg_dtype=None,
                            estimator_name=None, input_name=""):
    if _get_config()['assume_finite']:
        return

    # Data with small size has too big relative overhead
    # TODO: tune threshold size
    if hasattr(X, 'size'):
        if X.size < 32768:
            if sklearn_check_version("1.1"):
                _assert_all_finite(X, allow_nan=allow_nan, msg_dtype=msg_dtype,
                                   estimator_name=estimator_name, input_name=input_name)
            else:
                _assert_all_finite(X, allow_nan=allow_nan, msg_dtype=msg_dtype)
            return

    is_df = is_DataFrame(X)
    num_of_types = get_number_of_types(X)

    # if X is heterogeneous pandas.DataFrame then
    # covert it to a list of arrays
    if is_df and num_of_types > 1:
        lst = []
        for idx in X:
            arr = X[idx].to_numpy()
            lst.append(arr if arr.flags['C_CONTIGUOUS'] else np.ascontiguousarray(arr))
    else:
        X = np.asanyarray(X)
        is_df = False

    dt = np.dtype(get_dtype(X))
    is_float = dt.kind in 'fc'

    msg_err = "Input contains {} or a value too large for {!r}."
    type_err = 'infinity' if allow_nan else 'NaN, infinity'
    err = msg_err.format(type_err, msg_dtype if msg_dtype is not None else dt)

    if X.ndim in [1, 2] and not np.any(np.equal(X.shape, 0)) and \
            dt in [np.float32, np.float64]:
        if X.ndim == 1:
            X = X.reshape((-1, 1))

        x_for_daal = lst if is_df and num_of_types > 1 else X

        if dt == np.float64:
            if not d4p.daal_assert_all_finite(x_for_daal, allow_nan, 0):
                raise ValueError(err)
        elif dt == np.float32:
            if not d4p.daal_assert_all_finite(x_for_daal, allow_nan, 1):
                raise ValueError(err)
    # First try an O(n) time, O(1) space solution for the common case that
    # everything is finite; fall back to O(n) space np.isfinite to prevent
    # false positives from overflow in sum method. The sum is also calculated
    # safely to reduce dtype induced overflows.
    elif is_float and (np.isfinite(_safe_accumulator_op(np.sum, X))):
        pass
    elif is_float:
        if allow_nan and np.isinf(X).any() or \
                not allow_nan and not np.isfinite(X).all():
            raise ValueError(err)
    # for object dtype data, we only check for NaNs (GH-13254)
    elif dt == np.dtype('object') and not allow_nan:
        if _object_dtype_isnan(X).any():
            raise ValueError("Input contains NaN")