
All samples: 378  |  Calls: 330  |  Derives: 0  |  Imports: 48

src/s/c/scikit-learn-0.14.1/sklearn/metrics/tests/test_metrics.py
from sklearn.utils import check_random_state, shuffle
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.testing import (assert_true,
                                   assert_raises,
                                   assert_raise_message)
def test_roc_curve_multi():
    """roc_curve not applicable for multi-class problems"""
    y_true, _, probas_pred = make_prediction(binary=False)
 
    assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_auc_errors():
    # Incompatible shapes
    assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
 
    # Too few x values
    assert_raises(ValueError, auc, [0.0], [0.1])
 
    # x is not in order
    assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
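
A self-contained sketch of the auc checks above, not taken from the listing: numpy.testing.assert_raises stands in for the sklearn.utils.testing helper, and sklearn.metrics.auc is assumed to keep its ValueError behaviour for these inputs.

from numpy.testing import assert_raises
from sklearn.metrics import auc

# Incompatible shapes: x and y must have the same length
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Fewer than two x values cannot define an area
assert_raises(ValueError, auc, [0.0], [0.1])
# x must be monotonic (increasing or decreasing)
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])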

src/s/c/scikit-learn-0.14.1/sklearn/tests/test_cross_validation.py
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
def test_kfold_valueerrors():
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.KFold, 3, 4)
 
    # Check that a warning is raised if the least populated class has too few members
 
    # Error when number of folds is <= 1
    assert_raises(ValueError, cval.KFold, 2, 0)
    assert_raises(ValueError, cval.KFold, 2, 1)
    assert_raises(ValueError, cval.StratifiedKFold, y, 0)
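
A hedged standalone version of the same guards against the current model_selection API (the 0.14-era cval.KFold above took n_samples and n_folds positionally); assumed behaviour: a single fold is rejected at construction, and more folds than samples when the splits are generated.

import numpy as np
from numpy.testing import assert_raises
from sklearn.model_selection import KFold

# Fewer than two folds is rejected when the splitter is built
assert_raises(ValueError, KFold, 1)
# More folds than samples is rejected when the folds are generated
assert_raises(ValueError, list, KFold(4).split(np.arange(3)))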

src/s/c/scikit-learn-0.14.1/sklearn/cluster/tests/test_spectral.py
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
 
        assert_greater(np.mean(labels == true_labels), .3)
    else:
        assert_raises(ValueError, spectral_embedding, S,
                      n_components=len(centers),
                      random_state=0, eigen_solver="amg")
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
                  random_state=0, eigen_solver="<unknown>")
 
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
                  random_state=0, assign_labels="<unknown>")
 
    # raise error on unknown affinity
    sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
    assert_raises(ValueError, sp.fit, X)
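
A minimal runnable sketch of the unknown-affinity check, not from the listing: X is arbitrary illustrative data, and SpectralClustering is assumed to reject an affinity string it cannot interpret with a ValueError at fit time.

import numpy as np
from numpy.testing import assert_raises
from sklearn.cluster import SpectralClustering

X = np.random.RandomState(0).rand(10, 2)
sp = SpectralClustering(n_clusters=2, affinity="<unknown>")
# Neither a built-in affinity option nor a known kernel name
assert_raises(ValueError, sp.fit, X)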
 
 

src/s/c/scikit-learn-0.14.1/sklearn/ensemble/tests/test_partial_dependence.py
from numpy.testing import assert_array_equal
 
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
    clf.fit(X, y)
 
    assert_raises(ValueError, partial_dependence,
                  clf, [0], grid=None, X=None)
 
    assert_raises(ValueError, partial_dependence,
                  clf, [0], grid=[0, 1], X=X)
 
    # first argument must be an instance of BaseGradientBoosting
    assert_raises(ValueError, partial_dependence,
 
    # Gradient boosting estimator must be fit
    assert_raises(ValueError, partial_dependence,
                  GradientBoostingClassifier(), [0], X=X)
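
The sklearn.ensemble.partial_dependence module shown above was later removed; a loose modern-API sketch of the "estimator must be fit" check, assuming sklearn.inspection.partial_dependence raises NotFittedError (a ValueError subclass) for an unfitted estimator.

import numpy as np
from numpy.testing import assert_raises
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.inspection import partial_dependence

X = np.random.RandomState(0).rand(20, 3)
# Gradient boosting estimator must be fit before partial dependence is computed
assert_raises(ValueError, partial_dependence, GradientBoostingClassifier(), X, [0])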
 

src/s/c/scikit-learn-0.14.1/sklearn/tests/test_grid_search.py
 
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
    # Test exception handling on scoring
    grid_search.scoring = 'sklearn'
    assert_raises(ValueError, grid_search.fit, X, y)
 
 
    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    assert_raises(ValueError, cv.fit, X_[:180], y_)
 
 
def test_grid_search_bad_param_grid():
    param_dict = {"C": 1.0}
    clf = SVC()
    assert_raises(ValueError, GridSearchCV, clf, param_dict)
 
    param_dict = {"C": []}
    clf = SVC()
    assert_raises(ValueError, GridSearchCV, clf, param_dict)
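
A standalone sketch of the bad-scoring check, not from the listing: iris is stand-in data, "not-a-scorer" is an illustrative bad value, and GridSearchCV is assumed to reject an unknown scoring string with a ValueError when fit is called.

from numpy.testing import assert_raises
from sklearn.datasets import load_iris
from sklearn.svm import LinearSVC
from sklearn.model_selection import GridSearchCV

X, y = load_iris(return_X_y=True)
search = GridSearchCV(LinearSVC(), {"C": [0.1, 1.0]}, scoring="not-a-scorer")
# An unrecognised scoring string is rejected before any model is trained
assert_raises(ValueError, search.fit, X, y)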

src/s/c/scikit-learn-0.14.1/sklearn/metrics/tests/test_pairwise.py
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
 
    assert_array_almost_equal(S, S2)
    # manhattan does not support sparse matrices atm.
    assert_raises(ValueError, pairwise_distances, csr_matrix(X),
                  metric="manhattan")
    # Low-level function for manhattan can divide in blocks to avoid
    assert_array_almost_equal(S, S2)
    # Test that scipy distance metrics throw an error if sparse matrix given
    assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
    assert_raises(TypeError, pairwise_distances, X, Y_sparse,
                  metric="minkowski")
        if metric in ["chi2", "additive_chi2"]:
            # these don't support sparse matrices yet
            assert_raises(ValueError, pairwise_kernels,
                          X_sparse, Y=Y_sparse, metric=metric)
            continue
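
A minimal runnable version of the sparse-input check above, assuming pairwise_distances still routes "minkowski" to the scipy metrics, which reject sparse matrices with a TypeError.

import numpy as np
from scipy.sparse import csr_matrix
from numpy.testing import assert_raises
from sklearn.metrics import pairwise_distances

X_sparse = csr_matrix(np.random.RandomState(0).rand(5, 3))
# scipy distance metrics do not accept sparse matrices
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")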

src/s/c/scikit-learn-0.14.1/sklearn/decomposition/tests/test_kernel_pca.py
import numpy as np
import scipy.sparse as sp
 
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
                                   assert_equal, assert_not_equal,
                                   assert_raises)
def test_invalid_parameters():
    assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
                  kernel='precomputed')
 
 
def test_kernel_pca_invalid_kernel():
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((2, 4))
    kpca = KernelPCA(kernel="tototiti")
    assert_raises(ValueError, kpca.fit, X_fit)
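
A self-contained sketch of the invalid-kernel check, with numpy.testing.assert_raises standing in for the sklearn.utils.testing helper; KernelPCA is assumed to reject an unknown kernel name with a ValueError when fit is called.

import numpy as np
from numpy.testing import assert_raises
from sklearn.decomposition import KernelPCA

X_fit = np.random.RandomState(0).random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
# "tototiti" is not a supported kernel name
assert_raises(ValueError, kpca.fit, X_fit)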

src/s/c/scikit-learn-0.14.1/sklearn/linear_model/tests/test_omp.py
import numpy as np
 
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
def test_bad_input():
    assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
    assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
    assert_raises(ValueError, orthogonal_mp, X, y,
                  n_nonzero_coefs=n_features + 1)
    assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
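
The snippet above omits its X, y and G, Xy fixtures; a hedged standalone version with random illustrative data, assuming orthogonal_mp rejects a negative tolerance and a non-positive coefficient count with a ValueError.

import numpy as np
from numpy.testing import assert_raises
from sklearn.linear_model import orthogonal_mp

rng = np.random.RandomState(0)
X, y = rng.randn(10, 5), rng.randn(10)
# Negative tolerance is rejected
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
# The number of nonzero coefficients must be positive
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)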

src/s/c/scikit-learn-0.14.1/sklearn/datasets/tests/test_svmlight_format.py
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
    utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
    f = BytesIO()
    assert_raises(UnicodeDecodeError,
                  dump_svmlight_file, X, y, f, comment=utf8_comment)
 
 
    f = BytesIO()
    assert_raises(ValueError,
                  dump_svmlight_file, X, y, f, comment="I've got a \0.")
 
    f = BytesIO()
    y2d = [y]
    assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
 
    f = BytesIO()
    assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
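
A minimal runnable version of the length-mismatch check, with illustrative X and y; dump_svmlight_file is assumed to raise a ValueError when X and y disagree on the number of samples.

import numpy as np
from io import BytesIO
from numpy.testing import assert_raises
from sklearn.datasets import dump_svmlight_file

X = np.random.RandomState(0).rand(4, 3)
y = np.arange(4)
# X and y must have the same number of samples
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], BytesIO())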

src/s/c/scikit-learn-0.14.1/sklearn/linear_model/tests/test_sgd.py
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
 
        Y_ = np.c_[Y_, Y_]
        assert_raises(ValueError, clf.fit, X, Y_)
 
    def test_clone(self):
        # Provided coef_ does not match dataset
        clf = self.factory()
        assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
 
        # Provided coef_ does match dataset
        clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
 
        # Provided intercept_ does not match dataset
        clf = self.factory()
        assert_raises(ValueError, clf.fit, X2, Y2,
        # hinge loss does not allow for conditional prob estimate
        clf = self.factory(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
        assert_raises(NotImplementedError, clf.predict_proba, [3, 2])
 
        # log and modified_huber losses can output probability estimates
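
A self-contained sketch of the coef_init shape check from the excerpt above (the listing's factory and X2/Y2 fixtures are replaced with illustrative data); SGDClassifier.fit is assumed to reject a coef_init whose shape does not match the dataset with a ValueError.

import numpy as np
from numpy.testing import assert_raises
from sklearn.linear_model import SGDClassifier

X2 = np.array([[-1.0, -1.0], [-1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
Y2 = np.array([0, 0, 1, 2])  # three classes, two features
clf = SGDClassifier()
# coef_init must have shape (n_classes, n_features), here (3, 2)
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))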
