
All Samples(311)  |  Call(286)  |  Derive(0)  |  Import(25)

src/s/c/scikit-learn-0.14.1/sklearn/datasets/tests/test_samples_generator.py   (scikit-learn)
import numpy as np
 
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
 
    # Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
    assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
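The assertion above checks that the noise added by a dataset generator (here presumably make_regression) has the requested standard deviation, so the residual y - np.dot(X, c) should have std close to 1.0. A minimal self-contained sketch of the same check; the sample sizes and noise level are illustrative assumptions, and sklearn.utils.testing.assert_almost_equal is simply the 0.14.1 re-export of numpy.testing.assert_almost_equal:

import numpy as np
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_almost_equal

# Noisy linear problem; coef=True also returns the true coefficients c.
X, y, c = make_regression(n_samples=500, n_features=10, n_informative=3,
                          noise=1.0, coef=True, random_state=0)

# The residual after removing the linear part is ~N(0, 1.0), so its
# standard deviation should be 1.0 to about one decimal place.
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)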
 
 
 
 
 

src/s/c/scikit-learn-0.14.1/sklearn/linear_model/tests/test_sgd.py   (scikit-learn)
 
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
        p = clf.predict_proba([[.1, -.1], [.3, .2]])
        assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
        assert_almost_equal(p[0].sum(), 1)
        assert_true(np.all(p[0] >= 0))
 
 
        # should be similar up to some epsilon due to learning rate schedule
        assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
 
    @raises(ValueError)
        clf = self.factory(alpha=0.0001, n_iter=1000,
                           class_weight=None).fit(X, y)
        assert_almost_equal(metrics.f1_score(y, clf.predict(X)), 0.96,
                            decimal=1)
 
        # make the same prediction using automated class_weight
        clf_auto = self.factory(alpha=0.0001, n_iter=1000,
                                class_weight="auto").fit(X, y)
        assert_almost_equal(metrics.f1_score(y, clf_auto.predict(X)), 0.96,
                            decimal=1)
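These excerpts exercise SGDClassifier: probabilistic outputs must form a valid distribution, and class weighting should reproduce the unweighted fit up to the learning-rate schedule. A small hedged sketch of the probability check; the toy data and n_iter value are assumptions, and loss="log" follows the 0.14.1 API used above (newer releases spell it "log_loss"):

import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn.utils.testing import assert_almost_equal, assert_true

X = np.array([[-1.0, -1.0], [-1.0, 0.0], [1.0, 0.0], [1.0, 1.0]])
y = np.array([0, 0, 1, 1])

# Only probabilistic losses such as "log" expose predict_proba.
clf = SGDClassifier(loss="log", n_iter=50, random_state=0).fit(X, y)
p = clf.predict_proba([[0.1, -0.1], [0.3, 0.2]])

# Every row is a probability distribution: non-negative and summing to 1.
assert_almost_equal(p.sum(axis=1), 1.0)
assert_true(np.all(p >= 0))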

src/s/c/scikit-learn-0.14.1/sklearn/metrics/tests/test_metrics.py   (scikit-learn)
from sklearn.utils import check_random_state, shuffle
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.testing import (assert_true,
                                   assert_raises,
                                   assert_raise_message,
                                   assert_almost_equal,
                                   assert_array_almost_equal)
    expected_auc = _auc(y_true, probas_pred)
    assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
    assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
 
    with warnings.catch_warnings(record=True):
        assert_almost_equal(roc_auc, auc_score(y_true, probas_pred))
    assert_array_almost_equal(fs, 0.76, 2)
 
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2),
                        (1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
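The relation being asserted is the definition of the F-beta score as a weighted harmonic mean of precision P and recall R: F_beta = (1 + beta^2) * P * R / (beta^2 * P + R). A hedged standalone version with made-up binary labels (not the arrays used in the real test):

import numpy as np
from sklearn.metrics import fbeta_score, precision_score, recall_score
from sklearn.utils.testing import assert_almost_equal

y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0, 0, 1, 0, 1, 0])

ps = precision_score(y_true, y_pred)   # P = 1.0 on this toy example
rs = recall_score(y_true, y_pred)      # R = 0.5

# F_beta = (1 + beta^2) * P * R / (beta^2 * P + R)
beta = 2
assert_almost_equal(fbeta_score(y_true, y_pred, beta=beta),
                    (1 + beta ** 2) * ps * rs / (beta ** 2 * ps + rs), 2)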
 
        y_pred = np.array([2, 0, 1, 1, 2, 0])
 
        assert_almost_equal(precision_score(y_true, y_pred,
                                            average='weighted'), 0.0, 2)
        assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),

src/s/c/scikit-learn-0.14.1/sklearn/tree/tests/test_tree.py   (scikit-learn)
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
        reg = Tree(random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
 
        clf = Tree(max_features=1, random_state=1)
        clf.fit(X, y)
        assert_almost_equal(clf.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
        assert_almost_equal(clf.predict_proba(iris.data),
                            np.exp(clf.predict_log_proba(iris.data)), 8,
                            err_msg="Failed with {0}".format(name))
        reg = TreeRegressor(random_state=0)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
                            err_msg="Failed with {0}".format(name))
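The probability checks above rely on predict_log_proba being exactly the log of predict_proba. A minimal sketch of that invariant on a decision tree; iris and the fully grown tree are illustrative choices, not the fixtures from the test module:

import numpy as np
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils.testing import assert_almost_equal

iris = load_iris()
clf = DecisionTreeClassifier(random_state=0).fit(iris.data, iris.target)

# Exponentiating the log-probabilities must recover predict_proba
# (pure leaves give log(0) = -inf, and exp(-inf) = 0 still matches).
assert_almost_equal(clf.predict_proba(iris.data),
                    np.exp(clf.predict_log_proba(iris.data)), 8)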
 

src/s/c/scikit-learn-0.14.1/sklearn/tests/test_random_projection.py   (scikit-learn)
from sklearn.random_projection import GaussianRandomProjection
 
from sklearn.utils.testing import (
    assert_less,
    assert_raises,
    assert_almost_equal)
        # Each entry of A is distributed as:
        #    0                               with probability 1 - 1 / s
        #   +sqrt(s) / sqrt(n_components)    with probability 1 / 2s
        #   -sqrt(s) / sqrt(n_components)    with probability 1 / 2s
        assert_almost_equal(np.mean(A == 0.0),
                            1 - 1 / s, decimal=2)
        assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
                            1 / (2 * s), decimal=2)
        assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
                            1 / (2 * s), decimal=2)
 
        assert_almost_equal(np.var(A == 0.0, ddof=1),
                            (1 - 1 / s) * 1 / s, decimal=2)
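The excerpt checks that a sparse (Achlioptas-style) random projection matrix has the advertised element distribution. A self-contained sketch that draws such a matrix directly with NumPy and verifies the empirical frequencies; s, n_components, and the matrix size are arbitrary assumptions of this example:

import numpy as np
from sklearn.utils.testing import assert_almost_equal

rng = np.random.RandomState(0)
s = 3.0                      # s = 1 / density
n_components = 100

# Entries are 0 with probability 1 - 1/s and +/- sqrt(s)/sqrt(n_components)
# with probability 1/(2s) each.
signs = rng.choice([0.0, 1.0, -1.0], size=(n_components, 2000),
                   p=[1 - 1 / s, 1 / (2 * s), 1 / (2 * s)])
A = signs * np.sqrt(s) / np.sqrt(n_components)

# Empirical frequencies match the probabilities to about two decimals.
assert_almost_equal(np.mean(A == 0.0), 1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
                    1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == -np.sqrt(s) / np.sqrt(n_components)),
                    1 / (2 * s), decimal=2)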

src/s/c/scikit-learn-0.14.1/sklearn/utils/tests/test_extmath.py   (scikit-learn)
 
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.extmath import logsumexp, randomized_svd
def test_logsumexp():
    # Try to add some smallish numbers in logspace
    x = np.array([1e-40] * 1000000)
    logx = np.log(x)
    assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
    # ensure that the singular values of both methods are equal up to the real
    # rank of the matrix
    assert_almost_equal(s[:k], sa)
 
    # check the singular vectors too (while not checking the sign)
    assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
    # compute the singular values of X using the fast approximate method
    Ua, sa, Va = randomized_svd(X, k)
    assert_almost_equal(s[:rank], sa[:rank])
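The singular-value check compares LAPACK's exact SVD against randomized_svd. A hedged sketch with an exactly low-rank matrix, where the approximation should reproduce the leading singular values to full precision; the matrix shape and rank are arbitrary assumptions:

import numpy as np
from scipy import linalg
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.testing import assert_almost_equal

rng = np.random.RandomState(42)
k = 5
# An exactly rank-k matrix, so the top-k singular values are recoverable.
X = np.dot(rng.randn(60, k), rng.randn(k, 40))

U, s, V = linalg.svd(X, full_matrices=False)   # exact reference
Ua, sa, Va = randomized_svd(X, k)              # randomized approximation

# Both methods agree on the k non-zero singular values.
assert_almost_equal(s[:k], sa)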
 
 

src/s/c/scikit-learn-0.14.1/sklearn/decomposition/tests/test_fastica.py   (scikit-learn)
from nose.tools import assert_raises
 
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
        # Check that the mixing model described in the docstring holds:
        if whiten:
            assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
 
        center_and_norm(s_)
        # Check that we have estimated the original sources
        if not add_noise:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
        else:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
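This test checks that FastICA recovers the independent sources up to sign and permutation, so the correlation between each estimated source and a true source should be close to 1 in absolute value. A self-contained sketch under that interpretation; the Laplace/uniform sources, the 2x2 mixing matrix, and the decimal=1 tolerance are all assumptions of this example:

import numpy as np
from sklearn.decomposition import FastICA
from sklearn.utils.testing import assert_almost_equal

rng = np.random.RandomState(0)
n_samples = 10000

# Two independent, non-Gaussian sources, centred and scaled to unit variance.
S = np.c_[rng.laplace(size=n_samples), rng.uniform(-1, 1, size=n_samples)]
S -= S.mean(axis=0)
S /= S.std(axis=0)

A = np.array([[1.0, 0.5], [0.5, 1.0]])        # mixing matrix
X = np.dot(S, A.T)                            # observed mixtures

S_ = FastICA(n_components=2, random_state=0).fit_transform(X)
S_ -= S_.mean(axis=0)
S_ /= S_.std(axis=0)

# Up to sign and permutation, each estimated source matches a true source,
# so the best absolute correlation per row is ~1.
corr = np.abs(np.dot(S_.T, S)) / n_samples
assert_almost_equal(corr.max(axis=1), np.ones(2), decimal=1)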

src/s/c/scikit-learn-0.14.1/sklearn/decomposition/tests/test_pca.py   (scikit-learn)
from scipy.sparse import csr_matrix
 
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
    pca = PCA()
    pca.fit(X)
    assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
 
    X_r = pca.transform(X)
 
    # the component-wise variance is thus highly varying:
    assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
 
    for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
        assert_array_almost_equal(X_whitened, X_whitened2)
 
        assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
        assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
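Whitening in PCA rescales the projected data so each retained component has zero mean and unit variance, which is what the assertions above verify. A hedged sketch; the data, the number of components, and the relaxed decimal=1 on the std check (tolerating the ddof convention differing across scikit-learn versions) are assumptions of this example:

import numpy as np
from sklearn.decomposition import PCA
from sklearn.utils.testing import assert_almost_equal

rng = np.random.RandomState(0)
# Columns with very different scales, so whitening has something to undo.
X = rng.randn(100, 5) * np.array([1.0, 2.0, 5.0, 10.0, 20.0])

n_components = 3
pca = PCA(n_components=n_components, whiten=True)
X_whitened = pca.fit_transform(X)

# Each whitened component is centred with (approximately) unit variance.
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components), decimal=1)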
 

src/s/c/scikit-learn-0.14.1/sklearn/tests/test_dummy.py   (scikit-learn)
from sklearn.base import clone
from sklearn.externals.six.moves import xrange
from sklearn.utils.testing import (assert_array_equal,
                                   assert_equal,
                                   assert_almost_equal)
    y_pred = clf.predict(X)
    p = np.bincount(y_pred) / float(len(X))
    assert_almost_equal(p[1], 3. / 5, decimal=1)
    assert_almost_equal(p[2], 2. / 5, decimal=1)
    _check_predict_proba(clf, X, y)
    for k in xrange(y.shape[1]):
        p = np.bincount(y_pred[:, k]) / float(len(X))
        assert_almost_equal(p[1], 3. / 5, decimal=1)
        assert_almost_equal(p[2], 2. / 5, decimal=1)
        _check_predict_proba(clf, X, y)
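DummyClassifier(strategy="stratified") predicts labels at random with the training class frequencies, which is why the predicted label histogram is compared against 3/5 and 2/5 above. A standalone sketch with assumed class priors of 0.6/0.4:

import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.utils.testing import assert_almost_equal

rng = np.random.RandomState(0)
n_samples = 2000
X = np.zeros((n_samples, 1))                       # features are ignored
y = rng.choice([1, 2], size=n_samples, p=[0.6, 0.4])

clf = DummyClassifier(strategy="stratified", random_state=0).fit(X, y)
y_pred = clf.predict(X)

# Predicted label frequencies roughly reproduce the training priors.
p = np.bincount(y_pred) / float(n_samples)
assert_almost_equal(p[1], 0.6, decimal=1)
assert_almost_equal(p[2], 0.4, decimal=1)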

src/s/c/scikit-learn-0.14.1/sklearn/tests/test_multiclass.py   (scikit-learn)
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
        Y_pred = clf.predict(X_test)
        assert_true(clf.multilabel_)
        assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
                            prec,
                            decimal=2)
        assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
    Y_proba = clf.predict_proba(X_test)
 
    assert_almost_equal(Y_proba.sum(axis=1), 1.0)
    # predict assigns a label if the probability that the
    # sample has the label is greater than 0.5.
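For a single-label multiclass problem, OneVsRestClassifier normalizes the per-class probabilities, so each row of predict_proba sums to one (in the multilabel case, by contrast, a label is assigned whenever its probability exceeds 0.5). A small sketch of the multiclass check; iris and LogisticRegression as the base estimator are illustrative assumptions:

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.utils.testing import assert_almost_equal

iris = load_iris()
clf = OneVsRestClassifier(LogisticRegression()).fit(iris.data, iris.target)
Y_proba = clf.predict_proba(iris.data)

# Single-label multiclass: each row is a normalized probability distribution.
assert_almost_equal(Y_proba.sum(axis=1), 1.0)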
