Did I find the right examples for you? yes no      Crawl my project      Python Jobs

# cogent.maths.stats.special.MACHEP

All Samples(6)  |  Call(0)  |  Derive(0)  |  Import(6)

src/c/o/cogent-1.5.3/cogent/maths/stats/distribution.py   cogent(Download)
```"""
from __future__ import division
from cogent.maths.stats.special import erf, erfc, igamc, igam, betai, log1p, \
expm1, SQRTH, MACHEP, MAXNUM, PI, ndtri, incbi, igami, fix_rounding_error,\
ln_binomial
```
```
            tz = 1
            j = 3
            while (j <= (k-2)) and ((tz/f) > MACHEP):
                tz *= (j-1)/(z*j)
                f += tz
```
```
        tz = 1
        j = 2
        while (j <= (k-2)) and ((tz/f) > MACHEP):
            tz *= (j-1)/(z*j)
            f += tz
```

src/p/y/pycogent-HEAD/cogent/maths/stats/distribution.py   pycogent(Download)
```"""
from __future__ import division
from cogent.maths.stats.special import erf, erfc, igamc, igam, betai, log1p, \
expm1, SQRTH, MACHEP, MAXNUM, PI, ndtri, incbi, igami, fix_rounding_error,\
ln_binomial
```
```
            tz = 1
            j = 3
            while (j <= (k-2)) and ((tz/f) > MACHEP):
                tz *= (j-1)/(z*j)
                f += tz
```
```
        tz = 1
        j = 2
        while (j <= (k-2)) and ((tz/f) > MACHEP):
            tz *= (j-1)/(z*j)
            f += tz
```

src/q/i/qiime-HEAD/qiime/pycogent_backports/test.py   qiime(Download)
```                                             t_high, t_low, tprob, f_high, f_low, fprob, binomial_high, binomial_low,
ndtri)
from cogent.maths.stats.special import log_one_minus, one_minus_exp, MACHEP
from cogent.maths.stats import chisqprob
from cogent.maths.stats.ks import psmirnov2x, pkstwo
```
```    # One important difference is I preserve the original sample sizes
# instead of making them equal
tol = MACHEP * 100
combined = array(list(x) + list(y))
observed_stat, _p = ks_test(x, y, exact=False, warn_for_ties=False)
```
```    Uses the same Monte-Carlo resampling code as kw_boot
"""
tol = MACHEP * 100
combined = array(list(x) + list(y))
observed_stat, obs_p = mw_test(x, y)
```

src/p/y/pycogent-HEAD/cogent/maths/stats/test.py   pycogent(Download)
```    t_high, t_low, tprob, f_high, f_low, fprob, binomial_high, binomial_low, \
ndtri
from cogent.maths.stats.special import lgam, log_one_minus, one_minus_exp,\
MACHEP
from cogent.maths.stats.ks import psmirnov2x, pkstwo
```
```    # One important difference is I preserve the original sample sizes
# instead of making them equal
tol = MACHEP * 100
combined = array(list(x) + list(y))
observed_stat, _p = ks_test(x, y, exact=False, warn_for_ties=False)
```
```    Uses the same Monte-Carlo resampling code as kw_boot
"""
tol = MACHEP * 100
combined = array(list(x) + list(y))
observed_stat, obs_p = mw_test(x, y)
```

src/c/o/cogent-1.5.3/cogent/maths/stats/test.py   cogent(Download)
```    t_high, t_low, tprob, f_high, f_low, fprob, binomial_high, binomial_low, \
ndtri
from cogent.maths.stats.special import lgam, log_one_minus, one_minus_exp,\
MACHEP
from cogent.maths.stats.ks import psmirnov2x, pkstwo
```
```    # One important difference is I preserve the original sample sizes
# instead of making them equal
tol = MACHEP * 100
combined = array(list(x) + list(y))
observed_stat, _p = ks_test(x, y, exact=False, warn_for_ties=False)
```
```    Uses the same Monte-Carlo resampling code as kw_boot
"""
tol = MACHEP * 100
combined = array(list(x) + list(y))
observed_stat, obs_p = mw_test(x, y)
```

src/q/i/qiime-1.8.0/qiime/pycogent_backports/test.py   qiime(Download)
```    t_high, t_low, tprob, f_high, f_low, fprob, binomial_high, binomial_low,
ndtri)
from cogent.maths.stats.special import (lgam, log_one_minus, one_minus_exp,
MACHEP)
from cogent.maths.stats import chisqprob
```
```    # One important difference is I preserve the original sample sizes
# instead of making them equal
tol = MACHEP * 100
combined = array(list(x) + list(y))
observed_stat, _p = ks_test(x, y, exact=False, warn_for_ties=False)
```
```    Uses the same Monte-Carlo resampling code as kw_boot
"""
tol = MACHEP * 100
combined = array(list(x) + list(y))
observed_stat, obs_p = mw_test(x, y)
```