Usage samples for pylearn2.utils.safe_zip: 115 samples across the pylearn2 codebase (90 call sites, 25 imports, 0 subclass uses).
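
Every call site below relies on the same contract: safe_zip behaves like the built-in zip, but fails loudly when its arguments differ in length instead of silently truncating to the shortest. A minimal sketch of that contract (an illustration, not pylearn2's actual implementation):

def safe_zip(*args):
    # Like zip, but refuse to silently drop trailing elements.
    base = len(args[0])
    for i, seq in enumerate(args):
        if len(seq) != base:
            raise ValueError("argument %d has length %d, expected %d"
                             % (i, len(seq), base))
    return list(zip(*args))

print(safe_zip([1, 2], ['a', 'b']))   # [(1, 'a'), (2, 'b')]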

src/p/y/pylearn2-HEAD/pylearn2/scripts/gsn_example.py
from pylearn2.train import Train
from pylearn2.training_algorithms.sgd import SGD, MonitorBasedLRAdjuster
from pylearn2.utils import image, safe_zip
 
# define some common parameters
            labels.append(nd)
 
        data = safe_zip(images, labels)
        data = list(itertools.chain(*data))
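
The snippet above pairs each image with its label, then flattens the pairs into an interleaved flat list [image0, label0, image1, label1, ...]. A self-contained sketch of the same idiom with placeholder data (the built-in zip stands in for safe_zip):

import itertools

images = ['img0', 'img1', 'img2']
labels = [0, 1, 2]
data = list(zip(images, labels))       # [('img0', 0), ('img1', 1), ('img2', 2)]
data = list(itertools.chain(*data))    # ['img0', 0, 'img1', 1, 'img2', 2]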
 

src/p/y/pylearn2-HEAD/pylearn2/models/dbm/layer.py
from pylearn2.space import VectorSpace, CompositeSpace, Conv2DSpace, Space
from pylearn2.utils import is_block_gradient
from pylearn2.utils import sharedX, safe_zip, py_integer_types, block_gradient
from pylearn2.utils.rng import make_theano_rng
"""
            assert all([len(elem) == 2 for elem in [state, coeffs]])
 
        for s, c in safe_zip(state, coeffs):
            assert all([isinstance(elem, float) for elem in [c]])
            if c == 0.:
                warnings.warn("Do you really want to regularize the detector units to be more active than the pooling units?")
 
        for s, t, c, e in safe_zip(state, target, coeff, eps):
            assert all([isinstance(elem, float) or hasattr(elem, 'dtype') for elem in [t, c, e]])
            if c == 0.:
                warnings.warn("Do you really want to regularize the detector units to be more active than the pooling units?")
 
        for s, t, c in safe_zip(state, target, coeff):
            assert all([isinstance(elem, float) or hasattr(elem, 'dtype') for elem in [t, c]])
            if c == 0.:
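
These layer.py call sites all walk parallel lists (state, target, coefficient, and sometimes an epsilon) and skip any term whose coefficient is exactly zero. A sketch of that skip-zero pattern; activation_penalty and its cost formula are invented here for illustration, with plain floats standing in for layer activations:

def activation_penalty(states, targets, coeffs):
    # Skip-zero idiom: a term with coefficient 0. contributes nothing,
    # so it is dropped before any expression is built.
    total = 0.0
    for s, t, c in zip(states, targets, coeffs):   # safe_zip in pylearn2
        assert isinstance(c, float)
        if c == 0.:
            continue
        total += c * abs(s - t)
    return total

print(activation_penalty([0.9, 0.2], [0.1, 0.1], [1.0, 0.0]))   # 0.8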

src/p/y/pylearn2-HEAD/pylearn2/costs/dbm.py
from pylearn2.utils import make_name
from pylearn2.utils import safe_izip
from pylearn2.utils import safe_zip
from pylearn2.utils import sharedX
from pylearn2.utils.rng import make_theano_rng
 
        neg_phase_grads = OrderedDict(
            safe_zip(params, T.grad(-expected_energy_p, params,
                                    consider_constant=constants)))
        return neg_phase_grads
 
        neg_phase_grads = OrderedDict(
            safe_zip(params, T.grad(-expected_energy_p, params,
                                    consider_constant=samples,
                                    disconnected_inputs='ignore'))
        params = list(model.get_params())
        gradients = OrderedDict(
            safe_zip(params, T.grad(expected_energy_q,
                                    params,
                                    consider_constant=variational_params,
        params = list(model.get_params())
        gradients = OrderedDict(
            safe_zip(params, T.grad(expected_energy_q, params,
                                    consider_constant=pos_samples,
                                    disconnected_inputs='ignore'))
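
costs/dbm.py repeatedly pairs each model parameter with its gradient and wraps the pairs in an OrderedDict, so gradient lookup preserves parameter order. A framework-free sketch of that pairing, with plain numbers standing in for the Theano expressions returned by T.grad:

from collections import OrderedDict

params = ['W', 'vis_bias', 'hid_bias']
grads = [0.1, -0.2, 0.05]                    # stand-ins for T.grad(...) outputs
grad_map = OrderedDict(zip(params, grads))   # safe_zip in pylearn2
assert list(grad_map) == params              # iteration order matches params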

src/p/y/pylearn2-HEAD/pylearn2/optimization/batch_gradient_descent.py
from pylearn2.utils import function
from pylearn2.utils import grad
from pylearn2.utils import safe_zip
from pylearn2.utils import sharedX
 
            def dot_product(x, y):
                return sum([ (x_elem * y_elem).sum() for x_elem, y_elem in safe_zip(x, y) ])
 
            beta_pr = (dot_product(grad_ordered, grad_ordered) - dot_product(grad_ordered, old_grad_ordered)) / \
                    (1e-7+dot_product(old_grad_ordered, old_grad_ordered))
                            logger.info('shrinking the range of step sizes')
                        alpha_list = [ (alpha ** weight) * (best_alpha ** (1.-weight)) for alpha in alpha_list ]
                        assert all([second > first for first, second in safe_zip(alpha_list[:-1], alpha_list[1:])])
                        # y^(weight) best^(1-weight) / x^(weight) best^(1-weight) = (y/x)^weight
                        # so this shrinks the ratio between each successive pair of alphas by raising it to weight
            WRITEME
        """
        return [elem for elem, shared in safe_zip(inputs, self._shared_mask) if not shared ]
 
    def _shared_inputs(self, inputs):
            WRITEME
        """
        return [elem for elem, shared in safe_zip(inputs, self._shared_mask) if shared ]
 
    def _set_shared(self, inputs):
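
batch_gradient_descent.py treats a list of parameter tensors as one flat vector: dot_product sums elementwise products across the list, and beta_pr is the Polak-Ribiere coefficient from nonlinear conjugate gradient, with 1e-7 guarding against division by zero. A NumPy sketch of both pieces:

import numpy as np

def dot_product(xs, ys):
    # Inner product of two "vectors" stored as lists of arrays.
    return sum((x * y).sum() for x, y in zip(xs, ys))   # safe_zip in pylearn2

grad = [np.array([1., 2.]), np.array([3.])]
old_grad = [np.array([0.5, 1.]), np.array([2.])]
beta_pr = ((dot_product(grad, grad) - dot_product(grad, old_grad))
           / (1e-7 + dot_product(old_grad, old_grad)))
print(beta_pr)   # roughly (14 - 8.5) / 5.25 = 1.048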

src/p/y/pylearn2-HEAD/pylearn2/space/__init__.py
from theano.gof.op import get_debug_values
from theano.sandbox.cuda.type import CudaNdarrayType
from pylearn2.utils import py_integer_types, safe_zip, sharedX, wraps
from pylearn2.format.target_format import OneHotFormatter
 
        """
        if isinstance(new_dtype, tuple):
            for component, new_dt in safe_zip(self.components, new_dtype):
                component.dtype = new_dt
        elif new_dtype is None or isinstance(new_dtype, str):
                    return tuple(recursive_format_as(os, bt, ds)
                                 for os, bt, ds
                                 in safe_zip(orig_space.components,
                                             batch,
                                             dest_space.components))
        return tuple(component.get_origin_batch(batch_size, dt)
                     for component, dt
                     in safe_zip(self.components, dtype))
 
    @functools.wraps(Space.make_theano_batch)
                                          dtype=d,
                                          batch_size=batch_size)
                      for x, n, d in safe_zip(self.components,
                                              name,
                                              dtype)])
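
space/__init__.py applies the same idiom throughout CompositeSpace: a tuple of per-component values (dtypes, names, batch pieces) is distributed across self.components in parallel. A minimal sketch of the dtype case; set_dtypes is invented for illustration and dicts stand in for component spaces:

def set_dtypes(components, new_dtype):
    # A tuple of dtypes is distributed component-wise; a single string
    # (or None) would apply uniformly to every component.
    if isinstance(new_dtype, tuple):
        for component, dt in zip(components, new_dtype):   # safe_zip in pylearn2
            component['dtype'] = dt
    else:
        for component in components:
            component['dtype'] = new_dtype

components = [{'dtype': None}, {'dtype': None}]
set_dtypes(components, ('float32', 'int64'))
print(components)   # [{'dtype': 'float32'}, {'dtype': 'int64'}]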

src/p/y/pylearn2-HEAD/pylearn2/models/dbm/dbm.py
from pylearn2.models.dbm.inference_procedure import WeightDoubling
from pylearn2.models.dbm.sampling_procedure import GibbsEvenOdd
from pylearn2.utils import safe_zip, safe_izip
from pylearn2.utils.rng import make_np_rng
 
        states = [layer.make_state(num_examples, rng) for layer in layers]
 
        zipped = safe_zip(layers, states)
 
        def recurse_check(layer, state):
                  for layer in layers]
 
        zipped = safe_zip(layers, states)
 
        rval = OrderedDict(zipped)
            rval['vis_' + key] = ch[key]
 
        for state, layer in safe_zip(q, self.hidden_layers):
            ch = layer.get_monitoring_channels()
            for key in ch:
 
            mx = None
            for new, old in safe_zip(flat_q, flat_prev_q):
                cur_mx = abs(new - old).max()
                if new is old:
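
dbm.py zips layers with freshly created states to build an OrderedDict, and the mean-field loop above measures convergence as the largest absolute change between successive variational parameters, counting identical objects as unchanged. A NumPy sketch of that convergence check:

import numpy as np

flat_q = [np.array([0.5, 0.6]), np.array([0.1])]
flat_prev_q = [np.array([0.5, 0.4]), flat_q[1]]   # second state is unchanged
mx = None
for new, old in zip(flat_q, flat_prev_q):         # safe_zip in pylearn2
    cur_mx = 0.0 if new is old else np.abs(new - old).max()
    mx = cur_mx if mx is None else max(mx, cur_mx)
print(mx)   # 0.2, the largest change across all state tensors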

src/p/y/pylearn2-HEAD/pylearn2/models/mlp.py
from pylearn2.utils import py_integer_types
from pylearn2.utils import safe_union
from pylearn2.utils import safe_zip
from pylearn2.utils import safe_izip
from pylearn2.utils import sharedX
            assert all(layer_coeff >= 0 for layer_coeff in coeff)
            return T.sum([getattr(layer, method_name)(layer_coeff) for
                          layer, layer_coeff in safe_zip(self.layers, coeff)
                          if layer_coeff > 0])
        else:
    def cost(self, Y, Y_hat):
        return sum(layer.cost(Y_elem, Y_hat_elem)
                   for layer, Y_elem, Y_hat_elem in
                   safe_zip(self.layers, Y, Y_hat))
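
mlp.py uses safe_zip both to pair layers with per-layer regularization coefficients (skipping zeros, as in layer.py above) and to pair layers with their slice of a composite target in cost. A sketch of the per-layer weighted sum; weight_decay is invented for illustration and precomputed numbers stand in for layer penalty terms:

def weight_decay(layer_penalties, coeffs):
    # One coefficient per layer; zero-coefficient layers are skipped.
    assert all(c >= 0 for c in coeffs)
    return sum(c * p
               for p, c in zip(layer_penalties, coeffs)   # safe_zip in pylearn2
               if c > 0)

print(weight_decay([2.0, 3.0, 1.5], [0.1, 0.0, 0.2]))   # 0.2 + 0.3 = 0.5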
 

src/p/y/pylearn2-HEAD/pylearn2/models/gsn.py
from pylearn2.models.autoencoder import Autoencoder
from pylearn2.models.model import Model
from pylearn2.utils import safe_zip
 
# Enforce correct restructured text list format.
        """
        # the indices which are being set
        set_idxs = safe_zip(*minibatch)[0]
 
        if self.nlayers == 2 and len(set_idxs) == 2:
 
        if clamped is not None:
            vals = safe_zip(*minibatch)[1]
            clamped = safe_zip(set_idxs, vals, clamped)
 
        def compile_f_init():
            mb = T.matrices(len(indices))
            zipped = safe_zip(indices, mb)
            f_init = theano.function(mb,
                                     self._set_activations(zipped, corrupt=True),
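
gsn.py leans on the zip(*...) transpose idiom: a minibatch of (index, value) pairs is split back into a tuple of indices and a tuple of values. A minimal sketch (indexing into the zipped result works directly in pylearn2 because, on Python 2, zip and safe_zip return lists):

minibatch = [(0, 'x0'), (2, 'x2')]
transposed = list(zip(*minibatch))   # safe_zip(*minibatch) in pylearn2
set_idxs = transposed[0]             # (0, 2)
vals = transposed[1]                 # ('x0', 'x2')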

src/p/y/pylearn2-HEAD/pylearn2/costs/cost.py
from theano.compat.python2x import OrderedDict
 
from pylearn2.utils import safe_zip
from pylearn2.utils import safe_union
from pylearn2.space import CompositeSpace, NullSpace
        nested_data = mapping.nest(data)
        costs = []
        for cost, cost_data in safe_zip(self.costs, nested_data):
            costs.append(cost.expr(model, cost_data, **kwargs))
        assert len(costs) > 0
        else:
            costs = [coeff * cost
                     for coeff, cost in safe_zip(self.coeffs, costs)]
            assert len(costs) > 0
            sum_of_costs = reduce(lambda x, y: x + y, costs)
    def get_gradients(self, model, data, ** kwargs):
        indiv_results = []
        composite_specs, mapping = self.get_composite_specs_and_mapping(model)
        nested_data = mapping.nest(data)
        for cost, cost_data in safe_zip(self.costs, nested_data):
 
        descrs = [cost.get_fixed_var_descr(model, cost_data)
                  for cost, cost_data in safe_zip(self.costs, nested_data)]
 
        return reduce(merge, descrs)
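
costs/cost.py implements a sum of costs: the incoming data is nested back into per-cost pieces, each cost expression is weighted by its coefficient, and the weighted terms are folded together with reduce. A sketch of the weighting and reduction step, with plain numbers standing in for symbolic cost expressions:

from functools import reduce   # a builtin on Python 2, a module function on Python 3

costs = [1.5, 0.25, 2.0]       # stand-ins for per-cost expressions
coeffs = [1.0, 4.0, 0.5]
weighted = [coeff * cost
            for coeff, cost in zip(coeffs, costs)]   # safe_zip in pylearn2
total = reduce(lambda x, y: x + y, weighted)
print(total)   # 1.5 + 1.0 + 1.0 = 3.5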

src/p/y/pylearn2-HEAD/pylearn2/training_algorithms/sgd.py
from pylearn2.utils.iteration import is_stochastic, has_uniform_batch_size
from pylearn2.utils import py_integer_types, py_float_types
from pylearn2.utils import safe_zip
from pylearn2.utils import serial
from pylearn2.utils import sharedX
        # only once to the compiled Theano function.
        theano_args = []
        for space, source in safe_zip(space_tuple, source_tuple):
            name = '%s[%s]' % (self.__class__.__name__, source)
            arg = space.make_theano_batch(name=name,
        else:
            # Use standard SGD updates with fixed learning rate.
            updates.update( dict(safe_zip(params, [param - learning_rate * \
                lr_scalers.get(param, 1.) * grads[param]
                                    for param in params])))
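
The update shown above is plain SGD with optional per-parameter learning-rate scalers: each parameter moves by learning_rate * scaler * gradient. A NumPy sketch of one update step:

import numpy as np

learning_rate = 0.1
params = {'W': np.array([1.0, 2.0]), 'b': np.array([0.5])}
grads = {'W': np.array([0.2, -0.4]), 'b': np.array([0.1])}
lr_scalers = {'b': 2.0}   # parameters missing from the dict default to 1.
updates = dict((p, params[p] - learning_rate * lr_scalers.get(p, 1.) * grads[p])
               for p in params)
print(updates['W'])   # [0.98  2.04]
print(updates['b'])   # [0.48]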
