提交 7a39f5bf authored 作者: abergeron's avatar abergeron

Merge pull request #2013 from nouiz/mixed

[CRASH,DOC,ENH] Mixed
......@@ -586,6 +586,14 @@ dimensions, see :meth:`_tensor_py_operators.dimshuffle`.
broadcastable. When performing the function, if the length of `x`
along that dimension is not 1, a ``ValueError`` will be raised.
.. function:: patternbroadcast(x, broadcastable)
Change `x` broadcastable pattern to
`broadcastable`. `broadcastable` must be iterable. For example,
`patternbroadcast(x, (True, False))` will make the first dimension
of `x` broadcastable and the second dimension not broadcastable,
so x will now be a `row`.
.. function:: flatten(x, outdim=1)
Similar to :func:`reshape`, but the shape is inferred from the shape of `x`.
......
......@@ -11,10 +11,18 @@ _logger = logging.getLogger('theano.configdefaults')
config = TheanoConfigParser()
def floatX_convert(s):
    """Normalize a floatX shorthand to a full dtype name.

    Maps the shorthand strings "32" and "64" to "float32" and
    "float64" respectively; any other value is returned unchanged.
    """
    # Dispatch via a lookup table instead of an if/elif chain;
    # .get() falls back to the input itself for unrecognized values.
    return {"32": "float32", "64": "float64"}.get(s, s)
AddConfigVar('floatX',
"Default floating-point precision for python casts",
EnumStr('float64', 'float32'),
)
"Default floating-point precision for python casts",
EnumStr('float64', 'float32', convert=floatX_convert,),
)
AddConfigVar('cast_policy',
"Rules for implicit type casting",
......
......@@ -305,7 +305,11 @@ class EnumStr(ConfigParam):
raise ValueError('Valid values for an EnumStr parameter '
'should be strings', val, type(val))
convert = kwargs.get("convert", None)
def filter(val):
if convert:
val = convert(val)
if val in self.all:
return val
else:
......
......@@ -8,7 +8,7 @@ from nose.plugins.skip import SkipTest
import theano
from theano.compile.pfunc import pfunc
from theano import config, tensor
import theano.sandbox.linalg.tests.test_linalg
import theano.tensor.tests.test_nlinalg
from theano.tests import unittest_tools as utt
......@@ -393,14 +393,14 @@ def test_erfinvgpu():
assert numpy.allclose(f(xv),f2(xv))
class test_diag(theano.sandbox.linalg.tests.test_linalg.test_diag):
class test_diag(theano.tensor.tests.test_nlinalg.test_diag):
mode = mode_with_gpu
shared = staticmethod(cuda.shared_constructor)
floatX = 'float32'
type = CudaNdarrayType
def __init__(self, name):
super(theano.sandbox.linalg.tests.test_linalg.test_diag,
super(theano.tensor.tests.test_nlinalg.test_diag,
self).__init__(name)
......
......@@ -249,12 +249,12 @@ class GpuMultinomialFromUniform(MultinomialFromUniform, GpuOp):
fail = sub['fail']
return """
if (PyArray_NDIM(%(pvals)s) != 2)
if (CudaNdarray_NDIM(%(pvals)s) != 2)
{
PyErr_Format(PyExc_TypeError, "pvals wrong rank");
%(fail)s;
}
if (PyArray_NDIM(%(unis)s) != 1)
if (CudaNdarray_NDIM(%(unis)s) != 1)
{
PyErr_Format(PyExc_TypeError, "unis wrong rank");
%(fail)s;
......
......@@ -4208,7 +4208,7 @@ def test_local_upcast_elemwise_constant_inputs():
old = theano.config.floatX
theano.config.floatX = 'float32'
try:
v = lvector() / 2
v = lvector()
function([v], theano.tensor.basic.true_div(v, 2))
finally:
theano.config.floatX = old
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论