Commit 56d152c8 authored by James Bergstra

test_elemwise0 passed

上级 5d16a644
from .type import CudaNdarrayType
from .var import (CudaNdarrayVariable,
CudaNdarrayConstant,
CudaNdarraySharedVariable,
shared_constructor)
差异被折叠。
...@@ -4,35 +4,53 @@ from theano import tensor ...@@ -4,35 +4,53 @@ from theano import tensor
import numpy import numpy
import gputensor as gpt import theano_cuda_ndarray as tcn
def test0(): def test_elemwise0():
a = gpt.gpu_tensor_shared_constructor(numpy.random.rand(3,4), 'a') a = tcn.shared_constructor(numpy.random.rand(4,4), 'a')
b = tensor.dmatrix() b = tensor.dmatrix()
f = pfunc([b], [], updates=[(a, a+b)]) f = pfunc([b], [], updates=[(a, a+b)])
a0 = a.value * 1.0 a0 = a.value * 1.0
f(numpy.ones((3,4))) print 'BEFORE ADD', a.value
f(numpy.ones((4,4)))
print f.maker.env.toposort() print f.maker.env.toposort()
print 'AFTER ADD', a.value
assert numpy.all(a0 + 1.0 == a.value) assert numpy.all(a0 + 1.0 == a.value)
def test_elemwise1():
    """ Several kinds of elemwise expressions with no broadcasting, non power-of-two shape """
    shape = (3,4)
    a = tcn.shared_constructor(numpy.random.rand(*shape), 'a')
    b = tensor.dmatrix()
    # mixes add, mul, exp and pow in one elemwise update expression
    f = pfunc([b], [], updates=[(a, a+b * tensor.exp(b**a))])
    #let debugmode catch any mistakes
    f(numpy.ones(shape))
def test_elemwise2():
    """ Several kinds of elemwise expressions with dimension permutations """
    shape = (3,4,5,6)
    a = tcn.shared_constructor(numpy.random.rand(*shape), 'a')
    # 4-d float32 tensor with no broadcastable dimensions
    b = tensor.Tensor(dtype='float32', broadcastable=[0]*len(shape))()
    # both operands are permuted the same way ([2,0,3,1]) before the mul
    f = pfunc([b], [], updates=[(a, (a+b).dimshuffle([2,0,3,1]) *
        tensor.exp(b**a).dimshuffle([2,0,3,1]))])
    #let debugmode catch errors
    f(numpy.ones(shape))
def test_elemwise3():
    """ Several kinds of elemwise expressions with dimension permutations and broadcasting"""
    shape = (3,4,5,6)
    a = tcn.shared_constructor(numpy.random.rand(*shape), 'a')
    # a vector argument: broadcast against the 4-d shared tensor `a`
    b = tensor.dvector()
    f = pfunc([b], [], updates=[(a, (a+b).dimshuffle([2,0,3,1]) * tensor.exp(1 +
        b**a).dimshuffle([2,0,3,1]))])
    #let debugmode catch errors
    f(numpy.ones(6))
差异被折叠。
...@@ -7,15 +7,14 @@ ...@@ -7,15 +7,14 @@
#define DECL(s) static PyObject * s(PyObject * self, PyObject *args) #define DECL(s) static PyObject * s(PyObject * self, PyObject *args)
static PyObject * static PyObject *
filter(PyObject* self, PyObject *args) // args = (data, typenum, broadcastable, strict) filter(PyObject* self, PyObject *args) // args = (data, broadcastable, strict)
{ {
PyObject *py_data=NULL; PyObject *py_data=NULL;
PyArrayObject * data = NULL; PyArrayObject * data = NULL;
int dtype_typenum=-1;
int strict = 0; int strict = 0;
PyObject * broadcastable=NULL; PyObject * broadcastable=NULL;
if (!PyArg_ParseTuple(args, "OiOi", &py_data, &dtype_typenum, &broadcastable, &strict)) return NULL; if (!PyArg_ParseTuple(args, "OOi", &py_data, &broadcastable, &strict)) return NULL;
if (!PyTuple_Check(broadcastable)){ if (!PyTuple_Check(broadcastable)){
PyErr_SetString(PyExc_TypeError, "broadcastable arg should be a tuple of int."); PyErr_SetString(PyExc_TypeError, "broadcastable arg should be a tuple of int.");
...@@ -99,9 +98,9 @@ static PyMethodDef MyMethods[] = { ...@@ -99,9 +98,9 @@ static PyMethodDef MyMethods[] = {
PyMODINIT_FUNC PyMODINIT_FUNC
init_theano_cuda_ndarray(void) inittype_support(void)
{ {
(void) Py_InitModule("_theano_cuda_ndarray", MyMethods); (void) Py_InitModule("type_support", MyMethods);
import_array(); import_array();
} }
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论