提交 0f67db4b authored 作者: Olivier Delalleau's avatar Olivier Delalleau

Merge pull request #522 from nouiz/fix_blas

Fix blas
......@@ -42,6 +42,9 @@ Internal changes
* Define new exceptions MissingInputError and UnusedInputError, and use them
in theano.function, instead of TypeError and ValueError. (Pascal L.)
Crash Fix
* Don't try to use the blas library when told not to use it. (Frederic B.)
=============
Release Notes
=============
......
......@@ -11,6 +11,7 @@ from theano.tensor.basic import TensorType
try:
import scipy.sparse
from theano.sparse.basic import SparseType
def _is_sparse(a):
    """Return True when `a` is any of the scipy.sparse matrix classes."""
    result = scipy.sparse.issparse(a)
    return result
except ImportError:
......@@ -26,6 +27,7 @@ else:
def _is_cuda(a):
    """Fallback when the CUDA backend is unavailable.

    In that case no object can possibly be a CudaNdarray, so this
    always answers False regardless of `a`.
    """
    return False
def may_share_memory(a, b, raise_other_type=True):
    """Return True if `a` and `b` may share underlying memory.

    Supported argument types are numpy.ndarray, scipy.sparse matrices
    and CudaNdarray; aliasing is checked pairwise with the matching
    Type class's own may_share_memory.

    Parameters
    ----------
    a, b : object
        The two values to test for possible aliasing.
    raise_other_type : bool
        If True (the default), raise TypeError when either argument is
        of an unsupported type; if False, just return False for such
        arguments.
    """
    a_ndarray = isinstance(a, numpy.ndarray)
    b_ndarray = isinstance(b, numpy.ndarray)
    # NOTE(review): these two assignments are hidden by the diff context
    # cut, but a_sparse/b_sparse are used below so they must exist --
    # confirm against the full file.
    a_sparse = _is_sparse(a)
    b_sparse = _is_sparse(b)
    a_cuda = _is_cuda(a)
    b_cuda = _is_cuda(b)
    if (not(a_ndarray or a_sparse or a_cuda) or
            not(b_ndarray or b_sparse or b_cuda)):
        if raise_other_type:
            raise TypeError("may_share_memory support only ndarray"
                            " and scipy.sparse and CudaNdarray type")
        return False
    if a_ndarray and b_ndarray:
        return TensorType.may_share_memory(a, b)
    if a_cuda and b_cuda:
        # Import lazily so non-CUDA installations never pay for it.
        from theano.sandbox.cuda.type import CudaNdarrayType
        return CudaNdarrayType.may_share_memory(a, b)
    if a_cuda or b_cuda:
        # One CUDA object and one host object can never alias.
        return False
    # At least one side is sparse at this point; delegate to SparseType.
    return SparseType.may_share_memory(a, b)
from theano.gof import Op
from theano import config
from blas import ldflags, blas_header_text
from blas import blas_optdb, optdb, local_optimizer, EquilibriumOptimizer
......@@ -239,6 +241,8 @@ class CGer(BaseBLAS, Ger):
@local_optimizer([ger, ger_destructive])
def use_c_ger(node):
# Only float32 and float64 are supported for now.
if not config.blas.ldflags:
return
if (node.op == ger and
node.outputs[0].dtype in ['float32', 'float64']):
return [CGer(False)(*node.inputs)]
......@@ -510,6 +514,8 @@ class CGemv(BaseBLAS, Gemv):
@local_optimizer([gemv_inplace, gemv_no_inplace])
def use_c_gemv(node):
# Only float32 and float64 are supported for now.
if not config.blas.ldflags:
return
if (node.op == gemv_no_inplace and
node.outputs[0].dtype in ['float32', 'float64']):
return [CGemv(inplace=False)(*node.inputs)]
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论