提交 dd668d17，作者: Frédéric Bastien

Merge pull request #1395 from delallea/minor

Minor fixes
......@@ -15,8 +15,8 @@
:Parameters: *x* - symbolic Tensor (or compatible)
:Return type: same as x
:Returns: element-wise sigmoid: :math:`sigmoid(x) = \frac{1}{1 + \exp(-x)}`.
:note: see :func:`ultra_fast_sigmoid` or :func:`hard_sigmoid` for faster version.
Speed comparison for 100M float64 element on a Core2 Duo @ 3.16 GHz.
:note: see :func:`ultra_fast_sigmoid` or :func:`hard_sigmoid` for faster versions.
Speed comparison for 100M float64 elements on a Core2 Duo @ 3.16 GHz:
- hard_sigmoid: 1.0s
- ultra_fast_sigmoid: 1.3s
......@@ -44,15 +44,15 @@
:Parameters: *x* - symbolic Tensor (or compatible)
:Return type: same as x
:Returns: approximated element-wise sigmoid: :math:`sigmoid(x) = \frac{1}{1 + \exp(-x)}`.
:note: To automatically change all :func:`sigmoid` op to this version, use
:note: To automatically change all :func:`sigmoid` ops to this version, use
the Theano optimization ``local_ultra_fast_sigmoid``. This can be done
with the Theano flag ``optimizer_including=local_ultra_fast_sigmoid``.
This optimization is done late, so it shouldn't affect
This optimization is done late, so it should not affect
stabilization optimization.
.. note:: The underlying code will return 0.00247262315663 as the
minimum value and 0.997527376843 as the maximum value. So it
never return 0 or 1.
never returns 0 or 1.
......@@ -63,10 +63,10 @@
:Parameters: *x* - symbolic Tensor (or compatible)
:Return type: same as x
:Returns: approximated element-wise sigmoid: :math:`sigmoid(x) = \frac{1}{1 + \exp(-x)}`.
:note: To automatically change all :func:`sigmoid` op to this version, use
:note: To automatically change all :func:`sigmoid` ops to this version, use
the Theano optimization ``local_hard_sigmoid``. This can be done
with the Theano flag ``optimizer_including=local_hard_sigmoid``.
This optimization is done late, so it shouldn't affect
This optimization is done late, so it should not affect
stabilization optimization.
.. note:: The underlying code will return an exact 0 or 1 if an
......
"""We don't have real test for the cache, but it would be great to make them!
"""We don't have real tests for the cache, but it would be great to make them!
But this one test a current behavior that isn't good: the c_code isn't
But this one tests a current behavior that isn't good: the c_code isn't
deterministic based on the input type and the op.
"""
......
......@@ -847,14 +847,15 @@ class ScalarOp(Op):
def c_code_contiguous(self, node, name, inp, out, sub):
"""This function is called by Elemwise when all inputs and
outputs are c_contiguous. This allow to use SIMD version
outputs are c_contiguous. This allows to use the SIMD version
of this op.
The inputs are the same as c_code except:
The inputs are the same as c_code except that:
- inp and out must be the variable name of the ndarray
- node must be the elemwise node. This is needed to know
the inputs/outputs type.
- inp and out must be the names of the variables associated to the
ndarrays in the C code
- node must be the elemwise node (this is needed to know
the inputs/outputs types)
"""
raise theano.gof.utils.MethodNotDefined()
......
......@@ -622,7 +622,7 @@ def get_scalar_constant_value(v):
isinstance(v.owner.op.idx_list[0], (int, long,
numpy.integer))):
# Python 2.4 don't support indexing with numpy.integer
# Python 2.4 does not support indexing with numpy.integer
# So we cast it.
idx = int(v.owner.op.idx_list[0])
ret = v.owner.inputs[0].owner.inputs[idx]
......@@ -1533,15 +1533,15 @@ class _tensor_py_operators:
return True
else:
raise TypeError(
"Variable does not support boolean operations. This"
"can happen if you do logical operator (<, <=, >, <=,"
"==, !=) between numpy.ndarray and theano tensor"
"variable. Due NumPy implementation before NumPy 1.8,"
"we can't make the python syntax work when the ndarray"
"is on the left, and this end with this error. To work"
"around that, just call"
"theano.tensor.{lt,le,eq,ne,gt,ge}(ndarray, tensor) or"
"use the python syntax with the theano tensor on the"
"Variables do not support boolean operations. This "
"can happen if you do a logical operation (<, <=, >, <=, "
"==, !=) between a numpy.ndarray and a Theano tensor"
"variable. Due to NumPy implementation before NumPy 1.8, "
"we cannot make the Python syntax work when the ndarray "
"is on the left, and this results in this error. To work "
"around that, either call "
"theano.tensor.{lt,le,eq,ne,gt,ge}(ndarray, tensor), or "
"use the Python syntax with the Theano tensor on the "
"left. Or update to NumPy 1.8 or above."
)
......@@ -6436,11 +6436,11 @@ class Reshape(Op):
(x.shape, shp))
if not out[0].flags.aligned:
raise RuntimeError("numpy.reshape returned a not aligned tensor."
" NumPy version 1.6.2, 1.7.0 and 1.7.1 have"
" NumPy versions 1.6.2, 1.7.0 and 1.7.1 have"
" this problem for some input shape/new shape"
" combination. Use another NumPy version."
" Input shape: %s, input stride %s,"
" new_shape %s new_strides %s." % (
" combinations. Use another NumPy version."
" Input shape: %s, input stride: %s,"
" new_shape: %s, new_strides: %s." % (
x.shape, x.strides, shp, out[0].strides))
def connection_pattern(self, node):
......@@ -6545,9 +6545,9 @@ class Reshape(Op):
PyErr_Format(
PyExc_RuntimeError,
"PyArray_Newshape returned an object that isn't aligned!"
" NumPy version 1.6.2, 1.7.0 and 1.7.1 have"
" NumPy versions 1.6.2, 1.7.0 and 1.7.1 have"
" this problem for some input shape/new shape"
" combination. Use another NumPy version.");
" combinations. Use another NumPy version.");
%(fail)s;
}
""" % locals()
......
......@@ -251,8 +251,8 @@ except ImportError, e:
# when theano.config.blas.ldflags is defined. So we don't need a
# warning in that case.
if not config.blas.ldflags:
_logger.warning('Failed to import scipy.linalg.blas and '
'Theano flag blas.ldflags empty. '
_logger.warning('Failed to import scipy.linalg.blas, and '
'Theano flag blas.ldflags is empty. '
'Falling back on slower implementations for '
'dot(matrix, vector), dot(vector, matrix) and '
'dot(vector, vector) (%s)',
......
......@@ -98,7 +98,7 @@ for i in xrange(750):
// We block to keep the data in l1
// normal l1 size = 32k: 32k/2(input + output)/8(nb bytes of double)=2k
// We stay below the 2k limit to leave space for
// This is faster then the not blocking version
// This is faster than the not blocking version
for(int i=0;i<n;i+=2048){
npy_intp nb = (n-i<2048)?n-i:2048;
for(int j=0;j<nb;j++){
......@@ -134,9 +134,9 @@ for i in xrange(750):
import os
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(data, val)#, 'o-')
ax.plot(data, val_ultra)#, '-')
ax.plot(data, val_hard)#, '-')
ax.plot(data, val) # , 'o-')
ax.plot(data, val_ultra) # , '-')
ax.plot(data, val_hard) # , '-')
ax.grid(True)
ax.legend(("sigmoid", "ultra_fast", "hard"), "upper left")
fname = os.path.join(os.path.dirname(theano.__file__), '..',
......@@ -234,13 +234,13 @@ def local_ultra_fast_sigmoid(node):
"""
When enabled, change all sigmoid to ultra_fast_sigmoid.
To example do mode.including('local_ultra_fast_sigmoid')
For example do mode.including('local_ultra_fast_sigmoid')
or use the Theano flag optimizer_including=local_ultra_fast_sigmoid
This speed up the sigmoid op by using an approximation.
This speeds up the sigmoid op by using an approximation.
This is done after the stabilization and specialize phase
to don't interact with them.
This is done after the stabilization and specialize phases
to avoid interacting with them.
"""
if (isinstance(node.op, tensor.Elemwise) and
......@@ -261,16 +261,16 @@ theano.compile.optdb['uncanonicalize'].register("local_ultra_fast_sigmoid",
def hard_sigmoid(x):
"""An approximation of sigmoid.
More approximate and faster then ultra_fast_sigmoid.
More approximate and faster than ultra_fast_sigmoid.
Approx in 3 parts: 0, scaled linear, 1
Removing the slop and shift don't make it faster.
Removing the slope and shift does not make it faster.
"""
slop = 0.2
slope = 0.2
shift = 0.5
x = (x * 0.2) + shift
x = (x * slope) + shift
x = tensor.clip(x, 0, 1)
return x
......@@ -330,7 +330,8 @@ class ScalarSoftplus(scalar.UnaryScalarOp):
return (2,) + v
else:
return v
scalar_softplus = ScalarSoftplus(scalar.upgrade_to_float, name= 'scalar_softplus')
scalar_softplus = ScalarSoftplus(scalar.upgrade_to_float,
name='scalar_softplus')
softplus = elemwise.Elemwise(scalar_softplus, name='softplus')
pprint.assign(softplus, printing.FunctionPrinter('softplus'))
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论