提交 ddd1dc03 authored 作者: nouiz's avatar nouiz

Merge pull request #235 from delallea/minor

Minor
......@@ -991,11 +991,11 @@ class FunctionMaker(object):
# optimize the env
compute_test_value_orig = theano.config.compute_test_value
add_stack_trace_on_call = gof.Op.add_stack_trace_on_call
try:
theano.config.compute_test_value = "off"
start_optimizer = time.time()
add_stack_trace_on_call = gof.Op.add_stack_trace_on_call
gof.Op.add_stack_trace_on_call = False
start_optimizer = time.time()
optimizer(env)
end_optimizer = time.time()
......
......@@ -5,12 +5,14 @@ The elemwise fct are also used with scalar operation! So it can happen that ndim
"""
import StringIO, sys
import copy, logging, StringIO, sys
import numpy
from theano import Op, Type, Apply, Variable, Constant
from theano import tensor, scalar, gof
import logging, copy
from theano import Apply, Constant, Op, Type, Variable
from theano import gof, scalar, tensor
_logger_name = 'theano.sandbox.cuda.elemwise'
_logger = logging.getLogger(_logger_name)
_logger.setLevel(logging.INFO)
......@@ -47,7 +49,7 @@ class NaiveAlgo(object):
if code:
raise SupportCodeError(scalar_op)
except gof.utils.MethodNotDefined:
pass
pass
self.scalar_op = scalar_op
self.sync = sync
self.inplace_pattern = inplace_pattern
......
......@@ -696,9 +696,11 @@ def spectral_radius_bound(X, log2_exponent):
if X.type.ndim != 2:
raise TypeError('spectral_radius_bound requires a matrix argument', X)
if not isinstance(log2_exponent, int):
raise TypeError('spectral_radius_bound requires a integer exponent', log2_exponent)
raise TypeError('spectral_radius_bound requires an integer exponent',
log2_exponent)
if log2_exponent <= 0:
raise ValueError('spectral_radius_bound requires a strictly positive exponent', log2_exponent)
raise ValueError('spectral_radius_bound requires a strictly positive '
'exponent', log2_exponent)
XX = X
for i in xrange(log2_exponent):
XX = tensor.dot(XX, XX)
......
......@@ -11,8 +11,10 @@ if cuda_available:
class BadOldCode(Exception):
""" We create a specific Exception to be sure it don't get caught
by mistake"""
"""
We create a specific Exception to be sure it does not get caught by
mistake.
"""
pass
......
......@@ -2600,10 +2600,11 @@ class T_Scan(unittest.TestCase):
assert numpy.allclose(theano_y , v_y[-4:])
def test_opt_order(self):
""" Verify that scan optimizations are applied before blas
optimizations.
This is needed as otherwise, the dot won't become a dot22
so it will be slower and won't get transferred to the gpu.
"""
Verify that scan optimizations are applied before blas
optimizations.
This is needed as otherwise, the dot won't become a dot22
so it will be slower and won't get transferred to the gpu.
"""
x = theano.tensor.matrix('x')
A = theano.tensor.matrix('A')
......
......@@ -182,7 +182,7 @@ def Lop(f, wrt, eval_points, consider_constant=None, warn_type=False,
inputs = gof.graph.inputs(f)
gmap = gradient.grad_sources_inputs(
zip(f,eval_points),
zip(f, eval_points),
list(inputs) + list(consider_constant),
warn_type=warn_type)
......@@ -337,7 +337,7 @@ def grad(cost, wrt, g_cost=None, consider_constant=None, warn_type=False,
if len(ret) == 1 and not (using_list or using_tuple):
# `wrt` was a single Variable, so we return a single Variable too.
return ret[0]
return ret[0]
else:
# Ensure we preserve the original type of `wrt`.
if using_tuple:
......
Markdown 格式
0%
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论