Commit 7a750ee1 authored by Olivier Delalleau

Fixed typo in help/comments: usefull -> useful

Parent 49baa66c
@@ -1248,7 +1248,7 @@ class _Linker(gof.link.LocalLinker):
                 debug("done with node")
             if False:
-                #This could be usefull to help finding refcount problem.
+                #This could be useful to help finding refcount problem.
                 #But it is very slow and it is not sure it will help.
                 gc.collect()
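The disabled block above trades speed for leak visibility: forcing a collection after every node reclaims cyclic garbage immediately, so memory that keeps growing anyway points at a real refcount problem. A minimal sketch of the idea, not part of the commit:

import gc

class Buf(object):
    pass

def run_node():
    b = Buf()
    b.owner = b      # reference cycle: only the cyclic gc can reclaim b

for i in range(3):
    run_node()
n = gc.collect()     # the slow call the comment refers to
print 'collected %d unreachable objects' % n   # > 0: cycles were piling up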
@@ -364,7 +364,7 @@ class ProfileMode(Mode):
         print '%i Theano fct call, %.3fs per call'%(total_fct_call, time_per_call)
         print
-        print "List of apply that don't have float64 as input but have float64 in outputs. Usefull to know if we forgot some cast when using floatX=float32 or gpu code."
+        print "List of apply that don't have float64 as input but have float64 in outputs. Useful to know if we forgot some cast when using floatX=float32 or gpu code."
         print '<Apply> <Apply position> <fct name> <inputs type> <outputs type>'
         for fct in fct_call.keys():
             for idx, node in enumerate(fct.maker.env.toposort()):
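The listing this hunk touches flags Apply nodes whose inputs are all narrower than float64 but whose outputs are float64, which usually means a forgotten cast. A hedged illustration (not from the commit) of how that happens under floatX=float32:

import numpy
import theano.tensor as T

x = T.fvector('x')                            # float32 input
y = x * numpy.asarray(0.5, dtype='float64')   # missing cast: the constant upcasts
print y.dtype                                 # -> float64, the case the listing reports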
@@ -24,7 +24,7 @@ AddConfigVar('device',
         )
 AddConfigVar('init_gpu_device',
-        "Initialize the gpu device to use. This don't change the default behavior. We don't default to try to move the computation to it. We don't default to put shared variable of float32 on it. Usefull to run the test on a specific gpu.",
+        "Initialize the gpu device to use. This don't change the default behavior. We don't default to try to move the computation to it. We don't default to put shared variable of float32 on it. Useful to run the test on a specific gpu.",
         EnumStr('', 'gpu0', 'gpu1', 'gpu2', 'gpu3',
                 allow_override=False)
         )
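As a usage note (not part of the commit): like other variables registered with AddConfigVar, init_gpu_device is normally set through the THEANO_FLAGS environment variable before theano is imported, for example to run tests against a specific board while keeping computation on the CPU:

import os
os.environ['THEANO_FLAGS'] = 'device=cpu,init_gpu_device=gpu1'   # before the import
import theano
print theano.config.init_gpu_device   # -> 'gpu1'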
@@ -1917,7 +1917,7 @@ class GpuContiguous(Op):
 gpu_contiguous = GpuContiguous()
 # Those are predifined CudaNdarrayType as done in tensor.basic
-# Usefull mostly for test as the gpu op are inserted automatically...
+# Useful mostly for test as the gpu op are inserted automatically...
 fscalar = CudaNdarrayType(dtype='float32', broadcastable=())
 def scalar(name = None, dtype = None):
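These predefined types mirror the fscalar/fvector shortcuts in tensor.basic; since calling a Theano Type yields a symbolic variable, tests can build GPU variables directly instead of relying on the automatic insertion of gpu ops. A hedged sketch (import path assumed, not shown in this hunk):

from theano.sandbox.cuda.type import CudaNdarrayType

fvector = CudaNdarrayType(dtype='float32', broadcastable=(False,))
v = fvector('v')     # symbolic float32 vector stored on the GPU
print v.type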
@@ -342,7 +342,7 @@ def test_elemwise4():
 def speed_elemwise_collapse():
-    """ used to time if the collapse of ccontiguous dims are usefull """
+    """ used to time if the collapse of ccontiguous dims are useful """
     shape = (30,40,50,600)
     a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),dtype='float32'))
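What the benchmark times: when every dimension is C-contiguous, the elemwise kernel can treat the whole array as one flat vector instead of a 4-D loop nest. A hedged numpy analogy (not from the commit) of that collapse:

import numpy

a = numpy.random.rand(30, 40, 50, 600).astype('float32')
flat = a.reshape(-1)     # 36000000 elements, one flat loop
assert flat.base is a    # a view, no copy: collapsing contiguous dims is free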
@@ -496,7 +496,7 @@ class ConvOp(Op):
         return "ConvOp{" +",".join(str((a, getattr(self, a))) for a in self.__attrnames) + "}"
     def set_flops(self):
-        """ Usefull with the hack in profilemode to print the MFlops"""
+        """ Useful with the hack in profilemode to print the MFlops"""
         if self.out_mode=="valid":
             self.flops=self.kshp[0]*self.kshp[1]*2#nb mul and add by output pixed
             self.flops*=self.outshp[0]*self.outshp[1]#nb flops by output image
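A worked example of the two counting lines above (the full method also scales by batch size and filter count, which this hunk does not show): a 3x3 kernel over a 28x28 valid-mode output gives

kshp = (3, 3)
outshp = (28, 28)
flops = kshp[0] * kshp[1] * 2     # 9 mul + 9 add = 18 flops per output pixel
flops *= outshp[0] * outshp[1]    # 18 * 784 = 14112 flops per output image
print flops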
@@ -1251,7 +1251,7 @@ def local_mul_switch_sink(node):
     T.grad(y,x) -> grad(f(x),x) * grad(y,f(x)) + grad(g(x),x) * grad(y,g(x))
     **with the optimization
     T.grad(y,x) -> switch(cond,grad(f(x),x), 0) + switch(cond,0,grad(g(x),x))
-    This will be particularly usefull for the lazyif because we skip
+    This will be particularly useful for the lazyif because we skip
     an entire part of the graph.
     """
     if node.op!=T.mul:
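The gradient pattern this docstring describes is easy to reproduce; a hedged end-to-end sketch (not part of the commit) in which each switch(cond, ..., 0) term lets the optimizer skip the dead branch:

import theano
import theano.tensor as T

x = T.dscalar('x')
cond = T.gt(x, 0)
y = T.switch(cond, x ** 2, 3 * x)    # y = f(x) if cond else g(x)
gx = T.grad(y, x)                    # switch(cond, 2*x, 0) + switch(cond, 0, 3)
f = theano.function([x], gx)
print f(2.0), f(-1.0)                # -> 4.0 3.0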
@@ -354,7 +354,7 @@ def makeSharedTester(shared_constructor_,
         assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_value_inplace
         # Test by set_value with borrow=False when new data casted.
-        # specificaly usefull for gpu data
+        # specificaly useful for gpu data
         nd += 1
         old_data = x_shared.container.storage[0]
         x_shared.set_value(self.cast_value(nd), borrow=False)
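The borrow=False contract being tested: set_value must copy the new value, so the caller's buffer can never alias the container's storage; on the GPU the cast to a CudaNdarray is itself that copy. A hedged CPU-side illustration (not from the commit):

import numpy
import theano

data = numpy.zeros(3, dtype='float32')
s = theano.shared(numpy.ones(3, dtype='float32'))
s.set_value(data, borrow=False)   # forces a copy of data
data[0] = 42.0                    # mutate the caller's buffer afterwards
print s.get_value()[0]            # -> 0.0: the shared storage did not alias data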