提交 7a750ee1 authored 作者: Olivier Delalleau's avatar Olivier Delalleau

Fixed typo in help/comments: usefull -> useful

上级 49baa66c
......@@ -1248,7 +1248,7 @@ class _Linker(gof.link.LocalLinker):
debug("done with node")
if False:
#This could be usefull to help finding refcount problem.
#This could be useful to help finding refcount problem.
#But it is very slow and it is not sure it will help.
gc.collect()
......
......@@ -364,7 +364,7 @@ class ProfileMode(Mode):
print '%i Theano fct call, %.3fs per call'%(total_fct_call, time_per_call)
print
print "List of apply that don't have float64 as input but have float64 in outputs. Usefull to know if we forgot some cast when using floatX=float32 or gpu code."
print "List of apply that don't have float64 as input but have float64 in outputs. Useful to know if we forgot some cast when using floatX=float32 or gpu code."
print '<Apply> <Apply position> <fct name> <inputs type> <outputs type>'
for fct in fct_call.keys():
for idx, node in enumerate(fct.maker.env.toposort()):
......
......@@ -24,7 +24,7 @@ AddConfigVar('device',
)
AddConfigVar('init_gpu_device',
"Initialize the gpu device to use. This don't change the default behavior. We don't default to try to move the computation to it. We don't default to put shared variable of float32 on it. Usefull to run the test on a specific gpu.",
"Initialize the gpu device to use. This don't change the default behavior. We don't default to try to move the computation to it. We don't default to put shared variable of float32 on it. Useful to run the test on a specific gpu.",
EnumStr('', 'gpu0', 'gpu1', 'gpu2', 'gpu3',
allow_override=False)
)
......
......@@ -1917,7 +1917,7 @@ class GpuContiguous(Op):
gpu_contiguous = GpuContiguous()
# Those are predifined CudaNdarrayType as done in tensor.basic
# Usefull mostly for test as the gpu op are inserted automatically...
# Useful mostly for test as the gpu op are inserted automatically...
fscalar = CudaNdarrayType(dtype='float32', broadcastable=())
def scalar(name = None, dtype = None):
......
......@@ -342,7 +342,7 @@ def test_elemwise4():
def speed_elemwise_collapse():
""" used to time if the collapse of ccontiguous dims are usefull """
""" used to time if the collapse of ccontiguous dims are useful """
shape = (30,40,50,600)
a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),dtype='float32'))
......
......@@ -496,7 +496,7 @@ class ConvOp(Op):
return "ConvOp{" +",".join(str((a, getattr(self, a))) for a in self.__attrnames) + "}"
def set_flops(self):
""" Usefull with the hack in profilemode to print the MFlops"""
""" Useful with the hack in profilemode to print the MFlops"""
if self.out_mode=="valid":
self.flops=self.kshp[0]*self.kshp[1]*2#nb mul and add by output pixed
self.flops*=self.outshp[0]*self.outshp[1]#nb flops by output image
......
......@@ -1251,7 +1251,7 @@ def local_mul_switch_sink(node):
T.grad(y,x) -> grad(f(x),x) * grad(y,f(x)) + grad(g(x),x) * grad(y,g(x))
**with the optimization
T.grad(y,x) -> switch(cond,grad(f(x),x), 0) + switch(cond,0,grad(g(x),x))
This will be particularly usefull for the lazyif because we skip
This will be particularly useful for the lazyif because we skip
an entire part of the graph.
"""
if node.op!=T.mul:
......
......@@ -354,7 +354,7 @@ def makeSharedTester(shared_constructor_,
assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_value_inplace
# Test by set_value with borrow=False when new data casted.
# specificaly usefull for gpu data
# specificaly useful for gpu data  (note: "specificaly" is itself a typo for "specifically" — left in the committed line; recorded here as scraped)
nd += 1
old_data = x_shared.container.storage[0]
x_shared.set_value(self.cast_value(nd), borrow=False)
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论