提交 0ac72988 authored 作者: Hector's avatar Hector 提交者: Thomas Wiecki

Fix typos in tests

上级 bbbbfcb5
...@@ -710,7 +710,7 @@ class TestAliasingRules: ...@@ -710,7 +710,7 @@ class TestAliasingRules:
# with the memory of normal python variables that the user uses. # with the memory of normal python variables that the user uses.
# #
# 2. shared variables are allocated in this memory space, as are the # 2. shared variables are allocated in this memory space, as are the
# temporaries used for Function evalution. # temporaries used for Function evaluation.
# #
# 3. Physically, this managed memory space may be spread across the host, # 3. Physically, this managed memory space may be spread across the host,
# on a GPU device(s), or even on a remote machine. # on a GPU device(s), or even on a remote machine.
......
...@@ -387,7 +387,7 @@ class TestFunction: ...@@ -387,7 +387,7 @@ class TestFunction:
) )
cpy = ori.copy(swap=swap) cpy = ori.copy(swap=swap)
# run fuction several time # run function several times
ori(1), cpy(1), cpy(2) ori(1), cpy(1), cpy(2)
# assert same SharedVariable are update in different function # assert the same SharedVariables are updated in different functions
......
...@@ -37,7 +37,7 @@ class TestDnnConv2d(BaseTestConv2d): ...@@ -37,7 +37,7 @@ class TestDnnConv2d(BaseTestConv2d):
def setup_class(cls): def setup_class(cls):
super().setup_class() super().setup_class()
cls.shared = staticmethod(gpuarray_shared_constructor) cls.shared = staticmethod(gpuarray_shared_constructor)
# provide_shape is not used by the cuDNN impementation # provide_shape is not used by the cuDNN implementation
cls.provide_shape = [False] cls.provide_shape = [False]
@pytest.mark.skipif(dnn_available(test_ctx_name), reason=dnn_available.msg) @pytest.mark.skipif(dnn_available(test_ctx_name), reason=dnn_available.msg)
...@@ -131,7 +131,7 @@ class TestDnnConv3d(BaseTestConv3d): ...@@ -131,7 +131,7 @@ class TestDnnConv3d(BaseTestConv3d):
def setup_class(cls): def setup_class(cls):
super().setup_class() super().setup_class()
cls.shared = staticmethod(gpuarray_shared_constructor) cls.shared = staticmethod(gpuarray_shared_constructor)
# provide_shape is not used by the cuDNN impementation # provide_shape is not used by the cuDNN implementation
cls.provide_shape = [False] cls.provide_shape = [False]
@pytest.mark.skipif(dnn_available(test_ctx_name), reason=dnn_available.msg) @pytest.mark.skipif(dnn_available(test_ctx_name), reason=dnn_available.msg)
......
...@@ -206,7 +206,7 @@ def makeTester( ...@@ -206,7 +206,7 @@ def makeTester(
for description, check in self.checks.items(): for description, check in self.checks.items():
assert check(inputs, variables), ( assert check(inputs, variables), (
"Test %s::%s: Failed check: %s " "(inputs were %s, ouputs were %s)" "Test %s::%s: Failed check: %s " "(inputs were %s, outputs were %s)"
) % (self.op, testname, description, inputs, variables) ) % (self.op, testname, description, inputs, variables)
Checker.__name__ = name Checker.__name__ = name
......
...@@ -28,7 +28,7 @@ class TestBlockSparseGemvAndOuterGPUarray(TestBlockSparseGemvAndOuter): ...@@ -28,7 +28,7 @@ class TestBlockSparseGemvAndOuterGPUarray(TestBlockSparseGemvAndOuter):
@pytest.mark.skip( @pytest.mark.skip(
reason=""" reason="""
This test is temporarily disabled since we disabled the output_merge This test is temporarily disabled since we disabled the output_merge
and alpha_merge optimizations for blocksparse due to brokeness. and alpha_merge optimizations for blocksparse due to brokenness.
Re-enable when those are re-added. Re-enable when those are re-added.
""" """
) )
......
...@@ -1284,7 +1284,7 @@ def run_conv_small_batched_vs_multicall(inputs_shape, filters_shape, batch_sub): ...@@ -1284,7 +1284,7 @@ def run_conv_small_batched_vs_multicall(inputs_shape, filters_shape, batch_sub):
f = aesara.function([], [conv, sub_conv_top, sub_conv_bottom], mode=mode_with_gpu) f = aesara.function([], [conv, sub_conv_top, sub_conv_bottom], mode=mode_with_gpu)
res_all, res_batch_top, res_batch_bottom = f() res_all, res_batch_top, res_batch_bottom = f()
for i in range(batch_sub): for i in range(batch_sub):
# Check first ouputs. # Check first outputs.
utt.assert_allclose(res_batch_top[i], res_all[i]) utt.assert_allclose(res_batch_top[i], res_all[i])
# Then check last outputs. # Then check last outputs.
p = batch_size - batch_sub + i p = batch_size - batch_sub + i
......
# Test that normaly could be outside gpuarray, to have all gpuarray # Test that normally could be outside gpuarray, to have all gpuarray
# tests in the same directory, we put them here. # tests in the same directory, we put them here.
import numpy as np import numpy as np
......
...@@ -291,7 +291,7 @@ def test_local_gpualloc_empty(): ...@@ -291,7 +291,7 @@ def test_local_gpualloc_empty():
topo = f.maker.fgraph.toposort() topo = f.maker.fgraph.toposort()
assert len(topo) == 1 assert len(topo) == 1
assert isinstance(topo[0].op, AllocEmpty) assert isinstance(topo[0].op, AllocEmpty)
# This return not initilized data, so we can only check the shape # This return not initialized data, so we can only check the shape
assert f(3).shape == (3,) assert f(3).shape == (3,)
assert _check_stack_trace(f) assert _check_stack_trace(f)
...@@ -302,7 +302,7 @@ def test_local_gpualloc_empty(): ...@@ -302,7 +302,7 @@ def test_local_gpualloc_empty():
topo = f.maker.fgraph.toposort() topo = f.maker.fgraph.toposort()
assert len(topo) == 3 assert len(topo) == 3
assert isinstance(topo[0].op, GpuAllocEmpty) assert isinstance(topo[0].op, GpuAllocEmpty)
# This return not initilized data, so we can only check the shape # This return not initialized data, so we can only check the shape
assert f(3).shape == (3,) assert f(3).shape == (3,)
assert _check_stack_trace(f) assert _check_stack_trace(f)
...@@ -312,7 +312,7 @@ def test_local_gpualloc_empty(): ...@@ -312,7 +312,7 @@ def test_local_gpualloc_empty():
topo = f.maker.fgraph.toposort() topo = f.maker.fgraph.toposort()
assert len(topo) == 3 assert len(topo) == 3
assert isinstance(topo[0].op, GpuAllocEmpty) assert isinstance(topo[0].op, GpuAllocEmpty)
# This return not initilized data, so we can only check the shape # This return not initialized data, so we can only check the shape
assert f(3, 4).shape == (3, 4) assert f(3, 4).shape == (3, 4)
assert _check_stack_trace(f) assert _check_stack_trace(f)
...@@ -550,7 +550,7 @@ def test_local_gpu_elemwise(): ...@@ -550,7 +550,7 @@ def test_local_gpu_elemwise():
utt.assert_allclose(f(a_v, b_v, c_v), a_v + b_v + c_v) utt.assert_allclose(f(a_v, b_v, c_v), a_v + b_v + c_v)
assert _check_stack_trace(f) assert _check_stack_trace(f)
return # Not yet implemeted return # Not yet implemented
# Test multiple output # Test multiple output
a_s = aesara.scalar.float32() a_s = aesara.scalar.float32()
a = fmatrix() a = fmatrix()
......
...@@ -19,7 +19,7 @@ from tests.gpuarray.test_basic_ops import rand_gpuarray ...@@ -19,7 +19,7 @@ from tests.gpuarray.test_basic_ops import rand_gpuarray
test_size = 10000 test_size = 10000
# NB: This order of "unsorted axes" is arbitrary and is here # NB: This order of "unsorted axes" is arbitrary and is here
# just to have the same informations on profile output # just to have the same information on profile output
# from one test to another. # from one test to another.
unsorted_axes = (2, 4, 0, 3, 1) unsorted_axes = (2, 4, 0, 3, 1)
...@@ -88,7 +88,7 @@ class BaseTest: ...@@ -88,7 +88,7 @@ class BaseTest:
pytest.skip("No tensor ndim defined.") pytest.skip("No tensor ndim defined.")
if self.tensor_size < 0 or self.tensor_size > 5: if self.tensor_size < 0 or self.tensor_size > 5:
pytest.skip( pytest.skip(
"We allow from 0 (included) to 5 (inclued) dimensons for these tests." "We allow from 0 (included) to 5 (included) dimensions for these tests."
) )
if self.shape is None: if self.shape is None:
self.shape = self.get_shape() self.shape = self.get_shape()
......
...@@ -549,7 +549,7 @@ class ScanGpuTests: ...@@ -549,7 +549,7 @@ class ScanGpuTests:
def test_gpu_memory_usage(self): def test_gpu_memory_usage(self):
# This test validates that the memory usage of the defined aesara # This test validates that the memory usage of the defined aesara
# function is reasonnable when executed on the GPU. It checks for # function is reasonable when executed on the GPU. It checks for
# a bug in which one of scan's optimization was not applied which # a bug in which one of scan's optimization was not applied which
# made the scan node compute large and unnecessary outputs which # made the scan node compute large and unnecessary outputs which
# brought memory usage on the GPU to ~12G. # brought memory usage on the GPU to ~12G.
......
...@@ -46,7 +46,7 @@ class MyType(Type): ...@@ -46,7 +46,7 @@ class MyType(Type):
@staticmethod @staticmethod
def may_share_memory(a, b): def may_share_memory(a, b):
# As this represent a string and string are immutable, they # As this represents a string and strings are immutable, they
# never share memory in the DebugMode sence. This is needed as # never share memory in the DebugMode sense. This is needed as
# Python reuse string internally. # Python reuses strings internally.
return False return False
......
...@@ -219,7 +219,7 @@ class TestEnumTypes: ...@@ -219,7 +219,7 @@ class TestEnumTypes:
except AttributeError: except AttributeError:
pass pass
else: else:
raise Exception("EnumList with invalid name should faild.") raise Exception("EnumList with invalid name should fail.")
try: try:
EnumType(**{invalid_name: 0}) EnumType(**{invalid_name: 0})
......
...@@ -411,7 +411,7 @@ def test_reallocation(): ...@@ -411,7 +411,7 @@ def test_reallocation():
x = scalar("x") x = scalar("x")
y = scalar("y") y = scalar("y")
z = tanh(3 * x + y) + cosh(x + 5 * y) z = tanh(3 * x + y) + cosh(x + 5 * y)
# The functinality is currently implement for non lazy and non c VM only. # The functionality is currently implemented for non-lazy and non-C VM only.
for linker in [ for linker in [
VMLinker(allow_gc=False, lazy=False, use_cloop=False), VMLinker(allow_gc=False, lazy=False, use_cloop=False),
VMLinker(allow_gc=True, lazy=False, use_cloop=False), VMLinker(allow_gc=True, lazy=False, use_cloop=False),
......
...@@ -872,7 +872,7 @@ class TestScan: ...@@ -872,7 +872,7 @@ class TestScan:
# a bit of explaining: # a bit of explaining:
# due to the definition of sequences taps in scan, v_0[0] is # due to the definition of sequences taps in scan, v_0[0] is
# actually v_0[-2], and v_0[1] is v_0[-1]. The values v_0[2] # actually v_0[-2], and v_0[1] is v_0[-1]. The values v_0[2]
# and v_0[3] do not get uesd ( because you do not use v_0[t] # and v_0[3] do not get used ( because you do not use v_0[t]
# in scan) which might seem strange, but then again why not use # in scan) which might seem strange, but then again why not use
# v_0[t] instead of v_0[t-2] in a real application ?? # v_0[t] instead of v_0[t-2] in a real application ??
# also vx0[0] corresponds to vx0[-2], vx0[1] to vx0[-1] # also vx0[0] corresponds to vx0[-2], vx0[1] to vx0[-1]
...@@ -3267,7 +3267,7 @@ class TestScan: ...@@ -3267,7 +3267,7 @@ class TestScan:
@pytest.mark.skip( @pytest.mark.skip(
reason="This test fails because not typed outputs_info " reason="This test fails because not typed outputs_info "
"are always gived the smallest dtype. There is " "are always given the smallest dtype. There is "
"no upcast of outputs_info in scan for now.", "no upcast of outputs_info in scan for now.",
) )
def test_outputs_info_not_typed(self): def test_outputs_info_not_typed(self):
...@@ -3639,7 +3639,7 @@ class TestScan: ...@@ -3639,7 +3639,7 @@ class TestScan:
grad(scan_outputs[0].sum(), out_init[1]) grad(scan_outputs[0].sum(), out_init[1])
# Validate the connnection pattern is as it should be # Validate the connection pattern is as it should be
node = scan_outputs[0].owner node = scan_outputs[0].owner
connection_pattern = node.op.connection_pattern(node) connection_pattern = node.op.connection_pattern(node)
expected_connection_pattern = [ expected_connection_pattern = [
......
...@@ -207,7 +207,7 @@ class TestPushOutScanOutputDot: ...@@ -207,7 +207,7 @@ class TestPushOutScanOutputDot:
scan_node = [ scan_node = [
node for node in f_opt.maker.fgraph.toposort() if isinstance(node.op, Scan) node for node in f_opt.maker.fgraph.toposort() if isinstance(node.op, Scan)
][0] ][0]
# NOTE: WHEN INFER_SHAPE IS REENABLED, BELOW THE SCAN MUST # NOTE: WHEN INFER_SHAPE IS RE-ENABLED, BELOW THE SCAN MUST
# HAVE ONLY 1 OUTPUT. # HAVE ONLY 1 OUTPUT.
assert len(scan_node.op.outputs) == 2 assert len(scan_node.op.outputs) == 2
assert not isinstance(scan_node.op.outputs[0], Dot) assert not isinstance(scan_node.op.outputs[0], Dot)
...@@ -282,7 +282,7 @@ class TestPushOutSumOfDot: ...@@ -282,7 +282,7 @@ class TestPushOutSumOfDot:
# #
# 'dim' has been reduced from 1000 to 5 to make the test run faster # 'dim' has been reduced from 1000 to 5 to make the test run faster
# Parameters from an actual machine tranlation run # Parameters from an actual machine translation run
batch_size = 80 batch_size = 80
seq_len = 50 seq_len = 50
dim = 5 dim = 5
......
...@@ -2728,7 +2728,7 @@ def _hv_switch(op, expected_function): ...@@ -2728,7 +2728,7 @@ def _hv_switch(op, expected_function):
:Parameters: :Parameters:
- `op`: HStack or VStack class. - `op`: HStack or VStack class.
- `expected_function`: function from scipy for comparaison. - `expected_function`: function from scipy for comparison.
""" """
class TestXStack(_TestHVStack): class TestXStack(_TestHVStack):
......
...@@ -9,7 +9,7 @@ from tests.sparse.test_basic import as_sparse_format ...@@ -9,7 +9,7 @@ from tests.sparse.test_basic import as_sparse_format
def test_hash_from_sparse(): def test_hash_from_sparse():
hashs = [] hashes = []
rng = np.random.rand(5, 5) rng = np.random.rand(5, 5)
for format in ["csc", "csr"]: for format in ["csc", "csr"]:
...@@ -41,11 +41,11 @@ def test_hash_from_sparse(): ...@@ -41,11 +41,11 @@ def test_hash_from_sparse():
]: ]:
data = as_sparse_format(data, format) data = as_sparse_format(data, format)
hashs.append(hash_from_sparse(data)) hashes.append(hash_from_sparse(data))
# test that different type of views and their copy give the same hash # test that different type of views and their copy give the same hash
assert hash_from_sparse(rng[1:]) == hash_from_sparse(rng[1:].copy()) assert hash_from_sparse(rng[1:]) == hash_from_sparse(rng[1:].copy())
assert hash_from_sparse(rng[1:3]) == hash_from_sparse(rng[1:3].copy()) assert hash_from_sparse(rng[1:3]) == hash_from_sparse(rng[1:3].copy())
assert hash_from_sparse(rng[:4]) == hash_from_sparse(rng[:4].copy()) assert hash_from_sparse(rng[:4]) == hash_from_sparse(rng[:4].copy())
assert len(set(hashs)) == len(hashs) assert len(set(hashes)) == len(hashes)
...@@ -174,7 +174,7 @@ class TestBlockSparseGemvAndOuter(utt.InferShapeTester): ...@@ -174,7 +174,7 @@ class TestBlockSparseGemvAndOuter(utt.InferShapeTester):
utt.assert_allclose(ref_out, th_out) utt.assert_allclose(ref_out, th_out)
def test_sparseblockgemvF(self): def test_sparseblockgemvF(self):
# Test the fortan order for W (which can happen in the grad for some # Test the fortran order for W (which can happen in the grad for some
# graphs). # graphs).
b = fmatrix() b = fmatrix()
...@@ -255,7 +255,7 @@ class TestBlockSparseGemvAndOuter(utt.InferShapeTester): ...@@ -255,7 +255,7 @@ class TestBlockSparseGemvAndOuter(utt.InferShapeTester):
W_val, h_val, iIdx_val, b_val, oIdx_val = self.gemv_data() W_val, h_val, iIdx_val, b_val, oIdx_val = self.gemv_data()
# just make sure that it runs correcly and all the shapes are ok. # just make sure that it runs correctly and all the shapes are ok.
b_g, W_g, h_g = f(W_val, h_val, iIdx_val, b_val, oIdx_val) b_g, W_g, h_g = f(W_val, h_val, iIdx_val, b_val, oIdx_val)
assert b_g.shape == b_val.shape assert b_g.shape == b_val.shape
......
...@@ -408,7 +408,7 @@ class TestConv2D(utt.InferShapeTester): ...@@ -408,7 +408,7 @@ class TestConv2D(utt.InferShapeTester):
@pytest.mark.slow @pytest.mark.slow
def test_invalid_input_shape(self): def test_invalid_input_shape(self):
# Tests that when the shape gived at build time is not the same as # Tests that when the shape given at build time is not the same as
# run time we raise an error # run time we raise an error
for unroll_batch in [None, 1, 3]: for unroll_batch in [None, 1, 3]:
......
...@@ -233,7 +233,7 @@ class TestImages2Neibs(unittest_tools.InferShapeTester): ...@@ -233,7 +233,7 @@ class TestImages2Neibs(unittest_tools.InferShapeTester):
[13, 14, 10, 18, 19, 15, 23, 24, 20], [13, 14, 10, 18, 19, 15, 23, 24, 20],
] ]
# TODO test discontinous image # TODO test discontinuous image
for shp_idx, (shape, neib_shape, neib_step, expected) in enumerate( for shp_idx, (shape, neib_shape, neib_step, expected) in enumerate(
[ [
......
...@@ -2136,7 +2136,7 @@ def test_tile(): ...@@ -2136,7 +2136,7 @@ def test_tile():
# error raising test: # error raising test:
# reps is vector and len(reps_value) > ndim, # reps is vector and len(reps_value) > ndim,
# reps_value is the real value when excuting the function. # reps_value is the real value when executing the function.
reps = ivector() reps = ivector()
r = [2, 3, 4, 5, 6, 7] r = [2, 3, 4, 5, 6, 7]
reps_ = r[: k + 2] reps_ = r[: k + 2]
......
...@@ -53,7 +53,7 @@ def test_gc_never_pickles_temporaries(): ...@@ -53,7 +53,7 @@ def test_gc_never_pickles_temporaries():
# len_pre_g = len(pre_g) # len_pre_g = len(pre_g)
# We can't compare the content or the length of the string # We can't compare the content or the length of the string
# between f and g. 2 reason, we store some timming information # between f and g. 2 reasons: we store some timing information
# in float. They won't be the same each time. Different float # in float. They won't be the same each time. Different float
# can have different length when printed. # can have different length when printed.
......
...@@ -640,7 +640,7 @@ class TestSoftplus: ...@@ -640,7 +640,7 @@ class TestSoftplus:
utt.verify_grad(aet.softplus, [np.random.rand(3, 4)]) utt.verify_grad(aet.softplus, [np.random.rand(3, 4)])
def test_accuracy(self): def test_accuracy(self):
# Test all aproximations are working (cutoff points are -37, 18, 33.3) # Test all approximations are working (cutoff points are -37, 18, 33.3)
x_test = np.array([-40.0, -17.5, 17.5, 18.5, 40.0]) x_test = np.array([-40.0, -17.5, 17.5, 18.5, 40.0])
y_th = aet.softplus(x_test).eval() y_th = aet.softplus(x_test).eval()
y_np = np.log1p(np.exp(x_test)) y_np = np.log1p(np.exp(x_test))
......
...@@ -164,7 +164,7 @@ class HiddenLayer: ...@@ -164,7 +164,7 @@ class HiddenLayer:
""" """
self.input = input self.input = input
# `W` is initialized with `W_values` which is uniformely sampled # `W` is initialized with `W_values` which is uniformly sampled
# from -6./sqrt(n_in+n_hidden) and 6./sqrt(n_in+n_hidden) # from -6./sqrt(n_in+n_hidden) and 6./sqrt(n_in+n_hidden)
# the output of uniform if converted using asarray to dtype # the output of uniform if converted using asarray to dtype
# aesara.config.floatX so that the code is runable on GPU # aesara.config.floatX so that the code is runnable on GPU
...@@ -188,7 +188,7 @@ class MLP: ...@@ -188,7 +188,7 @@ class MLP:
A multilayer perceptron is a feedforward artificial neural network model A multilayer perceptron is a feedforward artificial neural network model
that has one layer or more of hidden units and nonlinear activations. that has one layer or more of hidden units and nonlinear activations.
Intermidiate layers usually have as activation function thanh or the Intermediate layers usually have as activation function tanh or the
sigmoid function (defined here by a ``SigmoidalLayer`` class) while the sigmoid function (defined here by a ``SigmoidalLayer`` class) while the
top layer is a softamx layer (defined here by a ``LogisticRegression`` top layer is a softmax layer (defined here by a ``LogisticRegression``
class). class).
......
...@@ -52,7 +52,7 @@ def test_pseudoinverse_correctness(): ...@@ -52,7 +52,7 @@ def test_pseudoinverse_correctness():
assert ri.shape[0] == r.shape[1] assert ri.shape[0] == r.shape[1]
assert ri.shape[1] == r.shape[0] assert ri.shape[1] == r.shape[0]
assert ri.dtype == r.dtype assert ri.dtype == r.dtype
# Note that pseudoinverse can be quite unprecise so I prefer to compare # Note that pseudoinverse can be quite imprecise so I prefer to compare
# the result with what np.linalg returns # the result with what np.linalg returns
assert _allclose(ri, np.linalg.pinv(r)) assert _allclose(ri, np.linalg.pinv(r))
......
...@@ -327,7 +327,7 @@ def makeSharedTester( ...@@ -327,7 +327,7 @@ def makeSharedTester(
if x.__class__.__name__ != "csr_matrix": if x.__class__.__name__ != "csr_matrix":
# sparse matrix don't support inplace affectation # sparse matrices don't support in-place assignment
nd += 1 nd += 1
# THIS DOENS'T DO WHAT WE EXPECT the content of a is # THIS DOESN'T DO WHAT WE EXPECT the content of a is
# not updated for GpuArray, but it is for ndarray # not updated for GpuArray, but it is for ndarray
x_shared.get_value(borrow=True)[:] = nd x_shared.get_value(borrow=True)[:] = nd
assert may_share_memory(old_data, x_shared.container.storage[0]) assert may_share_memory(old_data, x_shared.container.storage[0])
...@@ -347,7 +347,7 @@ def makeSharedTester( ...@@ -347,7 +347,7 @@ def makeSharedTester(
) )
# Test by set_value with borrow=False when new data cast. # Test by set_value with borrow=False when new data cast.
# specificaly useful for gpu data # specifically useful for gpu data
nd += 1 nd += 1
old_data = x_shared.container.storage[0] old_data = x_shared.container.storage[0]
x_shared.set_value(self.cast_value(nd), borrow=False) x_shared.set_value(self.cast_value(nd), borrow=False)
......
...@@ -142,7 +142,7 @@ def test_eigvalsh(): ...@@ -142,7 +142,7 @@ def test_eigvalsh():
refw = scipy.linalg.eigvalsh(a, b) refw = scipy.linalg.eigvalsh(a, b)
np.testing.assert_array_almost_equal(w, refw) np.testing.assert_array_almost_equal(w, refw)
# We need to test None separatly, as otherwise DebugMode will # We need to test None separately, as otherwise DebugMode will
# complain, as this isn't a valid ndarray. # complain, as this isn't a valid ndarray.
b = None b = None
B = aet.NoneConst B = aet.NoneConst
......
...@@ -8,7 +8,7 @@ from aesara.tensor.utils import hash_from_ndarray, shape_of_variables ...@@ -8,7 +8,7 @@ from aesara.tensor.utils import hash_from_ndarray, shape_of_variables
def test_hash_from_ndarray(): def test_hash_from_ndarray():
hashs = [] hashes = []
rng = np.random.rand(5, 5) rng = np.random.rand(5, 5)
for data in [ for data in [
...@@ -37,9 +37,9 @@ def test_hash_from_ndarray(): ...@@ -37,9 +37,9 @@ def test_hash_from_ndarray():
rng[::-1], rng[::-1],
]: ]:
data = np.asarray(data) data = np.asarray(data)
hashs.append(hash_from_ndarray(data)) hashes.append(hash_from_ndarray(data))
assert len(set(hashs)) == len(hashs) assert len(set(hashes)) == len(hashes)
# test that different type of views and their copy give the same hash # test that different type of views and their copy give the same hash
assert hash_from_ndarray(rng[1:]) == hash_from_ndarray(rng[1:].copy()) assert hash_from_ndarray(rng[1:]) == hash_from_ndarray(rng[1:].copy())
......
...@@ -901,7 +901,7 @@ class TestDisconnectedGrad: ...@@ -901,7 +901,7 @@ class TestDisconnectedGrad:
x = matrix("x") x = matrix("x")
# This MUST raise a DisconnectedInputError error. # This MUST raise a DisconnectedInputError error.
# This also rasies an additional warning from gradients.py. # This also raises an additional warning from gradients.py.
with pytest.raises(DisconnectedInputError): with pytest.raises(DisconnectedInputError):
grad(disconnected_grad(x).sum(), x) grad(disconnected_grad(x).sum(), x)
...@@ -912,7 +912,7 @@ class TestDisconnectedGrad: ...@@ -912,7 +912,7 @@ class TestDisconnectedGrad:
b = matrix("b") b = matrix("b")
y = a + disconnected_grad(b) y = a + disconnected_grad(b)
# This MUST raise a DisconnectedInputError error. # This MUST raise a DisconnectedInputError error.
# This also rasies an additional warning from gradients.py. # This also raises an additional warning from gradients.py.
with pytest.raises(DisconnectedInputError): with pytest.raises(DisconnectedInputError):
grad(y.sum(), b) grad(y.sum(), b)
......
...@@ -61,7 +61,7 @@ break_op = BreakRop() ...@@ -61,7 +61,7 @@ break_op = BreakRop()
class RopLopChecker: class RopLopChecker:
""" """
Don't peform any test, but provide the function to test the Don't perform any test, but provide the function to test the
Rop to class that inherit from it. Rop to class that inherit from it.
""" """
......
...@@ -111,7 +111,7 @@ class TestTypedListType: ...@@ -111,7 +111,7 @@ class TestTypedListType:
assert myNestedType2 != myNestedType3 assert myNestedType2 != myNestedType3
def test_nested_list_arg(self): def test_nested_list_arg(self):
# test for the 'depth' optionnal argument # test for the 'depth' optional argument
myNestedType = TypedListType( myNestedType = TypedListType(
TensorType(aesara.config.floatX, (False, False)), 3 TensorType(aesara.config.floatX, (False, False)), 3
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论