Commit bcd5d52e authored by amrithasuresh

1. Updated numpy references to the np alias

2. Fixed indentation
Parent f1bf4253
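For context, the commit applies NumPy's conventional import alias throughout the test file. A minimal sketch of the pattern (illustrative only, not part of the commit):

```python
# Import NumPy once under the conventional short alias, then use
# the np prefix at every call site.
import numpy as np

x = np.asarray([[0.5, 1.0], [1.5, 2.0]], dtype='float64')

# np.allclose is the same function previously spelled numpy.allclose.
assert np.allclose(x / 2, np.asarray([[0.25, 0.5], [0.75, 1.0]]))
```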
 from __future__ import absolute_import, print_function, division
-import numpy
+import numpy as np
 import unittest
 import warnings
@@ -24,7 +24,7 @@ def makeSharedTester(shared_constructor_,
                      test_internal_type_,
                      theano_fct_,
                      ref_fct_,
-                     cast_value_=numpy.asarray,
+                     cast_value_=np.asarray,
                      op_by_matrix_=False,
                      name=None,
                      ):
@@ -82,8 +82,8 @@ def makeSharedTester(shared_constructor_,
             if dtype is None:
                 dtype = theano.config.floatX
-            rng = numpy.random.RandomState(utt.fetch_seed())
-            x = numpy.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)
+            rng = np.random.RandomState(utt.fetch_seed())
+            x = np.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)
             x = self.cast_value(x)
             x_ref = self.ref_fct(x)
@@ -94,17 +94,17 @@ def makeSharedTester(shared_constructor_,
             total_val = total_func()
-            assert numpy.allclose(self.ref_fct(x), total_val)
+            assert np.allclose(self.ref_fct(x), total_val)

             values_to_div = .5
             if self.op_by_matrix:
-                values_to_div = self.internal_type(numpy.ones(x.shape, dtype=dtype)/2)  # supported for cudandarray, but not ndarray.
+                values_to_div = self.internal_type(np.ones(x.shape, dtype=dtype)/2)  # supported for cudandarray, but not ndarray.
                 assert self.test_internal_type(values_to_div)
             x /= values_to_div
             total_val_2 = total_func()

             # value used to construct should not alias with internal
-            assert numpy.allclose(total_val, total_val_2)
+            assert np.allclose(total_val, total_val_2)

             x = x_shared.get_value(borrow=False)
@@ -113,7 +113,7 @@ def makeSharedTester(shared_constructor_,
             total_val_3 = total_func()

             # value returned by access should not alias with internal
-            assert numpy.allclose(total_val, total_val_3)
+            assert np.allclose(total_val, total_val_3)

             # in this case we can alias
             x = x_shared.get_value(borrow=True)
@@ -122,17 +122,17 @@ def makeSharedTester(shared_constructor_,
             # this is not required by the contract but it is a feature we've
             # implemented for some type of SharedVariable.
             if self.get_value_borrow_true_alias:
-                assert numpy.allclose(self.ref_fct(x), total_func())
+                assert np.allclose(self.ref_fct(x), total_func())
             else:
-                assert numpy.allclose(x_ref, total_func())
+                assert np.allclose(x_ref, total_func())

         def test_shape(self):
             dtype = self.dtype
             if dtype is None:
                 dtype = theano.config.floatX
-            rng = numpy.random.RandomState(utt.fetch_seed())
-            x = numpy.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)
+            rng = np.random.RandomState(utt.fetch_seed())
+            x = np.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)
             x = self.cast_value(x)
             x_ref = self.ref_fct(x)
@@ -142,7 +142,7 @@ def makeSharedTester(shared_constructor_,
             f = theano.function([], x_shared.shape)
             topo = f.maker.fgraph.toposort()
-            assert numpy.all(f() == (2, 4))
+            assert np.all(f() == (2, 4))
             if theano.config.mode != 'FAST_COMPILE':
                 assert len(topo) == 3
                 assert isinstance(topo[0].op, tensor.opt.Shape_i)
@@ -154,8 +154,8 @@ def makeSharedTester(shared_constructor_,
             if dtype is None:
                 dtype = theano.config.floatX
-            rng = numpy.random.RandomState(utt.fetch_seed())
-            x = numpy.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)
+            rng = np.random.RandomState(utt.fetch_seed())
+            x = np.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)
             x = self.cast_value(x)
             x_ref = self.ref_fct(x)
@@ -165,7 +165,7 @@ def makeSharedTester(shared_constructor_,
             f = theano.function([], x_shared.shape[1])
             topo = f.maker.fgraph.toposort()
-            assert numpy.all(f() == (4))
+            assert np.all(f() == (4))
             if theano.config.mode != 'FAST_COMPILE':
                 assert len(topo) == 1
                 assert isinstance(topo[0].op, tensor.opt.Shape_i)
@@ -175,8 +175,8 @@ def makeSharedTester(shared_constructor_,
             if dtype is None:
                 dtype = theano.config.floatX
-            rng = numpy.random.RandomState(utt.fetch_seed())
-            x = numpy.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)
+            rng = np.random.RandomState(utt.fetch_seed())
+            x = np.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)
             x = self.cast_value(x)
             x_ref = self.ref_fct(x)
@@ -193,12 +193,12 @@ def makeSharedTester(shared_constructor_,
             if self.op_by_matrix:
                 # supported for cudandarray, but not ndarray.
                 values_to_div = self.internal_type(
-                    numpy.ones(x.shape, dtype=dtype)/2)
+                    np.ones(x.shape, dtype=dtype)/2)
             x /= values_to_div  # supported by ndarray and CudaNdarray

             # this is not required by the contract but it is a feature we can
             # implement for some type of SharedVariable.
-            assert numpy.allclose(self.ref_fct(x), total_func())
+            assert np.allclose(self.ref_fct(x), total_func())

             x = x_shared.get_value(borrow=False, return_internal_type=True)
             assert self.test_internal_type(x)
@@ -206,7 +206,7 @@ def makeSharedTester(shared_constructor_,
             x /= values_to_div  # supported by ndarray and CudaNdarray

             # this is required by the contract
-            assert not numpy.allclose(self.ref_fct(x), total_func())
+            assert not np.allclose(self.ref_fct(x), total_func())

         def test_get_value(self):
             """
@@ -216,8 +216,8 @@ def makeSharedTester(shared_constructor_,
             if dtype is None:
                 dtype = theano.config.floatX
-            rng = numpy.random.RandomState(utt.fetch_seed())
-            x_orig = numpy.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)
+            rng = np.random.RandomState(utt.fetch_seed())
+            x_orig = np.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)
             x_cast = self.cast_value(x_orig)
             if self.shared_constructor_accept_ndarray:
                 x_shared = self.shared_constructor(x_orig, borrow=False)
@@ -231,8 +231,8 @@ def makeSharedTester(shared_constructor_,
             if dtype is None:
                 dtype = theano.config.floatX
-            rng = numpy.random.RandomState(utt.fetch_seed())
-            x = numpy.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)
+            rng = np.random.RandomState(utt.fetch_seed())
+            x = np.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)
             x = self.cast_value(x)
             x_orig = x
@@ -247,7 +247,7 @@ def makeSharedTester(shared_constructor_,
             values_to_div = .5
             if self.op_by_matrix:
                 # supported for cudandarray, but not ndarray.
-                values_to_div = self.internal_type(numpy.ones(x.shape, dtype=dtype)/2)
+                values_to_div = self.internal_type(np.ones(x.shape, dtype=dtype)/2)
                 assert self.test_internal_type(values_to_div)

             # test if that theano shared variable optimize set_value(borrow=True)
@@ -260,7 +260,7 @@ def makeSharedTester(shared_constructor_,
                 assert x is get_x
             else:
                 assert x is not get_x
-            assert numpy.allclose(self.ref_fct(numpy.asarray(x_orig)/.5), self.ref_fct(x))
+            assert np.allclose(self.ref_fct(np.asarray(x_orig)/.5), self.ref_fct(x))

             # test optimized get set value on the gpu(don't pass data to the cpu)
             get_x = x_shared.get_value(borrow=True, return_internal_type=True)
@@ -280,8 +280,8 @@ def makeSharedTester(shared_constructor_,
             if dtype is None:
                 dtype = theano.config.floatX
-            rng = numpy.random.RandomState(utt.fetch_seed())
-            x = numpy.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)
+            rng = np.random.RandomState(utt.fetch_seed())
+            x = np.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)
             x = self.cast_value(x)
             x_ref = self.ref_fct(x)
@@ -293,20 +293,20 @@ def makeSharedTester(shared_constructor_,
             total_val = total_func()
-            assert numpy.allclose(self.ref_fct(x), total_val)
+            assert np.allclose(self.ref_fct(x), total_val)

             values_to_div = .5
             if self.op_by_matrix:
                 # supported for cudandarray, but not ndarray.
-                values_to_div = self.internal_type(numpy.ones(x.shape, dtype=dtype)/2)
+                values_to_div = self.internal_type(np.ones(x.shape, dtype=dtype)/2)
                 assert self.test_internal_type(values_to_div)
             x /= values_to_div

             # not required by the contract but it is a feature we've implemented
             if self.shared_borrow_true_alias:
-                assert numpy.allclose(self.ref_fct(x), total_func())
+                assert np.allclose(self.ref_fct(x), total_func())
             else:
-                assert numpy.allclose(x_ref, total_func())
+                assert np.allclose(x_ref, total_func())

         def test_inplace_set_value(self):
             """
@@ -319,25 +319,25 @@ def makeSharedTester(shared_constructor_,
             shp = (100//4, 1024)  # 100KB

-            x = numpy.zeros(shp, dtype=dtype)
+            x = np.zeros(shp, dtype=dtype)
             x = self.cast_value(x)
             x_shared = self.shared_constructor(x, borrow=True)

             old_data = x_shared.container.storage[0]
-            nd = numpy.ones(shp, dtype=dtype)
+            nd = np.ones(shp, dtype=dtype)

             if x.__class__.__name__ != 'csr_matrix':
                 # sparse matrix don't support inplace affectation
                 x_shared.container.value[:] = nd
-                assert (numpy.asarray(x_shared.get_value(borrow=True)) == nd).all()
+                assert (np.asarray(x_shared.get_value(borrow=True)) == nd).all()
                 # This should always share value!
                 assert may_share_memory(old_data, x_shared.container.storage[0])
                 assert may_share_memory(old_data, x_shared.get_value(borrow=True, return_internal_type=True))

                 nd[0] += 1
                 x_shared.container.value[0] = nd[0]
-                assert (numpy.asarray(x_shared.get_value(borrow=True)[0]) == nd[0]).all()
-                assert (numpy.asarray(x_shared.get_value(borrow=True)[1:]) == nd[1:]).all()
+                assert (np.asarray(x_shared.get_value(borrow=True)[0]) == nd[0]).all()
+                assert (np.asarray(x_shared.get_value(borrow=True)[1:]) == nd[1:]).all()

                 # This should always share value!
                 assert may_share_memory(old_data, x_shared.container.storage[0])
                 assert may_share_memory(old_data, x_shared.get_value(borrow=True, return_internal_type=True))
@@ -347,7 +347,7 @@ def makeSharedTester(shared_constructor_,
                 nd += 1
                 # THIS DON't DO WHAT WE EXPECT the contain of a is not updated for CudaNdarray, but it is for ndarray
                 x_shared.get_value(borrow=True)[:] = nd
-                #assert (numpy.asarray(x_shared.get_value(borrow=True))!=nd).all()
+                #assert (np.asarray(x_shared.get_value(borrow=True))!=nd).all()
                 assert may_share_memory(old_data, x_shared.container.storage[0])
                 x_shared.get_value(borrow=True)
@@ -355,7 +355,7 @@ def makeSharedTester(shared_constructor_,
             nd += 1
             old_data = x_shared.container.storage[0]
             x_shared.set_value(nd, borrow=False)
-            assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),
+            assert np.allclose(self.ref_fct(x_shared.get_value(borrow=True)),
                                self.ref_fct(self.cast_value(nd)))
             assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_value_inplace
@@ -364,7 +364,7 @@ def makeSharedTester(shared_constructor_,
             nd += 1
             old_data = x_shared.container.storage[0]
             x_shared.set_value(self.cast_value(nd), borrow=False)
-            assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),
+            assert np.allclose(self.ref_fct(x_shared.get_value(borrow=True)),
                                self.ref_fct(self.cast_value(nd)))
             assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_cast_value_inplace
@@ -372,7 +372,7 @@ def makeSharedTester(shared_constructor_,
             nd += 1
             old_data = x_shared.container.storage[0]
             x_shared.set_value(nd.copy(), borrow=True)
-            assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),
+            assert np.allclose(self.ref_fct(x_shared.get_value(borrow=True)),
                                self.ref_fct(self.cast_value(nd)))
             assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_value_inplace
@@ -380,7 +380,7 @@ def makeSharedTester(shared_constructor_,
             nd += 1
             old_data = x_shared.container.storage[0]
             x_shared.set_value(self.cast_value(nd.copy()), borrow=True)
-            assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)), self.ref_fct(self.cast_value(nd)))
+            assert np.allclose(self.ref_fct(x_shared.get_value(borrow=True)), self.ref_fct(self.cast_value(nd)))
             assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_cast_value_inplace

         def test_specify_shape(self):
@@ -388,19 +388,19 @@ def makeSharedTester(shared_constructor_,
             if dtype is None:
                 dtype = theano.config.floatX
-            rng = numpy.random.RandomState(utt.fetch_seed())
-            x1_1 = numpy.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)
+            rng = np.random.RandomState(utt.fetch_seed())
+            x1_1 = np.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)
             x1_1 = self.cast_value(x1_1)
-            x1_2 = numpy.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)
+            x1_2 = np.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)
             x1_2 = self.cast_value(x1_2)
-            x2 = numpy.asarray(rng.uniform(1, 2, [4, 3]), dtype=dtype)
+            x2 = np.asarray(rng.uniform(1, 2, [4, 3]), dtype=dtype)
             x2 = self.cast_value(x2)

             # Test that we can replace with values of the same shape
             x1_shared = self.shared_constructor(x1_1)
             x1_specify_shape = tensor.specify_shape(x1_shared, x1_1.shape)
             x1_shared.set_value(x1_2)
-            assert numpy.allclose(self.ref_fct(x1_shared.get_value(borrow=True)),
+            assert np.allclose(self.ref_fct(x1_shared.get_value(borrow=True)),
                                self.ref_fct( x1_2))
             shape_op_fct = theano.function([], x1_shared.shape)
             topo = shape_op_fct.maker.fgraph.toposort()
@@ -412,14 +412,14 @@ def makeSharedTester(shared_constructor_,
             # Test that we forward the input
             specify_shape_fct = theano.function([], x1_specify_shape)
-            assert numpy.all(self.ref_fct(specify_shape_fct()) ==
+            assert np.all(self.ref_fct(specify_shape_fct()) ==
                              self.ref_fct(x1_2))
             topo_specify = specify_shape_fct.maker.fgraph.toposort()
             assert len(topo_specify) == 2

             # Test that we put the shape info into the graph
             shape_constant_fct = theano.function([], x1_specify_shape.shape)
-            assert numpy.all(shape_constant_fct() == shape_op_fct())
+            assert np.all(shape_constant_fct() == shape_op_fct())

             topo_cst = shape_constant_fct.maker.fgraph.toposort()
             if theano.config.mode != 'FAST_COMPILE':
                 assert len(topo_cst) == 1
@@ -454,12 +454,12 @@ def makeSharedTester(shared_constructor_,
             if dtype is None:
                 dtype = theano.config.floatX
-            rng = numpy.random.RandomState(utt.fetch_seed())
-            x1_1 = numpy.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)
+            rng = np.random.RandomState(utt.fetch_seed())
+            x1_1 = np.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)
             x1_1 = self.cast_value(x1_1)
-            x1_2 = numpy.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)
+            x1_2 = np.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)
             x1_2 = self.cast_value(x1_2)
-            x2 = numpy.asarray(rng.uniform(1, 2, [5, 2]), dtype=dtype)
+            x2 = np.asarray(rng.uniform(1, 2, [5, 2]), dtype=dtype)
             x2 = self.cast_value(x2)

             # Test that we can replace with values of the same shape
@@ -468,7 +468,7 @@ def makeSharedTester(shared_constructor_,
                 (tensor.as_tensor_variable(x1_1.shape[0]),
                  x1_shared.shape[1]))
             x1_shared.set_value(x1_2)
-            assert numpy.allclose(
+            assert np.allclose(
                 self.ref_fct(x1_shared.get_value(borrow=True)),
                 self.ref_fct( x1_2))
             shape_op_fct = theano.function([], x1_shared.shape)
@@ -484,7 +484,7 @@ def makeSharedTester(shared_constructor_,
             specify_shape_fct = theano.function([], x1_specify_shape)
             specify_shape_fct()
             # theano.printing.debugprint(specify_shape_fct)
-            assert numpy.all(self.ref_fct(specify_shape_fct())
+            assert np.all(self.ref_fct(specify_shape_fct())
                              == self.ref_fct(x1_2))
             topo_specify = specify_shape_fct.maker.fgraph.toposort()
             if theano.config.mode != 'FAST_COMPILE':
@@ -493,7 +493,7 @@ def makeSharedTester(shared_constructor_,
             # Test that we put the shape info into the graph
             shape_constant_fct = theano.function([], x1_specify_shape.shape)
             # theano.printing.debugprint(shape_constant_fct)
-            assert numpy.all(shape_constant_fct() == shape_op_fct())
+            assert np.all(shape_constant_fct() == shape_op_fct())
             topo_cst = shape_constant_fct.maker.fgraph.toposort()
             if theano.config.mode != 'FAST_COMPILE':
                 assert len(topo_cst) == 2
@@ -516,14 +516,14 @@ def makeSharedTester(shared_constructor_,
             if dtype is None:
                 dtype = theano.config.floatX
-            rng = numpy.random.RandomState(utt.fetch_seed())
-            a = numpy.asarray(rng.uniform(1, 2, [40, 40]), dtype=dtype)
+            rng = np.random.RandomState(utt.fetch_seed())
+            a = np.asarray(rng.uniform(1, 2, [40, 40]), dtype=dtype)
             a = self.cast_value(a)
             a_shared = self.shared_constructor(a)
-            b = numpy.asarray(rng.uniform(1, 2, [40, 40]), dtype=dtype)
+            b = np.asarray(rng.uniform(1, 2, [40, 40]), dtype=dtype)
             b = self.cast_value(b)
             b_shared = self.shared_constructor(b)
-            s = numpy.zeros((40, 40), dtype=dtype)
+            s = np.zeros((40, 40), dtype=dtype)
             s = self.cast_value(s)
             s_shared = self.shared_constructor(s)
             f = theano.function([],
@@ -546,7 +546,7 @@ def makeSharedTester(shared_constructor_,
                                      + s_shared_specify)])
             topo = f.maker.fgraph.toposort()
             shp = f()
-            assert numpy.all(shp == (40, 40))
+            assert np.all(shp == (40, 40))
             if theano.config.mode != 'FAST_COMPILE':
                 assert sum([node.op.__class__.__name__ in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo]) == 1
                 assert all(node.op == tensor.blas.gemm_inplace for node in topo if isinstance(node.op, tensor.blas.Gemm))
@@ -562,7 +562,7 @@ def makeSharedTester(shared_constructor_,
                                      + s_shared_specify)])
             topo = f.maker.fgraph.toposort()
             shp = f()
-            assert numpy.all(shp == (40, 40))
+            assert np.all(shp == (40, 40))
             if theano.config.mode != 'FAST_COMPILE':
                 assert sum([node.op.__class__.__name__ in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo]) == 1
                 assert all(node.op == tensor.blas.gemm_inplace for node in topo if isinstance(node.op, tensor.blas.Gemm))
@@ -578,9 +578,9 @@ def makeSharedTester(shared_constructor_,
             shp = (1024, 1024)

             # Test the case with all zeros element
-            rng = numpy.random.RandomState(utt.fetch_seed())
-            for x in [numpy.asarray(rng.rand(*shp), dtype=dtype),
-                      numpy.zeros(shp, dtype=dtype)]:
+            rng = np.random.RandomState(utt.fetch_seed())
+            for x in [np.asarray(rng.rand(*shp), dtype=dtype),
+                      np.zeros(shp, dtype=dtype)]:
                 zeros = (x == 0).all()
                 x = self.cast_value(x)
                 x_shared = self.shared_constructor(x, borrow=True)
@@ -592,7 +592,7 @@ def makeSharedTester(shared_constructor_,
                 assert x_shared.type.values_eq(x, x)
                 assert x_shared.type.values_eq_approx(x, x)
                 if not zeros:
-                    assert not numpy.allclose(self.ref_fct(x), self.ref_fct(y))
+                    assert not np.allclose(self.ref_fct(x), self.ref_fct(y))
                     assert not x_shared.type.values_eq(x, y)
                     assert not x_shared.type.values_eq_approx(x, y)
@@ -612,11 +612,11 @@ test_shared_options = makeSharedTester(
     set_value_inplace_=False,
     set_cast_value_inplace_=False,
     shared_constructor_accept_ndarray_=True,
-    internal_type_=numpy.ndarray,
-    test_internal_type_=lambda a: isinstance(a, numpy.ndarray),
+    internal_type_=np.ndarray,
+    test_internal_type_=lambda a: isinstance(a, np.ndarray),
     theano_fct_=lambda a: a*2,
-    ref_fct_=lambda a: numpy.asarray((a*2)),
-    cast_value_=numpy.asarray,
+    ref_fct_=lambda a: np.asarray((a*2)),
+    cast_value_=np.asarray,
     op_by_matrix_=False,
     name='test_shared_options')
@@ -624,4 +624,4 @@ test_shared_options = makeSharedTester(
 def test_scalar_shared_options():
     # Simple test to make sure we do not loose that fonctionality.
     theano.shared(value=0., name='lk', borrow=True)
-    theano.shared(value=numpy.float32(0.), name='lk', borrow=True)
+    theano.shared(value=np.float32(0.), name='lk', borrow=True)