Commit 5ca50a4f authored by James Bergstra

GpuElemwise - removed RecAlgo dead code

Parent f4c3e9a2
...@@ -34,188 +34,6 @@ def get_str_list_logical_scalar(node, value_str='ii_i%i_value', data_str='ii_i%i ...@@ -34,188 +34,6 @@ def get_str_list_logical_scalar(node, value_str='ii_i%i_value', data_str='ii_i%i
else: l+=[data_str%ipos] else: l+=[data_str%ipos]
return l return l
class RecAlgo(object):
    """Dead-code CUDA source generator for elementwise GPU ops.

    Emits, as Python strings, (a) a ``__global__`` CUDA kernel whose
    dimension sizes are passed as log2 values (see ``c_src_kernel``) and
    (b) a recursive host-side launcher that decomposes each actual
    dimension into powers of two and launches the kernel once per set
    bit (see ``c_src_callkernel``).

    NOTE(review): this is a mixin — it reads ``self.scalar_op`` and
    relies on module-level helpers (``_logical_scalar``,
    ``get_str_list_logical_scalar``, ``scalar``, ``Apply``,
    ``StringIO``) defined elsewhere in the file.  Python 2 syntax
    (``print >>``, ``xrange``).  Removed as dead code by this commit.
    """

    def c_src_kernel(self, node, nodename):
        """Return CUDA source (a str) for the elementwise kernel of ``node``.

        The generated kernel takes the total element count, the log2 of
        each of the ``nd`` dimensions, and per-input/per-output data
        pointers plus one stride per dimension.  Each CUDA thread
        processes elements idx, idx + numThreads, idx + 2*numThreads, ...
        and recovers per-dimension coordinates with the
        ``INTMOD_POW2`` / ``INTDIV_POW2`` macros (power-of-two sizes only,
        hence the log2 parameters).
        """
        nd = node.outputs[0].type.ndim
        sio = StringIO.StringIO()
        #print 'C_SRC_KERNEL', sio.getvalue()
        # Header comments naming each input/output type, for readability of
        # the generated source.
        for ipos, i in enumerate(node.inputs):
            print >> sio, "// Input ", ipos, str(i.type)
        for ipos, i in enumerate(node.outputs):
            print >> sio, "// Output ", ipos, str(i.type)
        # Kernel name encodes the scalar op class and the Apply node, so
        # distinct ops/nodes get distinct kernels.
        print >> sio, "static __global__ void kernel_%s_%s(unsigned int numEls" %(self.scalar_op.__class__.__name__,nodename)
        if (nd):
            print >> sio, "\t,", ", ".join("unsigned int log2_dim%i" % i for i in xrange(nd))
        #declare inputs
        for ipos, i in enumerate(node.inputs):
            s = ", ".join(["const float * i%i_data" % ipos] + list("int i%i_str_%i" % (ipos, d) for d in xrange(nd)))
            print >> sio, "\t,", s
        #declare outputs
        for ipos, i in enumerate(node.outputs):
            s = ", ".join(["float * o%i_data" % ipos] + list("int o%i_str_%i" % (ipos, d) for d in xrange(nd)))
            print >> sio, "\t,", s
            #print >> sio, "\t,", ", ".join("int o%i_str_%i" % (ipos, d) for d in xrange(nd))
            #print >> sio, "\t,", "float * o%i_data" % ipos
        print >> sio, "\t)\n{"
        print >> sio, " const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;"
        print >> sio, " const unsigned int numThreads = blockDim.x * gridDim.x;"

        # For each input that is a scalar which has been broadcasted to a tensor,
        #     load it into a local variable
        for ipos, i in enumerate(node.inputs):
            if _logical_scalar(i):
                print >> sio, " const float ii_i%i_value = i%i_data[0];" % (ipos, ipos)

        #TODO: insert code to check for strides of 1, and use a different loop

        #loop over the elements to be treated by this kernel call
        print >> sio, " for (unsigned int i = idx; i < numEls; i += numThreads) {"
        # calculate the data pointers for all arguments
        print >> sio, " unsigned int ii = i;"
        for ipos, i in enumerate(node.inputs):
            if not _logical_scalar(i):
                print >> sio, " const float * ii_i%i_data = i%i_data;" % (ipos, ipos)
        for ipos, i in enumerate(node.outputs):
            print >> sio, " float * ii_o%i_data = o%i_data;" % (ipos, ipos)
        # Decode the flat index into per-dimension positions, innermost
        # dimension first; the mod/div are done with bit masks/shifts
        # because every dimension size handed to this kernel is a power
        # of two (the launcher splits non-power-of-two dims).
        for d in xrange(nd-1, -1, -1):
            if d > 0:
                print >> sio, " unsigned int pos%i = INTMOD_POW2(ii, log2_dim%i);" %(d, d)
                print >> sio, " ii = INTDIV_POW2(ii, log2_dim%i);" %d
            else:
                print >> sio, " unsigned int pos%i = ii;" %d
            # Advance each (non-scalar) argument pointer by this
            # dimension's stride.
            for ipos, i in enumerate(node.inputs):
                if not _logical_scalar(i):
                    print >> sio, " ii_i%i_data += pos%i * i%i_str_%i;" % (ipos, d, ipos, d)
            for ipos, i in enumerate(node.outputs):
                print >> sio, " ii_o%i_data += pos%i * o%i_str_%i;" % (ipos, d, ipos, d)

        # perform the scalar operation on the input and output references
        #TODO: What if the scalar_op needs support_code??
        # A throw-away Apply over Scalar variables is built just so the
        # scalar op's c_code() can be asked for the per-element C snippet.
        task_code = self.scalar_op.c_code(
                Apply(self.scalar_op,
                    [scalar.Scalar(dtype = input.type.dtype)() for input in node.inputs],
                    [scalar.Scalar(dtype = output.type.dtype)() for output in node.outputs])
                , nodename + '_scalar_'
                , get_str_list_logical_scalar(node)
                , ['ii_o%i_data[0]'%ipos for ipos, i in enumerate(node.outputs)]
                , sub=dict(fail='return;')) #TODO: set a failure code somehow!!!
        print >> sio, " ", task_code
        print >> sio, " }"

        #TODO: insert runtime stride checks that select the best loop order either here, or in
        # the host code that launched the kernel (host code probably better spot)

        #indent = " "*(4*d+7)
        #for ipos, i in enumerate(node.inputs):
            #print >> sio, indent, "const float * i%i" % ipos, '= i%i_data', ''
        print >> sio, "}"
        #print sio.getvalue()
        return sio.getvalue()

    def c_src_callkernel(self, node, nodename):
        """Return C source (a str) for the recursive host-side launcher.

        The generated ``callkernel_<nodename>`` walks the dimensions;
        at depth ``d == nd`` it launches the CUDA kernel, otherwise it
        peels dimension ``d`` into its power-of-two components: for each
        set bit of ``dims[d]`` it recurses with that power-of-two as the
        dimension size, then bumps every data pointer past the chunk
        just processed.  This is why the kernel only has to handle
        power-of-two dimension sizes.
        """
        nd = node.outputs[0].type.ndim
        d = dict()
        #input_params and output_params go into the function declaration/definition
        input_params = ", ".join("const float * i%i_data, const int * i%i_str"%(ipos, ipos)
                for ipos in xrange(len(node.inputs)))
        output_params = ", ".join("float * o%i_data, const int * o%i_str"%(ipos, ipos)
                for ipos in xrange(len(node.outputs)))

        #input_args and output_args go into the recursive call.
        input_args = ", ".join("i%i_data, i%i_str"%(ipos, ipos)
                for ipos in xrange(len(node.inputs)))
        output_args = ", ".join("o%i_data, o%i_str"%(ipos, ipos)
                for ipos in xrange(len(node.outputs)))

        # kernel_call_args are used to invoke the cuda kernel
        kernel_call_args = ["numEls"]
        kernel_call_args.extend("log2_dims[%i]"%di for di in xrange(nd))
        for ipos in xrange(len(node.inputs)):
            kernel_call_args.append(
                    ", ".join(["i%i_data"%ipos] + list("i%i_str[%i]"%(ipos, di) for di in xrange(nd)))
                    )
            #strides = ", ".join("i%i_str[%i]"%(ipos, di) for di in xrange(nd))
            #kernel_call_args.append( "%s, i%i_data" % (strides, ipos))
        for ipos in xrange(len(node.outputs)):
            kernel_call_args.append(
                    ", ".join(["o%i_data"%ipos] + list("o%i_str[%i]"%(ipos, di) for di in xrange(nd)))
                    )
            #strides = ", ".join("o%i_str[%i]"%(ipos, di) for di in xrange(nd))
            #kernel_call_args.append( "%s, o%i_data" % (strides, ipos))
        kernel_call_args = ", ".join(kernel_call_args)

        # the data_pointer_increments are inserted after each recursive call
        data_ptr_inc = []
        for ipos in xrange(len(node.inputs)):
            data_ptr_inc.append("i%i_data += (1<< log2_dim) * i%i_str[d]" %(ipos, ipos))
        for ipos in xrange(len(node.outputs)):
            data_ptr_inc.append("o%i_data += (1<< log2_dim) * o%i_str[d]" %(ipos, ipos))
        data_ptr_inc = ";\n".join(data_ptr_inc)

        # Template is filled from the local variables computed above plus
        # the scalar op's class name (used in the kernel's name).
        d.update(locals())
        d["scalar_op"]=self.scalar_op.__class__.__name__
        return """
static void callkernel_%(nodename)s(const unsigned int numEls, const int d,
const int * dims, int * log2_dims,
%(input_params)s,
%(output_params)s)
{
if (d == %(nd)s)
{
int threads_per_block = std::min(numEls, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
//a ceil would be better here
int n_blocks = std::min(numEls/threads_per_block + (numEls %% threads_per_block?1:0), (unsigned int)NUM_VECTOR_OP_BLOCKS);
kernel_%(scalar_op)s_%(nodename)s<<<n_blocks, threads_per_block>>>(%(kernel_call_args)s);
//std::cerr << "ADDCALL a str" << i0_str[0] << " "<< i0_str[1] << "\\n";
//std::cerr << "ADDCALL a data" << i0_data << "\\n";
//std::cerr << "ADDCALL b str" << i1_str[0] << " "<< i1_str[1] << "\\n";
//std::cerr << "ADDCALL b data" << i1_data << "\\n";
//std::cerr << "ADDCALL z str" << o0_str[0] << " "<< o0_str[1] << "\\n";
//std::cerr << "ADDCALL z data" << o0_data << "\\n";
}
else
{
//std::cerr << "_ADDCALL d " << d << "\\n";
unsigned int dim_d = dims[d];
//std::cerr << "_ADDCALL dim_d " << dim_d << "\\n";
int log2_dim = 0;
while(dim_d)
{
//std::cerr << "___ADDCALL d " << d << " " << dim_d << "\\n";
if (dim_d&1)
{
log2_dims[d] = log2_dim;
//std::cerr << "___ADDCALL a str" << i0_str[0] << " "<< i0_str[1] << "\\n";
//std::cerr << "___ADDCALL a data" << i0_data << "\\n";
//std::cerr << "___ADDCALL b str" << i1_str[0] << " "<< i1_str[1] << "\\n";
//std::cerr << "___ADDCALL b data" << i1_data << "\\n";
//std::cerr << "___ADDCALL z str" << o0_str[0] << " "<< o0_str[1] << "\\n";
//std::cerr << "___ADDCALL z data" << o0_data << "\\n";
callkernel_%(nodename)s(numEls * (1<<log2_dim), d+1, dims, log2_dims,
%(input_args)s,
%(output_args)s);
%(data_ptr_inc)s;
//i0_data += (1 << log2_dim) * i0_str[d];
//i1_data += (1 << log2_dim) * i1_str[d];
//o0_data += (1 << log2_dim) * o0_str[d];
}
log2_dim += 1;
dim_d >>= 1;
}
}
}
""" %d

    def c_support_code_apply(self, node, nodename):
        """Return the kernel source followed by its launcher, as one str."""
        return self.c_src_kernel(node, nodename) + self.c_src_callkernel(node, nodename)
class NaiveAlgo(object): class NaiveAlgo(object):
verbose = 0 # 1, 2 or 3 for more verbose output. verbose = 0 # 1, 2 or 3 for more verbose output.
cache_version = ('debug', 7, verbose) cache_version = ('debug', 7, verbose)
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论