def run_gradweight(self, inputs_shape, filters_shape, output_shape,
                   ref=dnn_gradweight, subsample=(1, 1), filter_flip=True,
                   verify_grad=True, mode=mode_without_gpu,
                   border_mode='valid', device='cpu', provide_shape=False,
                   target_op=None):
    """Compare ``AbstractConv2d_gradWeights`` against the reference ``ref``.

    Random input and output values are generated, both the abstract
    grad-weights op and the reference implementation are compiled and
    evaluated, and their results must be numerically close.  When
    ``target_op`` is given, the optimized graph must contain a node of
    that op class; when ``verify_grad`` is true, a numerical gradient
    check is run as well.
    """
    inputs_val = numpy.random.random(inputs_shape).astype('float32')
    output_val = numpy.random.random(output_shape).astype('float32')

    # Place the test values on the requested device.
    if device == 'gpu':
        inputs = gpu_shared(inputs_val)
        output = gpu_shared(output_val)
    else:
        inputs = theano.tensor.as_tensor_variable(cpu_shared(inputs_val))
        output = theano.tensor.as_tensor_variable(cpu_shared(output_val))

    # Static shapes are only given to the op when explicitly requested.
    imshp = inputs_shape if provide_shape else None
    kshp = filters_shape if provide_shape else None
    conv_mode = 'conv' if filter_flip else 'cross'

    grad_w_op = conv.AbstractConv2d_gradWeights(border_mode=border_mode,
                                                filter_flip=filter_flip,
                                                subsample=subsample,
                                                imshp=imshp, kshp=kshp)
    grad_w = grad_w_op(inputs, output, filters_shape[-2:])
    grad_w_ref = ref(inputs, output, filters_shape,
                     border_mode=border_mode, subsample=subsample,
                     conv_mode=conv_mode)

    f = theano.function([], grad_w, mode)
    f_ref = theano.function([], grad_w_ref, mode)

    if target_op is not None:
        # The optimized graph must use the expected implementation.
        assert any(isinstance(node.op, target_op)
                   for node in f.maker.fgraph.toposort())

    res_ref = numpy.array(f_ref())
    res = numpy.array(f())
    utt.assert_allclose(res_ref, res)

    def grad_weights_fn(inputs_val, output_val):
        # Closure re-applies the op so verify_grad can differentiate
        # with respect to the raw numeric inputs.
        op = conv.AbstractConv2d_gradWeights(border_mode=border_mode,
                                             subsample=subsample)
        return op(inputs_val, output_val, filters_shape[-2:])

    if verify_grad:
        utt.verify_grad(grad_weights_fn, [inputs_val, output_val],
                        mode=mode, eps=1)
def run_fwd(self, inputs_shape, filters_shape, ref=dnn_conv,
            subsample=(1, 1), verify_grad=True, mode=mode_without_gpu,
            border_mode='valid', filter_flip=True, device='cpu',
            provide_shape=False, target_op=None):
    """Compare the forward ``conv.conv2d`` against the reference ``ref``.

    Random inputs and filters are generated, both graphs are compiled and
    evaluated, and the results must be numerically close.  When
    ``target_op`` is given, the optimized graph must contain a node of
    that op class; when ``verify_grad`` is true, a numerical gradient
    check is run with the same convolution parameters.
    """
    inputs_val = numpy.random.random(inputs_shape).astype('float32')
    filters_val = numpy.random.random(filters_shape).astype('float32')

    # Place the test values on the requested device.
    if device == 'gpu':
        inputs = gpu_shared(inputs_val)
        filters = gpu_shared(filters_val)
    else:
        inputs = theano.tensor.as_tensor_variable(cpu_shared(inputs_val))
        filters = theano.tensor.as_tensor_variable(cpu_shared(filters_val))

    # Static shapes are only given to the op when explicitly requested.
    if provide_shape:
        imshp = inputs_shape
        kshp = filters_shape
    else:
        imshp = None
        kshp = None
    if filter_flip:
        conv_mode = 'conv'
    else:
        conv_mode = 'cross'

    c_ref = ref(inputs, filters,
                border_mode=border_mode, subsample=subsample,
                conv_mode=conv_mode)
    c = conv.conv2d(inputs, filters,
                    border_mode=border_mode, subsample=subsample,
                    filter_flip=filter_flip,
                    input_shape=imshp, filter_shape=kshp)

    f_ref = theano.function([], c_ref, mode=mode)
    f = theano.function([], c, mode)

    if target_op is not None:
        # The optimized graph must use the expected implementation.
        assert any([isinstance(n.op, target_op)
                    for n in f.maker.fgraph.toposort()])

    res_ref = numpy.array(f_ref())
    res = numpy.array(f())
    utt.assert_allclose(res_ref, res)

    if verify_grad:
        # Fix: the gradient check previously hard-coded
        # border_mode="valid" and omitted filter_flip, so non-default
        # modes were never gradient-tested.  Use the parameters under
        # test instead.
        utt.verify_grad(conv.AbstractConv2d(border_mode=border_mode,
                                            imshp=imshp, kshp=kshp,
                                            subsample=subsample,
                                            filter_flip=filter_flip),
                        [inputs_val, filters_val],
                        mode=mode)
def run_gradinput(self, inputs_shape, filters_shape, output_shape,
                  ref=dnn_gradinput, subsample=(1, 1), filter_flip=True,
                  verify_grad=True, mode=mode_without_gpu,
                  border_mode='valid', device='cpu', provide_shape=False,
                  target_op=None):
    """Compare ``AbstractConv2d_gradInputs`` against the reference ``ref``.

    Random output and filter values are generated, both the abstract
    grad-inputs op and the reference implementation are compiled and
    evaluated, and their results must be numerically close.  When
    ``target_op`` is given, the optimized graph must contain a node of
    that op class (added for consistency with the sibling helpers; the
    default ``None`` keeps existing callers unchanged); when
    ``verify_grad`` is true, a numerical gradient check is run as well.
    """
    output_val = numpy.random.random(output_shape).astype('float32')
    filters_val = numpy.random.random(filters_shape).astype('float32')

    # Place the test values on the requested device.
    if device == 'gpu':
        output = gpu_shared(output_val)
        filters = gpu_shared(filters_val)
    else:
        output = theano.tensor.as_tensor_variable(cpu_shared(output_val))
        filters = theano.tensor.as_tensor_variable(cpu_shared(filters_val))

    # Static shapes are only given to the op when explicitly requested.
    if provide_shape:
        imshp = inputs_shape
        kshp = filters_shape
    else:
        imshp = None
        kshp = None
    if filter_flip:
        conv_mode = 'conv'
    else:
        conv_mode = 'cross'

    c = conv.AbstractConv2d_gradInputs(border_mode=border_mode,
                                       subsample=subsample,
                                       filter_flip=filter_flip,
                                       imshp=imshp, kshp=kshp)
    c = c(filters, output, inputs_shape[-2:])
    c_ref = ref(filters, output, inputs_shape,
                border_mode=border_mode, subsample=subsample,
                conv_mode=conv_mode)

    f = theano.function([], c, mode)
    f_ref = theano.function([], c_ref, mode)

    if target_op is not None:
        # The optimized graph must use the expected implementation.
        assert any([isinstance(n.op, target_op)
                    for n in f.maker.fgraph.toposort()])

    res_ref = numpy.array(f_ref())
    res = numpy.array(f())
    utt.assert_allclose(res_ref, res)

    def abstract_conv2d_gradinputs(filters_val, output_val):
        # Closure re-applies the op so verify_grad can differentiate
        # with respect to the raw numeric inputs.
        conv_op = conv.AbstractConv2d_gradInputs(border_mode=border_mode,
                                                 subsample=subsample)
        return conv_op(filters_val, output_val, inputs_shape[-2:])

    if verify_grad:
        utt.verify_grad(abstract_conv2d_gradinputs,
                        [filters_val, output_val],
                        mode=mode, eps=1)
def run_fwd(self, inputs_shape, filters_shape, ref=dnn_conv,
            subsample=(1, 1), verify_grad=True, mode=mode_without_gpu,
            border_mode='valid', filter_flip=True, device='cpu',
            provide_shape=False, target_op=None):
    """Compare the forward ``conv.conv2d`` against the reference ``ref``.

    Random inputs and filters are generated, both graphs are compiled and
    evaluated, and the results must be numerically close.  When
    ``target_op`` is given, the optimized graph must contain a node of
    that op class; when ``verify_grad`` is true, a numerical gradient
    check is run with the same convolution parameters.
    """
    inputs_val = numpy.random.random(inputs_shape).astype('float32')
    filters_val = numpy.random.random(filters_shape).astype('float32')

    # Place the test values on the requested device.
    if device == 'gpu':
        inputs = gpu_shared(inputs_val)
        filters = gpu_shared(filters_val)
    else:
        inputs = theano.tensor.as_tensor_variable(cpu_shared(inputs_val))
        filters = theano.tensor.as_tensor_variable(cpu_shared(filters_val))

    # Static shapes are only given to the op when explicitly requested.
    if provide_shape:
        imshp = inputs_shape
        kshp = filters_shape
    else:
        imshp = None
        kshp = None
    if filter_flip:
        conv_mode = 'conv'
    else:
        conv_mode = 'cross'

    c_ref = ref(inputs, filters,
                border_mode=border_mode, subsample=subsample,
                conv_mode=conv_mode)
    c = conv.conv2d(inputs, filters,
                    border_mode=border_mode, subsample=subsample,
                    filter_flip=filter_flip,
                    input_shape=imshp, filter_shape=kshp)

    f_ref = theano.function([], c_ref, mode=mode)
    f = theano.function([], c, mode)

    if target_op is not None:
        # The optimized graph must use the expected implementation.
        assert any([isinstance(n.op, target_op)
                    for n in f.maker.fgraph.toposort()])

    res_ref = numpy.array(f_ref())
    res = numpy.array(f())
    utt.assert_allclose(res_ref, res)

    if verify_grad:
        # Fix: the gradient check previously hard-coded
        # border_mode="valid" and omitted filter_flip, so non-default
        # modes were never gradient-tested.  Use the parameters under
        # test instead.
        utt.verify_grad(conv.AbstractConv2d(border_mode=border_mode,
                                            imshp=imshp, kshp=kshp,
                                            subsample=subsample,
                                            filter_flip=filter_flip),
                        [inputs_val, filters_val],
                        mode=mode)
def run_gradinput(self, inputs_shape, filters_shape, output_shape,
                  ref=dnn_gradinput, subsample=(1, 1), filter_flip=True,
                  verify_grad=True, mode=mode_without_gpu,
                  border_mode='valid', device='cpu', provide_shape=False,
                  target_op=None):
    """Compare ``AbstractConv2d_gradInputs`` against the reference ``ref``.

    Random output and filter values are generated, both the abstract
    grad-inputs op and the reference implementation are compiled and
    evaluated, and their results must be numerically close.  When
    ``target_op`` is given, the optimized graph must contain a node of
    that op class; when ``verify_grad`` is true, a numerical gradient
    check is run as well.
    """
    output_val = numpy.random.random(output_shape).astype('float32')
    filters_val = numpy.random.random(filters_shape).astype('float32')

    # Place the test values on the requested device.
    if device == 'gpu':
        output = gpu_shared(output_val)
        filters = gpu_shared(filters_val)
    else:
        output = theano.tensor.as_tensor_variable(cpu_shared(output_val))
        filters = theano.tensor.as_tensor_variable(cpu_shared(filters_val))

    # Static shapes are only given to the op when explicitly requested.
    imshp = inputs_shape if provide_shape else None
    kshp = filters_shape if provide_shape else None
    conv_mode = 'conv' if filter_flip else 'cross'

    grad_in_op = conv.AbstractConv2d_gradInputs(border_mode=border_mode,
                                                subsample=subsample,
                                                filter_flip=filter_flip,
                                                imshp=imshp, kshp=kshp)
    grad_in = grad_in_op(filters, output, inputs_shape[-2:])
    grad_in_ref = ref(filters, output, inputs_shape,
                      border_mode=border_mode, subsample=subsample,
                      conv_mode=conv_mode)

    f = theano.function([], grad_in, mode)
    f_ref = theano.function([], grad_in_ref, mode)

    if target_op is not None:
        # The optimized graph must use the expected implementation.
        assert any(isinstance(node.op, target_op)
                   for node in f.maker.fgraph.toposort())

    res_ref = numpy.array(f_ref())
    res = numpy.array(f())
    utt.assert_allclose(res_ref, res)

    def grad_inputs_fn(filters_val, output_val):
        # Closure re-applies the op so verify_grad can differentiate
        # with respect to the raw numeric inputs.
        op = conv.AbstractConv2d_gradInputs(border_mode=border_mode,
                                            subsample=subsample)
        return op(filters_val, output_val, inputs_shape[-2:])

    if verify_grad:
        utt.verify_grad(grad_inputs_fn, [filters_val, output_val],
                        mode=mode, eps=1)