def test_softmax_optimizations_w_bias(self):
    """Check that crossentropy_categorical_1hot(softmax(x + b), labels) is
    rewritten by the FAST_RUN optimizer into the fused
    crossentropy_softmax_argmax_1hot_with_bias op.
    """
    x = tensor.matrix('x')
    b = tensor.vector('b')
    one_of_n = tensor.lvector('one_of_n')
    op = crossentropy_categorical_1hot
    # NOTE(review): xe is never used below — kept as in the original.
    xe = op(x, one_of_n)
    env = gof.Env(
        [x, b, one_of_n],
        [op(softmax(x+b), one_of_n)])
    # Before optimization the output node is the plain crossentropy op.
    assert env.outputs[0].owner.op == op
    print 'BEFORE'
    # NOTE(review): loop extent reconstructed from a collapsed source line;
    # both prints are assumed to sit inside the loop body — confirm against
    # the original file.
    for node in env.toposort():
        print node.op
        print printing.pprint(node.outputs[0])
    print '----'
    # Run the full FAST_RUN optimization pipeline on the graph in place.
    theano.compile.mode.optdb.query(
        theano.compile.mode.OPT_FAST_RUN).optimize(env)
    print 'AFTER'
    for node in env.toposort():
        print node.op
        print printing.pprint(node.outputs[0])
    print '===='
    # After optimization only the fused op plus an OutputGuard remain.
    assert len(env.toposort()) == 2
    assert str(env.outputs[0].owner.op) == 'OutputGuard'
    assert env.outputs[0].owner.inputs[0].owner.op == crossentropy_softmax_argmax_1hot_with_bias
def test_softmax_optimizations_w_bias(self): x = tensor.matrix('x') b = tensor.vector('b') one_of_n = tensor.lvector('one_of_n') op = crossentropy_categorical_1hot xe = op(x, one_of_n) env = gof.Env([x, b, one_of_n], [op(softmax(x + b), one_of_n)]) assert env.outputs[0].owner.op == op print 'BEFORE' for node in env.toposort(): print node.op print printing.pprint(node.outputs[0]) print '----' theano.compile.mode.optdb.query( theano.compile.mode.OPT_FAST_RUN).optimize(env) print 'AFTER' for node in env.toposort(): print node.op print printing.pprint(node.outputs[0]) print '====' assert len(env.toposort()) == 2 assert str(env.outputs[0].owner.op) == 'OutputGuard' assert env.outputs[0].owner.inputs[ 0].owner.op == crossentropy_softmax_argmax_1hot_with_bias
def _test_Output(): print '\n------------------------------------------------------------' print 'Test: Output Layer' x = L.Input(2, name='X') output = L.Output() output.set_input('input', x, 'output') x.build() output.build() print P.pprint(output.output())
def _test_RepeatVector(): print '\n------------------------------------------------------------' print 'Test: Repeat Vector Layer' x = L.Input(2, name='X') f = L.RepeatVector(10) f.set_input('input', x, 'output') x.build() f.build() print P.pprint(f.get_output('output'))
def _test_repeat(): print '\n------------------------------------------------------------' print 'Test: dlx.util.theano_utils.repeat' x2 = TU.tensor(2, 'x2') x3 = TU.tensor(3, 'x3') y3 = TU.repeat(x2, 10) y4 = TU.repeat(x3, 10) print P.pprint(y3) print P.pprint(y4)
def _test_repeat(): print '\n------------------------------------------------------------' print 'Test: theano_variable' x2 = theano_variable(2, 'x2') x3 = theano_variable(3, 'x3') y3 = repeat(x2, 10) y4 = repeat(x3, 10) print P.pprint(y3) print P.pprint(y4)
def _test_Output(): print '\n------------------------------------------------------------' print 'Test: Output Unit' x = U.Input(2, name='X') output = U.Output() output.set_input('input', x, 'output') x.build() output.check() output.build() print P.pprint(output.get_results(train=False))
def _test_Dropout(): print '\n------------------------------------------------------------' print 'Test: Dropout layer' data_1 = L.Input(2, name='X') dropout = L.Dropout(0.2) dropout.set_input('input', data_1, 'output') data_1.build() dropout.build() print P.pprint(dropout.get_output('output', train=False)) print P.pprint(dropout.get_output('output', train=True))
def _test_Dense(): print '\n------------------------------------------------------------' print 'Test: Dense Unit' X = U.Input(2, name='X') dense_1 = U.Dense(16, 24, name='Dense1') dense_1.set_input('input', X, 'output') X.build() dense_1.check() dense_1.build() print P.pprint(dense_1.get_output('output')(train=False))
def _test_RepeatVector(): print '\n------------------------------------------------------------' print 'Test: Repeat Vector Unit' x = U.Input(2, name='X') f = U.RepeatVector(10) f.set_input('input', x, 'output') x.build() f.check() f.build() print P.pprint(f.get_output('output')(train=False))
def _test_Dense(): print '\n------------------------------------------------------------' print 'Test: Dense layer' data_1 = L.Input(2, name='Data1') dense_1 = L.Dense(16,24, name='Dense1') dense_1.set_function('activation', activation_linear) dense_1.set_function('init', init_glorot_uniform) dense_1.set_input('input', data_1, 'output') data_1.build() dense_1.build() print P.pprint(dense_1.get_output('output'))
def _test_Dropout(): print '\n------------------------------------------------------------' print 'Test: Dropout Unit' data_1 = U.Input(2, name='X') dropout = U.Dropout(0.2) dropout.set_input('input', data_1, 'output') data_1.build() dropout.check() dropout.build() print P.pprint(dropout.get_output('output')(train=False)) print P.pprint(dropout.get_output('output')(train=True))
def _test_Input(): print '\n------------------------------------------------------------' print 'Test: Input Unit' data1 = U.Input(2, name='Data1') data1.check() data1.build() print P.pprint(data1.get_variable()) data2 = U.Input(2) data2.check() data2.build() print P.pprint(data1.get_variable())
def _test_SimpleLambda(): print '\n------------------------------------------------------------' print 'Test: Simple Lambda Layer' x = L.Input(2, name='X') def fun(x): return x**2 f = L.SimpleLambda(fun) f.set_input('input', x, 'output') x.build() f.build() print P.pprint(f.get_output('output'))
def _test_Mask(): print '\n------------------------------------------------------------' print 'Test: Mask Unit' x = U.Input(3, name='X') mask = U.Mask() mask.set_input('input', x, 'output') output = U.Output() output.set_input('input', mask, 'mask') x.build() mask.check() mask.build() output.build() print P.pprint(output.get_results(train=False))
def _test_SimpleLambda(): print '\n------------------------------------------------------------' print 'Test: Simple Lambda Unit' x = U.Input(2, name='X') def fun(x): return x**2 f = U.SimpleLambda(fun) f.set_input('input', x, 'output') x.build() f.check() f.build() print P.pprint(f.get_output('output')(train=False))
def _test_Activation(): print '\n------------------------------------------------------------' print 'Test: Activation Layer' x = L.Input(2, name='X') relu = L.Activation() relu.set_function('activation', activation_relu) relu.set_input('input', x, 'output') softmax = L.Activation() softmax.set_function('activation', activation_softmax) softmax.set_input('input', x, 'output') x.build() relu.build() softmax.build() print P.pprint(relu.get_output('output')) print P.pprint(softmax.get_output('output'))
def _test_Activation(): print '\n------------------------------------------------------------' print 'Test: Activation Unit' x = U.Input(2, name='X') relu = U.Activation('relu') relu.set_input('input', x, 'output') softmax = U.Activation('softmax') softmax.set_input('input', x, 'output') x.build() relu.check() relu.build() softmax.check() softmax.build() print P.pprint(relu.get_output('output')(train=False)) print P.pprint(softmax.get_output('output')(train=False))
def _test_Input(): print '\n------------------------------------------------------------' print 'Test: Input layer' data1 = L.Input(2, name='Data1') data1.build() print P.pprint(data1.input()) data2 = L.Input(2) data2.build() print P.pprint(data2.input()) X = L.Input(3, name='X', mask_dim=2) X.build() print P.pprint(X.input()) print P.pprint(X.input_mask(train=True)) print X.input_mask(train=False)
def _test_function(): print '\n------------------------------------------------------------' print 'Test: activation, initialization functions' X = UL.theano_variable(2, 'X') linear = activation_linear(X) relu = activation_relu(X) softmax = activation_softmax(X) print P.pprint(linear) print P.pprint(relu) print P.pprint(softmax) W = init_glorot_uniform((16, 24), 'W') print(P.pprint(W))
def _test_Lambda(): print '\n------------------------------------------------------------' print 'Test: Lambda Layer' x = L.Input(2, name='X') y = L.Input(2, name='Y') def fun(x, y): return x*2, x+y, y*2 f = L.Lambda(fun, ['2x', 'x+y', '2y']) f.set_input('input_x', x, 'output') f.set_input('input_y', y, 'output') x.build() y.build() f.build() print P.pprint(f.get_output('2x')) print P.pprint(f.get_output('x+y')) print P.pprint(f.get_output('2y')) output1 = L.Output() output1.set_input('input', f, '2x') output1.build() print P.pprint(output1.output())
def _test_RNN1():
    # Smoke-test for the RNN layer: builds a masked 3-D input feeding
    # RNN(3, 1024, 10) and debug-prints both the last-step and the
    # full-sequence outputs in test and train modes.
    print '\n------------------------------------------------------------'
    print 'Test: RNN layer 1'
    X = L.Input(3, name='DATA3', mask_dim=2)
    rnn1 = R.RNN(3,1024,10, name='RNN1')
    rnn1.set_function('activation', activation_sigmoid)
    rnn1.set_function('init', init_glorot_uniform)
    rnn1.set_function('inner_init', init_orthogonal)
    rnn1.set_input('input_sequence', X, 'output')
    X.build()
    rnn1.build()
    # Test-mode graphs. NOTE(review): the mask is printed raw here but
    # pprint-ed in train mode below — presumably input_mask(train=False)
    # is not a symbolic variable (possibly None); confirm against L.Input.
    print 'Test mask:', X.input_mask(train=False)
    print 'Test output_last:'
    print P.debugprint(rnn1.get_output('output_last', train=False))
    print 'Test output_sequence:'
    print P.debugprint(rnn1.get_output('output_sequence', train=False))
    # Train-mode graphs; the mask is pretty-printed as a symbolic expression.
    print 'Train mask:', P.pprint(X.input_mask(train=True))
    print 'Train output_last:'
    print P.debugprint(rnn1.get_output('output_last', train=True))
    print 'Train output_sequence:'
    print P.debugprint(rnn1.get_output('output_sequence', train=True))
def _test_Lambda(): print '\n------------------------------------------------------------' print 'Test: Lambda Unit' x = U.Input(2, name='X') y = U.Input(2, name='Y') def fun(x, y): return x * 2, x + y, y * 2 f = U.Lambda(fun, ['2x', 'x+y', '2y']) f.set_input('input_x', x, 'output') f.set_input('input_y', y, 'output') x.build() y.build() f.check() f.build() print P.pprint(f.get_output('2x')(train=False)) print P.pprint(f.get_output('x+y')(train=False)) print P.pprint(f.get_output('2y')(train=False)) output1 = U.Output() output1.set_input('input', f, '2x') output1.build() print P.pprint(output1.get_results(train=False))
def pretty(self, **kwargs):
    """Return a pretty-printed description of this method's graph.

    If a ``mode`` keyword argument is supplied, the graph is compiled
    with that mode first and the optimized graph is printed instead.
    """
    self.resolve_all()
    if self.inputs:
        rval = 'inputs: %s\n' % ", ".join(map(str, self.inputs))
    else:
        rval = ''
    # Normalize self.outputs to a list; updates always come from
    # self.updates (see the original one-liner preserved in the old
    # backport comment). The previous backport unpacked three names from
    # a two-element tuple in BOTH branches, raising ValueError on every
    # call — fixed here.
    inputs = self.inputs
    if isinstance(self.outputs, (list, tuple)):
        outputs = self.outputs
    else:
        outputs = [self.outputs]
    updates = self.updates
    # If mode is in kwargs, prints the optimized version of the method
    mode = kwargs.pop('mode', None)
    if mode:
        f = self.build(mode, {}, True)
        einputs, eoutputs = f.maker.fgraph.inputs, f.maker.fgraph.outputs
        # Compiled graphs append update pairs after the declared
        # inputs/outputs; split them back apart.
        updates = dict((k, v) for k, v in
                       zip(einputs[len(inputs):], eoutputs[len(outputs):]))
        inputs, outputs = einputs[:len(inputs)], eoutputs[:len(outputs)]
    rval += pprint(inputs, outputs, updates, False)
    return rval
def pretty(self, **kwargs):
    """Return a pretty-printed description of this method's graph.

    If a ``mode`` keyword argument is supplied, the graph is compiled
    with that mode first and the optimized graph is printed instead.
    """
    self.resolve_all()
    if self.inputs:
        rval = 'inputs: %s\n' % ", ".join(map(str, self.inputs))
    else:
        rval = ''
    # Normalize self.outputs to a list; updates always come from
    # self.updates (see the original one-liner preserved in the old
    # backport comment). The previous backport unpacked three names from
    # a two-element tuple in BOTH branches, raising ValueError on every
    # call — fixed here.
    inputs = self.inputs
    if isinstance(self.outputs, (list, tuple)):
        outputs = self.outputs
    else:
        outputs = [self.outputs]
    updates = self.updates
    # If mode is in kwargs, prints the optimized version of the method
    mode = kwargs.pop('mode', None)
    if mode:
        f = self.build(mode, {}, True)
        einputs, eoutputs = f.maker.env.inputs, f.maker.env.outputs
        # Compiled graphs append update pairs after the declared
        # inputs/outputs; split them back apart.
        updates = dict((k, v) for k, v in
                       zip(einputs[len(inputs):], eoutputs[len(outputs):]))
        inputs, outputs = einputs[:len(inputs)], eoutputs[:len(outputs)]
    rval += pprint(inputs, outputs, updates, False)
    return rval
def print_graph(func): for i, node in enumerate(func.maker.env.toposort()): print i, node # Last node should be the output print i, printing.pprint(node.outputs[0]) print
def pretty(self, **kwargs):
    """Pretty-print this External, appending the expression that produces
    its result when the underlying variable has an owner."""
    text = super(External, self).pretty()
    if self.r.owner:
        text += '\n= %s' % pprint(self.r, {'target': self.r})
    return text
'''Build Layers'''
# Build every layer of the model before pulling symbolic tensors out of it.
for layer in layers:
    layer.build()
'''input, output'''
# input of model
X_train = data.input(train=True)
X_test = data.input(train=False)
# output of model
y_train = output.output(train=True)
y_test = output.output(train=False)
mask_train = output.output_mask(train=True)  # None in this example
mask_test = output.output_mask(train=False)  # None in this example
# Dump the symbolic input/output graphs for inspection.
print('X_train:', P.pprint(X_train))
print('X_test:', P.pprint(X_test))
print('y_train:')
print(P.debugprint(y_train))
print('y_test:')
print(P.debugprint(y_test))
'''loss'''
# Wrap categorical crossentropy so it honors per-sample weights and masks.
loss = objectives.get('categorical_crossentropy')
weighted_loss = models.weighted_objective(loss)
# Placeholders for the ground-truth targets and the sample weights.
y = K.placeholder(ndim=K.ndim(y_train))
weights = K.placeholder(ndim=1)
train_loss = weighted_loss(y, y_train, weights, mask_train)
test_loss = weighted_loss(y, y_test, weights, mask_test)
"""Build Layers""" for layer in layers: layer.build() """input, output""" # input of model X_train = data.input(train=True) X_test = data.input(train=False) # output of model y_train = output.output(train=True) y_test = output.output(train=False) mask_train = output.output_mask(train=True) # None in this example mask_test = output.output_mask(train=False) # None in this example print("X_train:", P.pprint(X_train)) print("X_test:", P.pprint(X_test)) print("y_train:") print(P.debugprint(y_train)) print("y_test:") print(P.debugprint(y_test)) """loss""" loss = objectives.get("categorical_crossentropy") weighted_loss = models.weighted_objective(loss) y = K.placeholder(ndim=K.ndim(y_train)) weights = K.placeholder(ndim=1) train_loss = weighted_loss(y, y_train, weights, mask_train) test_loss = weighted_loss(y, y_test, weights, mask_test)
def pretty(self, **kwargs):
    """Pretty-print this External; when the wrapped result has an owner,
    append the expression that computes it."""
    base = super(External, self).pretty()
    if not self.r.owner:
        return base
    return base + '\n= %s' % pprint(self.r, {'target': self.r})