def create_regressor(rng=np.random, batchsize=1, window=240, input=1, dropout=0.25):
    """Build the regression network: three conv -> bias -> activation stages, then pooling.

    Parameters
    ----------
    rng : random generator used to initialise the convolution weights.
    batchsize : leading dimension of the expected input tensor.
    window : number of frames per input window.
    input : number of input channels (name shadows the builtin but is kept
        for caller compatibility).
    dropout : NOTE(review) currently unused — the dropout layers were
        disabled in the original; the parameter is kept for interface
        compatibility.
    """
    print('inside create_regressor')
    stages = [
        # stage 1: input channels -> 64 feature maps
        Conv1DLayer(filter_shape=(64, input, 45), input_shape=(batchsize, input, window), rng=rng),
        BiasLayer(shape=(64, 1)),
        ActivationLayer(),
        # stage 2: 64 -> 128 feature maps
        Conv1DLayer(filter_shape=(128, 64, 25), input_shape=(batchsize, 64, window), rng=rng),
        BiasLayer(shape=(128, 1)),
        ActivationLayer(),
        # stage 3: 128 -> 256 feature maps
        Conv1DLayer(filter_shape=(256, 128, 15), input_shape=(batchsize, 128, window), rng=rng),
        BiasLayer(shape=(256, 1)),
        ActivationLayer(),
        # temporal pooling over the 256-channel feature window
        Pool1DLayer(input_shape=(batchsize, 256, window)),
    ]
    return Network(*stages)
def create_footstepper(rng=np.random, batchsize=1, window=250, dropout=0.25):
    """Build the footstep network: two dropout+conv stages mapping 3 input channels to 5 outputs.

    Parameters
    ----------
    rng : random generator used to initialise the convolution weights.
    batchsize : leading dimension of the expected input tensor.
    window : number of frames per input window.
    dropout : dropout rate applied before each convolution.
    """
    # layers are constructed in this exact order so rng draws match the original
    first_stage = [
        DropoutLayer(amount=dropout, rng=rng),
        Conv1DLayer(filter_shape=(64, 3, 65), input_shape=(batchsize, 3, window), rng=rng),
        BiasLayer(shape=(64, 1)),
        ActivationLayer(),
    ]
    second_stage = [
        DropoutLayer(amount=dropout, rng=rng),
        Conv1DLayer(filter_shape=(5, 64, 45), input_shape=(batchsize, 64, window), rng=rng),
        BiasLayer(shape=(5, 1)),
    ]
    return Network(*(first_stage + second_stage))
def __init__(self, rng=rng, input_shape=1, output_shape=1, dropout=0.7):
    """Initialise a three-layer, phase-sliced MLP (4 slices of 512 hidden units).

    Parameters
    ----------
    rng : random generator used to initialise the weight layers.
    input_shape : input dimensionality; the first weight slice uses
        input_shape - 1 columns (the last input component is presumably the
        phase — TODO confirm against the caller).
    output_shape : output dimensionality.
    dropout : retention/drop rate passed to each DropoutLayer.
    """
    self.nslices = 4
    # independent dropout instances for each of the three layer boundaries
    self.dropout0, self.dropout1, self.dropout2 = (
        DropoutLayer(dropout, rng=rng) for _ in range(3))
    self.activation = ActivationLayer('ELU')
    # weight tensors, one slice per phase segment — constructed in this
    # order so rng draws match the original initialisation
    self.W0 = HiddenLayer((self.nslices, 512, input_shape - 1), rng=rng, gamma=0.01)
    self.W1 = HiddenLayer((self.nslices, 512, 512), rng=rng, gamma=0.01)
    self.W2 = HiddenLayer((self.nslices, output_shape, 512), rng=rng, gamma=0.01)
    self.b0 = BiasLayer((self.nslices, 512))
    self.b1 = BiasLayer((self.nslices, 512))
    self.b2 = BiasLayer((self.nslices, output_shape))
    self.layers = [self.W0, self.W1, self.W2, self.b0, self.b1, self.b2]
    # flatten each layer's parameter list into one trainable-parameter list
    self.params = [p for layer in self.layers for p in layer.params]
def createcore_rightleg(rng=np.random, batchsize=1, window=240, dropout=0.25, depooler='random'):
    """Build the right-leg autoencoding core as an (encoder, decoder) Network pair.

    Parameters
    ----------
    rng : random generator used to initialise the convolution weights.
    batchsize : leading dimension of the expected input tensor.
    window : number of frames per input window (12 channels x `window` frames).
    dropout : dropout rate applied before each convolution.
    depooler : depooling strategy forwarded to Depool1DLayer.

    Fix: `depooler` was accepted but ignored — Depool1DLayer was hard-coded
    to 'random'. It is now forwarded; the default is unchanged, so existing
    callers see identical behaviour.
    """
    return Network(
        # encoder: 12 input channels -> 256 feature maps, then temporal pooling
        Network(
            DropoutLayer(amount=dropout, rng=rng),
            Conv1DLayer(filter_shape=(256, 12, 25), input_shape=(batchsize, 12, window), rng=rng),
            BiasLayer(shape=(256, 1)),
            ActivationLayer(),
            Pool1DLayer(input_shape=(batchsize, 256, window)),
        ),
        # decoder: depool back to `window` frames, then reconstruct the 12 channels
        Network(
            Depool1DLayer(output_shape=(batchsize, 256, window), depooler=depooler, rng=rng),
            DropoutLayer(amount=dropout, rng=rng),
            Conv1DLayer(filter_shape=(12, 256, 25), input_shape=(batchsize, 256, window), rng=rng),
            BiasLayer(shape=(12, 1)),
        ),
    )
def create_core(rng=np.random, batchsize=1, window=240, dropout=0.25, depooler='random'):
    """Build the full-body autoencoding core as an (encoder, decoder) Network pair.

    Parameters
    ----------
    rng : random generator used to initialise the convolution weights.
    batchsize : leading dimension of the expected input tensor.
    window : number of frames per input window (73 channels x `window` frames).
    dropout : dropout rate applied before each convolution.
    depooler : depooling strategy forwarded to Depool1DLayer.

    Fix: `depooler` was accepted but ignored — Depool1DLayer was hard-coded
    to 'random'. It is now forwarded; the default is unchanged, so existing
    callers see identical behaviour.
    """
    print('inside create_core')
    return Network(
        # encoder: 73 input channels -> 256 feature maps, then temporal pooling
        Network(
            DropoutLayer(amount=dropout, rng=rng),
            Conv1DLayer(filter_shape=(256, 73, 25), input_shape=(batchsize, 73, window), rng=rng),
            BiasLayer(shape=(256, 1)),
            ActivationLayer(),
            Pool1DLayer(input_shape=(batchsize, 256, window)),
        ),
        # decoder: depool back to `window` frames, then reconstruct the 73 channels
        Network(
            Depool1DLayer(output_shape=(batchsize, 256, window), depooler=depooler, rng=rng),
            DropoutLayer(amount=dropout, rng=rng),
            Conv1DLayer(filter_shape=(73, 256, 25), input_shape=(batchsize, 256, window), rng=rng),
            BiasLayer(shape=(73, 1)),
        ),
    )
def __init__(self, rng=rng, input_shape=1, output_shape=1, dropout=0.7):
    """Initialise the phase-sliced MLP plus per-style residual adapters and labels.

    Parameters
    ----------
    rng : random generator used to initialise the weight layers.
    input_shape : input dimensionality; the first weight slice uses
        input_shape - 1 columns.
    output_shape : output dimensionality.
    dropout : rate passed to each DropoutLayer.

    Builds one DiagLayer/BiasLayer adapter pair and one label vector per
    style in (ang, chi, dep, neu, old, pro, sex, str). Relies on the
    module-level `L` (label matrix) and `w` (frames per style block).
    """
    self.nslices = 4
    self.dropout0 = DropoutLayer(dropout, rng=rng)
    self.dropout1 = DropoutLayer(dropout, rng=rng)
    self.dropout2 = DropoutLayer(dropout, rng=rng)
    self.activation = ActivationLayer('ELU')
    # base network weights — constructed in this order so rng draws match
    self.W0 = HiddenLayer((self.nslices, 512, input_shape - 1), rng=rng, gamma=0.01)
    self.W1 = HiddenLayer((self.nslices, 512, 512), rng=rng, gamma=0.01)
    self.W2 = HiddenLayer((self.nslices, output_shape, 512), rng=rng, gamma=0.01)
    self.b0 = BiasLayer((self.nslices, 512))
    self.b1 = BiasLayer((self.nslices, 512))
    self.b2 = BiasLayer((self.nslices, output_shape))
    # per-style residual adapters; loop order matches the original
    # attribute-by-attribute construction, so rng draws are identical
    style_names = ['ang', 'chi', 'dep', 'neu', 'old', 'pro', 'sex', 'str']
    for name in style_names:
        setattr(self, name + '_W', DiagLayer((self.nslices, 1, 512), rng=rng, gamma=0.01))
        setattr(self, name + '_b', BiasLayer((self.nslices, 512)))
    self.layers = [self.W0, self.W1, self.W2, self.b0, self.b1, self.b2]
    for name in style_names:
        self.layers.append(getattr(self, name + '_W'))
        self.layers.append(getattr(self, name + '_b'))
    self.params = sum([layer.params for layer in self.layers], [])
    # one binary label vector per style: style i owns frames [w*i, w*(i+1))
    for i, name in enumerate(style_names):
        label = np.zeros(L.shape[1])
        label[w * i:w * (i + 1)] = 1
        setattr(self, name + '_label', theano.shared(label, borrow=True))
    # broadcastable zero row used as an output-sized constant
    zeros = np.zeros((1, output_shape))
    self.zeros = T.addbroadcast(theano.shared(zeros, borrow=True), 0)
def __init__(self, rng=rng, input_shape=1, output_shape=1, dropout=0.7, dropout_res=0.5, style='Balance', batchsize=20):
    """Initialise the style-transfer network: load pre-trained base weights and add one trainable style adapter.

    Parameters
    ----------
    rng : random generator used to initialise the weight layers.
    input_shape : input dimensionality; the first weight slice uses
        input_shape - 1 columns.
    output_shape : output dimensionality.
    dropout : rate for the base-network dropout layers.
    dropout_res : rate for the residual-adapter dropout layer.
    style : style name; must appear in the module-level `styletransfer_styles`.
    batchsize : stored for later use by the training code.

    Relies on module-level `mname` (parameter directory name), `L`, `w`,
    and `styletransfer_styles`.
    """
    self.style = style
    self.batchsize = batchsize
    self.nslices = 4
    self.dropout0 = DropoutLayer(dropout, rng=rng)
    self.dropout1 = DropoutLayer(dropout, rng=rng)
    self.dropout_res = DropoutLayer(dropout_res, rng=rng)
    self.dropout2 = DropoutLayer(dropout, rng=rng)
    self.activation = ActivationLayer('ELU')
    # per-slice shapes of the pre-trained parameters on disk
    param_shapes = {
        'W0': (512, input_shape - 1),
        'W1': (512, 512),
        'W2': (output_shape, 512),
        'b0': (512,),
        'b1': (512,),
        'b2': (output_shape,),
    }
    loaded = {name: np.empty((self.nslices,) + shape, dtype=np.float32)
              for name, shape in param_shapes.items()}
    for i in range(self.nslices):
        # files are indexed by phase percentage: 000, 012, 025, 037
        suffix = '_%03i.bin' % int(i * 12.5)
        for name, shape in param_shapes.items():
            raw = np.fromfile('./Parameters/' + mname + '/' + name + suffix,
                              dtype=np.float32)
            loaded[name][i] = raw.reshape(shape)
    # base network layers — constructed in this order so rng draws match;
    # their random initial values are immediately overwritten below
    self.W0 = HiddenLayer((self.nslices, 512, input_shape - 1), rng=rng, gamma=0.01)
    self.W1 = HiddenLayer((self.nslices, 512, 512), rng=rng, gamma=0.01)
    self.W2 = HiddenLayer((self.nslices, output_shape, 512), rng=rng, gamma=0.01)
    self.b0 = BiasLayer((self.nslices, 512))
    self.b1 = BiasLayer((self.nslices, 512))
    self.b2 = BiasLayer((self.nslices, output_shape))
    self.W0.W.set_value(loaded['W0'])
    self.W1.W.set_value(loaded['W1'])
    self.W2.W.set_value(loaded['W2'])
    self.b0.b.set_value(loaded['b0'])
    self.b1.b.set_value(loaded['b1'])
    self.b2.b.set_value(loaded['b2'])
    # residual style adapter
    self.style_W0 = DiagLayer((self.nslices, 1, 512), rng=rng, gamma=0.01)
    self.style_b = BiasLayer((self.nslices, 512))
    # The only parameters we want to update are the residual adapter ones
    self.layers = [self.style_W0, self.style_b]
    self.params = sum([layer.params for layer in self.layers], [])
    # binary label marking this style's frame block: [w*idx, w*(idx+1))
    idx = styletransfer_styles.index(self.style)
    style_label = np.zeros(L.shape[1])
    style_label[w * idx:w * (idx + 1)] = 1
    self.style_label = theano.shared(style_label, borrow=True)
    # broadcastable zero row used as an output-sized constant
    zeros = np.zeros((1, output_shape))
    self.zeros = T.addbroadcast(theano.shared(zeros, borrow=True), 0)