def default_init():
    self.actions_executed = 0
    self.ntrain_called = 0
    self.nstore_called = 0
    self.experiences = []

    nxt_model = src_model.deep_clone()

    inshape = list(src_model.get_input().shape())
    batchin = [mbatch_size] + inshape

    # environment interaction
    self.obs = tc.EVariable(inshape, 0, 'obs', ctx=self.ctx)
    self.act_idx = tc.TenncorAPI(self.ctx).argmax(src_model.connect(self.obs))
    self.act_idx.tag("recovery", "act_idx")

    # training
    self.src_obs = tc.EVariable(batchin, 0, 'src_obs', ctx=self.ctx)
    self.nxt_obs = tc.EVariable(batchin, 0, 'nxt_obs', ctx=self.ctx)
    self.src_outmask = tc.EVariable([mbatch_size] + list(src_model.shape()),
                                    1, 'src_outmask', ctx=self.ctx)
    self.nxt_outmask = tc.EVariable([mbatch_size], 1, 'nxt_outmask',
                                    ctx=self.ctx)
    self.rewards = tc.EVariable([mbatch_size], 0, 'rewards', ctx=self.ctx)
    self.prediction_err = tc.api.identity(tc.apply_update(
        [src_model, nxt_model],
        get_dqnupdate(update_fn, target_update_rate),
        get_dqnerror(self, discount_rate),
        ctx=self.ctx))
    self.prediction_err.tag("recovery", "prediction_err")

    tc.optimize(optimize_cfg, self.ctx)
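For context, a minimal sketch of how the graph built above might be driven at action-selection time. It is a hypothetical driver, not the trainer's actual method: it assumes `observation` is a numpy array matching `inshape`, and uses only `assign`/`get` on nodes created in the initializer.

# hypothetical action-selection driver for the graph above:
# write the observation into the graph input, then evaluate the
# argmax node to pick the greedy action
def action(self, observation):
    self.actions_executed += 1
    self.obs.assign(observation)
    return int(self.act_idx.get())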
def default_init():
    self.name = name
    self.dataset_idx = 0
    self.api = tc.TenncorAPI(self.ctx)

    # prevent shuffling to allow predictable recovery
    self.dataset = helper.load(self.oxfile)

    self.train_inputs = train_inputs
    labelled_inputs = [
        self.api.identity(train_input)
        for train_input in self.train_inputs
    ]
    outputs = connect_fn(labelled_inputs, self.ctx)
    if not isinstance(outputs, Iterable):
        outputs = [outputs]
    self.train_outputs = [
        self.api.identity(train_output)
        for train_output in outputs
    ]

    # tag inputs and outputs so they can be found again on recovery
    for i, linput in enumerate(labelled_inputs):
        linput.tag('recovery', 'input_{}'.format(i))
    for i, loutput in enumerate(self.train_outputs):
        loutput.tag('recovery', 'output_{}'.format(i))

    tc.optimize(optimize_cfg, self.ctx)
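For illustration, a hypothetical `connect_fn` this initializer would accept: it receives the labelled input nodes plus the context and returns one output node per input (a single non-iterable return is also handled above). The layer calls mirror ones used elsewhere in these demos; the shapes are made up, and the sketch ignores `ctx`.

# hypothetical connect_fn: wire each labelled input through a small
# dense + sigmoid stack, returning one output node per input
def example_connect_fn(labelled_inputs, ctx):
    model = tc.api.layer.link([
        tc.api.layer.dense([10], [5]),
        tc.api.layer.bind(tc.api.sigmoid),
    ])
    return [model.connect(linput) for linput in labelled_inputs]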
def tc_mlp_grad(matrix_dim):
    import client.profile.profile as tc_prof
    import tenncor as tc

    n_in = matrix_dim
    n_out = int(n_in / 2)
    batch_size = 1

    # regular mlp
    brain = tc.api.layer.link([
        tc.api.layer.dense([n_in], [matrix_dim]),
        tc.api.layer.bind(tc.api.sigmoid),
        tc.api.layer.dense([matrix_dim], [n_out]),
        tc.api.layer.bind(tc.api.sigmoid),
    ])
    invar = tc.Variable(np.zeros([batch_size, n_in], dtype=float), 'in')
    # out = brain.connect(invar)
    # expected_out = tc.Variable(np.zeros([batch_size, n_out], dtype=float), 'expected_out')
    # err = tc.api.square(expected_out - out)

    train_input = tc.Variable([batch_size, n_in], label='train_input')
    train_output = tc.Variable([batch_size, n_out], label='train_output')

    invar_batch = batch_generate(n_in, batch_size)
    test_batch = batch_generate(n_in, batch_size)
    test_batch_out = avgevry2(test_batch)
    invar.assign(invar_batch)
    train_input.assign(test_batch)
    train_output.assign(test_batch_out)

    train_err = tc.apply_update(
        [brain],
        lambda err, leaves: tc.api.approx.sgd(
            err, leaves, learning_rate=learning_rate),
        lambda models: tc.api.error.sqr_diff(
            train_output, models[0].connect(train_input)))
    tc.optimize("external/com_github_mingkaic_tenncor/cfg/optimizations.json")

    tc_prof.remote_profile('localhost:8069', [train_err])
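`batch_generate` and `avgevry2` are helpers defined elsewhere in the repo. Judging by their use here (inputs of width `n_in`, targets of width `n_in / 2`), they plausibly look like this numpy sketch; the exact definitions are an assumption.

import numpy as np

# assumed shape of the helpers above: a random flat batch in [0, 1),
# and targets formed by averaging each adjacent pair of inputs
def batch_generate(n, batch_size):
    return np.random.rand(n * batch_size)

def avgevry2(batch):
    return (batch[0::2] + batch[1::2]) / 2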
def main(args):
    # default_ts = time.time()
    default_ts = 0

    parser = argparse.ArgumentParser(description=prog_description)
    parser.add_argument('--seed', dest='seed', type=str2bool, nargs='?',
                        const=False, default=True,
                        help='Whether to seed or not (default: True)')
    parser.add_argument('--seedval', dest='seedval', type=int, nargs='?',
                        default=int(default_ts),
                        help='Random seed value (default: <current time>)')
    parser.add_argument('--nbatch', dest='nbatch', type=int, nargs='?',
                        default=3,
                        help='Batch size when training (default: 3)')
    parser.add_argument('--n_train', dest='n_train', type=int, nargs='?',
                        default=3000,
                        help='Number of times to train (default: 3000)')
    parser.add_argument('--n_test', dest='n_test', type=int, nargs='?',
                        default=500,
                        help='Number of times to test (default: 500)')
    parser.add_argument('--save', dest='save', nargs='?', default='',
                        help='Filename to save model (default: <blank>)')
    parser.add_argument('--load', dest='load', nargs='?',
                        default='models/gd.onnx',
                        help='Filename to load pretrained model (default: models/gd.onnx)')
    args = parser.parse_args(args)

    if args.seed:
        print('seeding {}'.format(args.seedval))
        tc.seed(args.seedval)
        np.random.seed(args.seedval)

    nunits = 9
    ninput = 10
    noutput = int(ninput / 2)
    nbatch = args.nbatch

    train_input = tc.EVariable([nbatch, ninput])
    train_exout = tc.EVariable([nbatch, noutput])
    model = tc.api.layer.link([
        tc.api.layer.dense([ninput], [nunits]),
        tc.api.layer.bind(tc.api.sigmoid),
        tc.api.layer.dense([nunits], [noutput]),
        tc.api.layer.bind(tc.api.sigmoid),
    ], train_input)
    train = tc.apply_update(
        [model],
        lambda err, leaves: tc.api.approx.sgd(err, leaves, learning_rate=0.9),
        lambda models: tc.api.loss.mean_squared(
            train_exout, models[0].connect(train_input)))

    untrained = model.deep_clone()
    trained = model.deep_clone()
    try:
        print('loading ' + args.load)
        trained = tc.load_from_file(args.load)[0]
        print('successfully loaded from ' + args.load)
    except Exception as e:
        print(e)
        print('failed to load from "{}"'.format(args.load))

    testin = tc.EVariable([ninput], label='testin')
    untrained_out = untrained.connect(testin)
    trained_out = model.connect(testin)
    pretrained_out = trained.connect(testin)

    tc.optimize("cfg/optimizations.json")

    show_every_n = 500
    start = time.time()
    for i in range(args.n_train):
        batch, batch_out = batch_generate(ninput, nbatch)
        train_input.assign(batch.reshape(nbatch, ninput))
        train_exout.assign(batch_out.reshape(nbatch, noutput))
        err = train.get()
        if i % show_every_n == show_every_n - 1:
            print('training {}\ntraining error:\n{}'.format(i + 1, err))
    print('training time: {} seconds'.format(time.time() - start))

    untrained_err = 0
    trained_err = 0
    pretrained_err = 0
    for i in range(args.n_test):
        if i % show_every_n == show_every_n - 1:
            print('testing {}'.format(i + 1))

        test_batch, test_batch_out = batch_generate(ninput, 1)
        testin.assign(test_batch)

        untrained_data = untrained_out.get()
        trained_data = trained_out.get()
        pretrained_data = pretrained_out.get()

        untrained_err += np.mean(abs(untrained_data - test_batch_out))
        trained_err += np.mean(abs(trained_data - test_batch_out))
        pretrained_err += np.mean(abs(pretrained_data - test_batch_out))
    untrained_err /= args.n_test
    trained_err /= args.n_test
    pretrained_err /= args.n_test

    print('untrained mlp error rate: {}%'.format(untrained_err * 100))
    print('trained mlp error rate: {}%'.format(trained_err * 100))
    print('pretrained mlp error rate: {}%'.format(pretrained_err * 100))
    print('is structurally equal?:', cmp.is_equal(pretrained_out, trained_out))
    print('% data equal:', cmp.percent_dataeq(pretrained_out, trained_out))

    try:
        print('saving')
        if tc.save_to_file(args.save, [model]):
            print('successfully saved to {}'.format(args.save))
    except Exception as e:
        print(e)
        print('failed to write to "{}"'.format(args.save))
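`str2bool` is used as the `type` of the `--seed` flag here and in the other demos. The repo's version is assumed to follow the common argparse coercion recipe:

import argparse

# common argparse boolean-coercion recipe; the repo's str2bool is
# assumed to behave like this
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')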
def main(args):
    default_ts = time.time()

    parser = argparse.ArgumentParser(description=prog_description)
    parser.add_argument('--seed', dest='seed', type=str2bool, nargs='?',
                        const=False, default=False,
                        help='Whether to seed or not (default: False)')
    parser.add_argument('--seedval', dest='seedval', type=int, nargs='?',
                        default=int(default_ts),
                        help='Random seed value (default: <current time>)')
    parser.add_argument('--n_train', dest='n_train', type=int, nargs='?',
                        default=10001,
                        help='Number of times to train (default: 10001)')
    parser.add_argument('--save', dest='save', nargs='?', default='',
                        help='Filename to save model (default: <blank>)')
    parser.add_argument('--load', dest='load', nargs='?',
                        default='models/latin_lstm.onnx',
                        help='Filename to load pretrained model (default: models/latin_lstm.onnx)')
    args = parser.parse_args(args)

    if args.seed:
        print('seeding {}'.format(args.seedval))
        tc.seed(args.seedval)
        np.random.seed(args.seedval)
    else:
        np.random.seed(seed=0)

    # Read data and set up maps for integer encoding and decoding.
    data = open('models/data/latin_input.txt', 'r').read()
    chars = sorted(list(set(data)))  # sorting makes the model predictable (if seeded)
    data_size, vocab_size = len(data), len(chars)
    print('data has %d characters, %d unique.' % (data_size, vocab_size))
    char_to_ix = {ch: i for i, ch in enumerate(chars)}
    ix_to_char = {i: ch for i, ch in enumerate(chars)}

    # Hyperparameters. Hidden size is set to vocab_size, assuming the needed
    # level of abstraction is roughly proportional to vocab_size (but it can
    # be set to any other value). Longer sequence lengths allow lengthier
    # latent dependencies to be trained.
    h_size, o_size, N = vocab_size, vocab_size, vocab_size
    seq_length = 25
    learning_rate = 1e-1
    print_interval = 100

    # legacy initializer, kept for reference; the layers below use
    # tc.api.init.random_uniform with the same range
    def old_winit(shape, label):
        return tc.variable(np.random.uniform(-0.05, 0.05, shape.as_list()), label)

    model = tc.api.layer.link([
        tc.api.layer.lstm(tc.Shape([N]), h_size, seq_length,
                          kernel_init=tc.api.init.random_uniform(-0.05, 0.05)),
        tc.api.layer.dense([h_size], [o_size],
                           kernel_init=tc.api.init.random_uniform(-0.05, 0.05)),
        tc.api.layer.bind(lambda x: tc.api.softmax(x, 0, 1)),
    ])
    untrained_model = model.deep_clone()
    pretrained_model = model.deep_clone()
    try:
        print('loading ' + args.load)
        pretrained_model = tc.load_from_file(args.load)[0]
        print('successfully loaded from ' + args.load)
    except Exception as e:
        print(e)
        print('failed to load from "{}"'.format(args.load))

    sample_inp = tc.EVariable([1, vocab_size], 0)
    trained_prob = tc.api.slice(model.connect(sample_inp), 0, 1, 1)
    untrained_prob = tc.api.slice(untrained_model.connect(sample_inp), 0, 1, 1)
    pretrained_prob = tc.api.slice(pretrained_model.connect(sample_inp), 0, 1, 1)

    train_inps = tc.EVariable([seq_length, vocab_size], 0)
    train_exout = tc.EVariable([seq_length, vocab_size], 0)
    train_err = tc.apply_update(
        [model],
        lambda error, leaves: tc.api.approx.adagrad(
            error, leaves, learning_rate=learning_rate, epsilon=1e-8),
        lambda models: encoded_loss(train_exout, models[0].connect(train_inps)))

    tc.optimize("cfg/optimizations.json")

    smooth_loss = -np.log(1.0 / vocab_size) * seq_length
    p = 0
    start = time.time()
    for i in range(args.n_train):
        # Reset memory if appropriate
        if p + seq_length + 1 >= len(data) or i == 0:
            p = 0

        # Get input and target sequence
        inputs = [char_to_ix[ch] for ch in data[p:p + seq_length]]
        encoded_inp = one_encode(inputs, vocab_size)
        encoded_out = one_encode(
            [char_to_ix[ch] for ch in data[p + 1:p + seq_length + 1]],
            vocab_size)

        # Occasionally sample from the model and print the result
        if i % print_interval == 0:
            sample_ix = sample(sample_inp, trained_prob, inputs[0], 1000)
            print('----\n%s\n----' % (''.join(ix_to_char[ix] for ix in sample_ix)))

        # Train the model on the current input and target sequences
        train_inps.assign(encoded_inp)
        train_exout.assign(encoded_out)
        loss = train_err.get()
        smooth_loss = smooth_loss * 0.999 + loss * 0.001

        # Occasionally print loss information
        if i % print_interval == 0:
            print('iter %d, loss: %f, smooth loss: %f' % (i, loss, smooth_loss))
            print('batch training time: {} seconds'.format(time.time() - start))
            start = time.time()

        # Prepare for next iteration
        p += seq_length

    untrained_sample = sample(sample_inp, untrained_prob, char_to_ix[data[0]], 1000)
    trained_sample = sample(sample_inp, trained_prob, char_to_ix[data[0]], 1000)
    pretrained_sample = sample(sample_inp, pretrained_prob, char_to_ix[data[0]], 1000)
    print('--untrained--\n%s\n----' % (''.join(ix_to_char[ix] for ix in untrained_sample)))
    print('--trained--\n%s\n----' % (''.join(ix_to_char[ix] for ix in trained_sample)))
    print('--pretrained--\n%s\n----' % (''.join(ix_to_char[ix] for ix in pretrained_sample)))

    try:
        print('saving')
        if tc.save_to_file(args.save, [model]):
            print('successfully saved to {}'.format(args.save))
    except Exception as e:
        print(e)
        print('failed to write to "{}"'.format(args.save))
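`one_encode` turns a list of character indices into the `[seq_length, vocab_size]` one-hot matrix assigned to `train_inps`/`train_exout` above. A minimal numpy sketch of the assumed behavior:

import numpy as np

# assumed one-hot encoder: row i is the one-hot vector for indices[i]
def one_encode(indices, vocab_size):
    encoded = np.zeros((len(indices), vocab_size))
    encoded[np.arange(len(indices)), indices] = 1
    return encoded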
    tc.api.layer.bind(tc.api.softmax),
])

training_data = generate_training_data(embedding, corpus)

winput = tc.variable(np.random.rand(nwords) * 2 - 1, 'input')
woutput = tc.variable(np.random.rand(2 * window, nwords) * 2 - 1, 'output')
y_pred = model.connect(winput)
train_err = tc.apply_update(
    [model],
    lambda error, leaves: tc.api.approx.sgd(error, leaves, lr),
    lambda models: tc.api.reduce_sum(tc.api.pow(
        tc.api.extend(models[0].connect(winput), [1, 2 * window]) - woutput,
        2.)))

tc.optimize("cfg/optimizations.json")

# Cycle through each epoch
for i in range(epochs):
    # Initialise loss to 0
    loss = 0

    # Cycle through each training sample
    # w_t = vector for target word, w_c = vectors for context words
    for w_t, w_c in training_data:
        wcdata = np.array(w_c)
        ydata = y_pred.get().reshape(1, nwords)
        # pad missing context rows (words near the corpus edge) with the
        # current prediction so those rows contribute zero error
        for j in range(2 * window - wcdata.shape[0]):
            wcdata = np.concatenate((wcdata, ydata), 0)
        winput.assign(np.array(w_t))
        woutput.assign(wcdata)
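The padding loop above tops up short context windows with the model's own prediction so that the corresponding rows of the squared error vanish. A small standalone illustration of the shape bookkeeping, with made-up sizes:

import numpy as np

# toy illustration of the context padding: window = 2 means up to
# 2 * window = 4 context rows; a word at the corpus edge may have fewer
window, nwords = 2, 5
wcdata = np.ones((3, nwords))        # only 3 real context rows
ydata = np.full((1, nwords), 0.2)    # stand-in for y_pred.get()
for _ in range(2 * window - wcdata.shape[0]):
    wcdata = np.concatenate((wcdata, ydata), 0)
assert wcdata.shape == (2 * window, nwords)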
def main(args):
    default_ts = time.time()

    parser = argparse.ArgumentParser(description=prog_description)
    parser.add_argument('--seed', dest='seed', type=str2bool, nargs='?',
                        const=False, default=False,
                        help='Whether to seed or not (default: False)')
    parser.add_argument('--seedval', dest='seedval', type=int, nargs='?',
                        default=int(default_ts),
                        help='Random seed value (default: <current time>)')
    parser.add_argument('--n_batch', dest='n_batch', type=int, nargs='?',
                        default=100,
                        help='Batch size when training (default: 100)')
    parser.add_argument('--n_train', dest='n_train', type=int, nargs='?',
                        default=2000,
                        help='Number of times to train (default: 2000)')
    parser.add_argument('--n_test', dest='n_test', type=int, nargs='?',
                        default=5,
                        help='Number of times to test (default: 5)')
    parser.add_argument('--save', dest='save', nargs='?', default='',
                        help='Filename to save model (default: <blank>)')
    parser.add_argument('--load', dest='load', nargs='?',
                        default='models/rnn.onnx',
                        help='Filename to load pretrained model (default: models/rnn.onnx)')
    args = parser.parse_args(args)

    if args.seed:
        print('seeding {}'.format(args.seedval))
        tc.seed(args.seedval)
        np.random.seed(args.seedval)
    else:
        np.random.seed(seed=1)

    # dataset parameters
    n_train = args.n_train
    n_test = args.n_test
    sequence_len = 7

    # training parameters
    n_batch = args.n_batch
    lmbd = 0.5
    learning_rate = 0.05
    momentum_term = 0.80
    eps = 1e-6

    # model parameters
    nunits = 3  # number of states in the recurrent layer
    ninput = 2
    noutput = 1

    model = tc.api.layer.link([
        tc.api.layer.dense(inshape=[ninput], hidden_dims=[nunits],
                           kernel_init=weight_init),
        tc.api.layer.rnn(indim=nunits, hidden_dim=nunits,
                         activation=tc.api.tanh, nseq=sequence_len,
                         seq_dim=2, kernel_init=weight_init,
                         bias_init=tc.api.init.zeros()),
        tc.api.layer.dense(inshape=[nunits], hidden_dims=[noutput],
                           kernel_init=weight_init),
        # same stack with default initializers:
        # tc.api.layer.dense(inshape=[ninput], hidden_dims=[nunits]),
        # tc.api.layer.rnn(indim=nunits, hidden_dim=nunits,
        #                  activation=tc.api.tanh, nseq=sequence_len, seq_dim=2),
        # tc.api.layer.dense(inshape=[nunits], hidden_dims=[noutput]),
        tc.api.layer.bind(tc.api.sigmoid),
    ])

    untrained = model.deep_clone()
    trained = model.deep_clone()
    try:
        print('loading ' + args.load)
        trained = tc.load_from_file(args.load)[0]
        print('successfully loaded from ' + args.load)
    except Exception as e:
        print(e)
        print('failed to load from "{}"'.format(args.load))

    train_invar = tc.EVariable([n_batch, sequence_len, ninput])
    train_exout = tc.EVariable([n_batch, sequence_len, noutput])
    tinput = tc.api.permute(train_invar, [0, 2, 1])
    toutput = tc.api.permute(train_exout, [0, 2, 1])
    train_err = tc.apply_update(
        [model],
        lambda error, leaves: make_rms_prop(
            error, leaves, learning_rate, momentum_term, lmbd, eps),
        lambda models: loss(toutput, models[0].connect(tinput)))
    # alternatively:
    # lambda models: tc.api.reduce_mean(
    #     tc.api.loss.cross_entropy(toutput, models[0].connect(tinput)))

    # create training samples
    train_input, train_output = create_dataset(n_train, sequence_len)
    print(f'train_input tensor shape: {train_input.shape}')
    print(f'train_output tensor shape: {train_output.shape}')

    test_invar = tc.EVariable([n_test, sequence_len, ninput])
    tin = tc.api.permute(test_invar, [0, 2, 1])
    untrained_out = tc.api.round(untrained.connect(tin))
    trained_out = tc.api.round(model.connect(tin))
    pretrained_out = tc.api.round(trained.connect(tin))

    tc.optimize("cfg/optimizations.json")

    ls_of_loss = []
    start = time.time()
    for i in range(5):
        for j in range(n_train // n_batch):
            xbatch = train_input[j:j + n_batch, :, :]
            tbatch = train_output[j:j + n_batch, :, :]
            train_invar.assign(xbatch)
            train_exout.assign(tbatch)
            # add loss to list to plot
            ls_of_loss.append(train_err.get())
    print('training time: {} seconds'.format(time.time() - start))

    # Plot the loss over the iterations
    fig = plt.figure(figsize=(5, 3))
    plt.plot(ls_of_loss, 'b-')
    plt.xlabel('minibatch iteration')
    plt.ylabel('$\\xi$', fontsize=15)
    plt.title('Loss over backprop iteration')
    plt.xlim(0, 99)
    fig.subplots_adjust(bottom=0.2)
    plt.show()

    test_input, test_output = create_dataset(n_test, sequence_len)
    test_invar.assign(test_input)
    got_untrained = untrained_out.get()
    got_trained = trained_out.get()
    got_pretrained = pretrained_out.get()

    for i in range(test_input.shape[0]):
        left = test_input[i, :, 0]
        right = test_input[i, :, 1]
        expected = test_output[i, :, :]
        yuntrained = got_untrained[:, i, :]
        ytrained = got_trained[:, i, :]
        ypretrained = got_pretrained[:, i, :]

        # bit strings are stored LSB first, so reverse before parsing
        left = ''.join([str(int(d)) for d in left])
        left_num = int(''.join(reversed(left)), 2)
        right = ''.join([str(int(d)) for d in right])
        right_num = int(''.join(reversed(right)), 2)
        expected = ''.join([str(int(d[0])) for d in expected])
        expected_num = int(''.join(reversed(expected)), 2)
        yuntrained = ''.join([str(int(d[0])) for d in yuntrained])
        yuntrained_num = int(''.join(reversed(yuntrained)), 2)
        ytrained = ''.join([str(int(d[0])) for d in ytrained])
        ytrained_num = int(''.join(reversed(ytrained)), 2)
        ypretrained = ''.join([str(int(d[0])) for d in ypretrained])
        ypretrained_num = int(''.join(reversed(ypretrained)), 2)
        print(f'left:         {left:s}   {left_num:2d}')
        print(f'right:      + {right:s}   {right_num:2d}')
        print(f'              -------   --')
        print(f'expected:   = {expected:s}   {expected_num:2d}')
        print(f'untrained:  = {yuntrained:s}   {yuntrained_num:2d}')
        print(f'trained:    = {ytrained:s}   {ytrained_num:2d}')
        print(f'pretrained: = {ypretrained:s}   {ypretrained_num:2d}')
        print('')

    try:
        print('saving')
        if tc.save_to_file(args.save, [model]):
            print('successfully saved to {}'.format(args.save))
    except Exception as e:
        print(e)
        print('failed to write to "{}"'.format(args.save))
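`create_dataset` and `weight_init` come from elsewhere in this demo. From the printout above (LSB-first bit strings, `left + right = expected`), `create_dataset` plausibly builds random binary-addition problems, and `weight_init` can follow the same pattern as `old_winit` in the latin_lstm demo. A sketch under those assumptions:

import numpy as np

# assumed dataset builder: random binary addition problems, stored
# LSB first so the carry propagates in sequence order
def create_dataset(nb_samples, sequence_len):
    max_int = 2 ** (sequence_len - 1)  # keep the sum within sequence_len bits
    x = np.zeros((nb_samples, sequence_len, 2))  # the two addends
    t = np.zeros((nb_samples, sequence_len, 1))  # their sum
    for i in range(nb_samples):
        left = np.random.randint(max_int)
        right = np.random.randint(max_int)
        for j in range(sequence_len):
            x[i, j, 0] = (left >> j) & 1
            x[i, j, 1] = (right >> j) & 1
            t[i, j, 0] = ((left + right) >> j) & 1
    return x, t

# assumed initializer, mirroring old_winit from the latin_lstm demo
def weight_init(shape, label):
    return tc.variable(np.random.uniform(-0.05, 0.05, shape.as_list()), label)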
def main(args):
    default_ts = time.time()

    parser = argparse.ArgumentParser(description=prog_description)
    parser.add_argument('--seed', dest='seed', type=str2bool, nargs='?',
                        const=False, default=True,
                        help='Whether to seed or not (default: True)')
    parser.add_argument('--seedval', dest='seedval', type=int, nargs='?',
                        default=int(default_ts),
                        help='Random seed value (default: <current time>)')
    parser.add_argument('--pre_nepoch', dest='pretrain_epochs', type=int,
                        nargs='?', default=1000,
                        help='Number of epochs when pretraining (default: 1000)')
    parser.add_argument('--nepochs', dest='finetune_epochs', type=int,
                        nargs='?', default=200,
                        help='Number of epochs when finetuning (default: 200)')
    parser.add_argument('--cdk', dest='cdk', type=int, nargs='?', default=1,
                        help='Length of the contrastive divergence chain (default: 1)')
    parser.add_argument('--save', dest='save', nargs='?', default='',
                        help='Filename to save model (default: <blank>)')
    parser.add_argument('--load', dest='load', nargs='?',
                        default='models/dbn.onnx',
                        help='Filename to load pretrained model (default: models/dbn.onnx)')
    args = parser.parse_args(args)

    if args.seed:
        print('seeding {}'.format(args.seedval))
        tc.seed(args.seedval)
        np.random.seed(args.seedval)

    x = np.array([[1, 1, 1, 0, 0, 0],
                  [1, 0, 1, 0, 0, 0],
                  [1, 1, 1, 0, 0, 0],
                  [0, 0, 1, 1, 1, 0],
                  [0, 0, 1, 1, 0, 0],
                  [0, 0, 1, 1, 1, 0],
                  [0, 0, 1, 1, 0, 1]])
    y = np.array([[1, 0],
                  [1, 0],
                  [1, 0],
                  [0, 1],
                  [0, 1],
                  [0, 1],
                  [0, 0]])

    # construct DBN
    rbms = [
        tc.api.layer.rbm(6, 3),
        tc.api.layer.rbm(3, 3),
    ]
    dense = tc.api.layer.dense([3], [2], kernel_init=tc.api.init.zeros())
    softmax_dim = 0
    # interlace each rbm's forward layer with a sigmoid activation
    rbm_interlace = zip([rbm.fwd() for rbm in rbms],
                        len(rbms) * [tc.api.layer.bind(tc.api.sigmoid)])
    model = tc.api.layer.link(
        [e for inters in rbm_interlace for e in inters] + [
            dense,
            tc.api.layer.bind(lambda x: tc.api.softmax(x, softmax_dim, 1))
        ])

    untrained = model.deep_clone()
    trained = model.deep_clone()
    try:
        print('loading ' + args.load)
        trained = tc.load_from_file(args.load)[0]
        print('successfully loaded from ' + args.load)
    except Exception as e:
        print(e)
        print('failed to load from "{}"'.format(args.load))

    trainer = tc.DBNTrainer(rbms, dense, softmax_dim, x.shape[0],
                            pretrain_lr=0.1, train_lr=0.1, cdk=args.cdk)

    def pretrain_log(epoch, layer):
        if epoch % 100 == 0:
            print('Pre-training layer {}, epoch {}, cost {}'.format(
                layer, epoch, trainer.reconstruction_cost(layer)))

    def finetune_log(epoch):
        if epoch % 100 == 0:
            print('Training epoch {}, cost {}'.format(
                epoch, trainer.training_cost()))

    # pre-training (TrainUnsupervisedDBN)
    trainer.pretrain(x, nepochs=args.pretrain_epochs, logger=pretrain_log)

    # fine-tuning (DBNSupervisedFineTuning)
    trainer.finetune(x, y, nepochs=args.finetune_epochs, logger=finetune_log)

    # test
    x = np.array([1, 1, 0, 0, 0, 0])
    var = tc.variable(x)
    untrained_out = untrained.connect(var)
    out = model.connect(var)
    trained_out = trained.connect(var)

    tc.optimize("cfg/optimizations.json")

    # since x is similar to the first 3 rows of the training inputs,
    # expect results similar to the first 3 rows of y: [1, 0]
    print('untrained_out: ', untrained_out.get())
    print('out: ', out.get())
    print('trained_out: ', trained_out.get())

    try:
        print('saving')
        if tc.save_to_file(args.save, [model]):
            print('successfully saved to {}'.format(args.save))
    except Exception as e:
        print(e)
        print('failed to write to "{}"'.format(args.save))
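The zip/flatten construction above alternates each RBM's forward layer with a sigmoid bind; for this two-RBM case it is equivalent to writing the stack out by hand, which may be easier to read:

# the interlaced stack above, expanded explicitly for the two-RBM case
model = tc.api.layer.link([
    rbms[0].fwd(),
    tc.api.layer.bind(tc.api.sigmoid),
    rbms[1].fwd(),
    tc.api.layer.bind(tc.api.sigmoid),
    dense,
    tc.api.layer.bind(lambda x: tc.api.softmax(x, softmax_dim, 1)),
])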
def main(args):
    default_ts = time.time()

    parser = argparse.ArgumentParser(description=prog_description)
    parser.add_argument('--seed', dest='seed', type=str2bool, nargs='?',
                        const=False, default=False,
                        help='Whether to seed or not (default: False)')
    parser.add_argument('--seedval', dest='seedval', type=int, nargs='?',
                        default=int(default_ts),
                        help='Random seed value (default: <current time>)')
    parser.add_argument('--n_train', dest='n_train', type=int, nargs='?',
                        default=100,
                        help='Number of times to train (default: 100)')
    parser.add_argument('--save', dest='save', nargs='?', default='',
                        help='Filename to save model (default: <blank>)')
    parser.add_argument('--load', dest='load', nargs='?',
                        default='models/fast_lstm.onnx',
                        help='Filename to load pretrained model (default: models/fast_lstm.onnx)')
    args = parser.parse_args(args)

    if args.seed:
        print('seeding {}'.format(args.seedval))
        tc.seed(args.seedval)
        np.random.seed(args.seedval)
    else:
        np.random.seed(seed=0)

    # parameters for input data dimension and lstm cell count
    mem_cell_ct = 100
    x_dim = 50
    y_list = [-0.5, 0.2, 0.1, -0.5]
    input_val_arr = [np.random.random(x_dim) for _ in y_list]

    model = tc.api.layer.lstm(tc.Shape([x_dim]), mem_cell_ct, len(y_list),
                              kernel_init=tc.api.init.xavier_uniform(1),
                              bias_init=tc.api.init.xavier_uniform(1))
    untrained_model = model.deep_clone()
    pretrained_model = model.deep_clone()
    try:
        print('loading ' + args.load)
        pretrained_model = tc.load_from_file(args.load)[0]
        print('successfully loaded from ' + args.load)
    except Exception as e:
        print(e)
        print('failed to load from "{}"'.format(args.load))

    test_inputs = tc.variable(np.array(input_val_arr), 'test_input')
    test_exout = tc.variable(np.array(y_list), 'test_exout')

    untrained = tc.api.slice(untrained_model.connect(test_inputs), 0, 1, 0)
    hiddens = tc.api.slice(model.connect(test_inputs), 0, 1, 0)
    pretrained = tc.api.slice(pretrained_model.connect(test_inputs), 0, 1, 0)
    err = tc.api.reduce_sum(loss(test_exout, hiddens))

    train_err = tc.apply_update(
        [model],
        lambda error, leaves: tc.api.approx.sgd(
            error, leaves, learning_rate=0.1),
        lambda models: loss(test_exout, models[0].connect(test_inputs)))

    tc.optimize("cfg/optimizations.json")

    start = time.time()
    for cur_iter in range(args.n_train):
        train_err.get()
        print("iter {}: y_pred = {}, loss: {}".format(
            cur_iter, hiddens.get().flatten(), err.get()))
    print("expecting = {}".format(np.array(y_list)))
    print("untrained_y_pred = {}".format(untrained.get().flatten()))
    print("trained_y_pred = {}".format(hiddens.get().flatten()))
    print("pretrained_y_pred = {}".format(pretrained.get().flatten()))
    print('training time: {} seconds'.format(time.time() - start))

    try:
        print('saving')
        if tc.save_to_file(args.save, [model]):
            print('successfully saved to {}'.format(args.save))
    except Exception as e:
        print(e)
        print('failed to write to "{}"'.format(args.save))
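`loss` here is an external helper. Given how it is reduced with `tc.api.reduce_sum` above, a squared-difference form is a plausible assumption; the sketch below uses only operators that appear elsewhere in these demos and is not the repo's actual definition:

# assumed squared-difference loss, built from operators used in the
# word2vec demo above (tensor subtraction and tc.api.pow)
def loss(expected, got):
    return tc.api.pow(expected - got, 2.)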