def _model_loss(images, depths, invalid_depths, mode):
    """Build the inference graph for `mode` and return (total_loss, logits)."""
    # reuse/trainable must be known at graph-construction time, so plain
    # Python booleans are used (the original created tf.bool placeholders
    # here and immediately shadowed them with booleans).
    if mode == 'train':
        flag_reuse_train_eval = False
        flag_trainable_train_eval = True
    elif mode == 'eval':
        flag_reuse_train_eval = True
        flag_trainable_train_eval = False
    else:
        raise ValueError("mode must be 'train' or 'eval', got: {!r}".format(mode))

    with tf.variable_scope(tf.get_variable_scope()):
        logits = inference(images, reuse=flag_reuse_train_eval,
                           trainable=flag_trainable_train_eval)
        # loss() registers its terms in the 'losses' collection as a side effect.
        loss(logits, depths, invalid_depths)
    total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')

    # Compute the moving average of the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    loss_averages_op = loss_averages.apply([total_loss])
    with tf.control_dependencies([loss_averages_op]):
        total_loss = tf.identity(total_loss)
    return total_loss, logits
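# A minimal usage sketch for _model_loss (hypothetical helper; assumptions:
# `images`, `depths` and `invalid_depths` come from an input pipeline, and
# `inference`/`loss` are defined in this module). Building the 'train' graph
# first lets the 'eval' graph reuse the same variables via the reuse flag.
def _build_train_and_eval_losses(images, depths, invalid_depths):
    train_loss, _ = _model_loss(images, depths, invalid_depths, 'train')
    eval_loss, eval_logits = _model_loss(images, depths, invalid_depths, 'eval')
    return train_loss, eval_loss, eval_logits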
def test_twod_target(self):
    X, Z, Z_corrupt = setup_sinusidal_set()
    model = online.regression.LinReg(
        dim_in=X.shape[1],
        dim_out=Z.shape[1],
        dim_basis=X.shape[1],
        basis_fcts='polynomial'
    )
    self._fit(model, X, Z)
    print(model.loss(X, Z))
def test_oned_target(self, W=np.array([1, 1, 1])):
    X, Z, Z_corrupt = setup_parabula_set(n=100000, W=W)
    model = online.regression.LinReg(
        dim_in=X.shape[1],
        dim_out=Z.shape[1],
        dim_basis=X.shape[1],
        basis_fcts='polynomial'
    )
    self._fit(model, X, Z)
    # The recovered weights should sum to within +/-10% of the true weights.
    sum_w = np.sum(W)
    sum_mw = np.sum(model._W)
    msg = ('Predicted weights out of bounds, original weights are: {}, '
           'predicted weights are: {}'.format(W, model._W))
    assert sum_w * 0.9 < sum_mw, msg
    assert sum_mw < sum_w * 1.1, msg
    print(model.loss(X, Z))
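# Hypothetical sketch of the fixture the test above assumes: a parabola
# y = w0 + w1*x + w2*x**2 expanded into polynomial features, plus a noisy
# copy of the targets. The internals and the noise level are assumptions,
# not the project's actual setup_parabula_set.
import numpy as np

def setup_parabula_set(n=1000, W=np.array([1, 1, 1]), noise=0.1):
    x = np.random.uniform(-1, 1, n)
    X = np.column_stack([np.ones(n), x, x ** 2])   # features [1, x, x^2]
    Z = X.dot(W).reshape(-1, 1)                    # clean targets
    Z_corrupt = Z + np.random.normal(0, noise, Z.shape)
    return X, Z, Z_corrupt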
def test_learn_online(self):
    e = threading.Event()
    try:
        urlemg = 'tcp://192.168.0.20:5555'
        emgp = online.publisher.EmgPublisher(urlemg, abort=e)
        emgs = online.sources.FileSource(emgp, 4000, 'emg_data', abort=e)
        emgsub = online.subscriber.EmgSubscriber(urlemg, abort=e)
        emgiter = online.subscriber.array_iterator(ArrayMessage, emgsub)

        urlkin = 'tcp://192.168.0.20:5556'
        kinp = online.publisher.KinPublisher(urlkin, abort=e)
        kins = online.sources.FileSource(kinp, 500, 'kin_data', abort=e)
        kinsub = online.subscriber.KinSubscriber(urlkin, abort=e)
        kiniter = online.subscriber.array_iterator(ArrayMessage, kinsub)

        # sigmoid = lambda X: 1 / (1 + np.exp(-X))  # unused; the original
        # commented-out version was missing the minus of the standard logistic
        identity = lambda X: X
        model = online.regression.LinReg(
            dim_in=ArrayMessage.duration * kins.samplingrate * 5,
            dim_out=ArrayMessage.duration * kins.samplingrate * 3,
            dim_basis=ArrayMessage.duration * kins.samplingrate * 5,
            basis_fcts=identity)
        print('Calculated shapes: dim_in={}, dim_out={}, dim_basis={}'.format(
            ArrayMessage.duration * kins.samplingrate * 5,
            ArrayMessage.duration * kins.samplingrate * 3,
            ArrayMessage.duration * kins.samplingrate * 5))

        print('start threads')
        emgp.start()
        emgs.start()
        emgsub.start()
        kinp.start()
        kins.start()
        kinsub.start()

        count = 0
        while count < 1000:
            # Three kinematic channels serve as the regression targets.
            Z = next(kiniter).data[:, [2, 7, 9]]
            X = next(emgiter).data
            # EMG is read at 4000 Hz, kinematics at 500 Hz; average groups
            # of EMG samples so both streams align row-for-row.
            X_ = X.reshape(Z.shape[0], -1, X.shape[1])
            X = np.mean(X_, axis=1)
            Z = Z.flatten().reshape(1, -1)
            X = X.flatten().reshape(1, -1)
            model.train(X, Z)
            if count % 100 == 0:
                print('{}\t\t{}'.format(count, model.loss(X, Z)))
            count += 1
    finally:
        # Signal all publisher/subscriber threads to shut down, on both the
        # success and the error path (the original called e.set() three times).
        e.set()
def _fit(self, model, X, Z, batchsize=10, valeval=50, alpha=0.01):
    """Train in mini-batches and keep the weights with the best observed loss."""
    i = 0  # renamed from `iter`, which shadows the builtin
    best_loss = float('inf')
    best_W = None
    while i + batchsize < X.shape[0]:
        model.train(X[i:i + batchsize], Z[i:i + batchsize], alpha)
        # Evaluate every `valeval` batches (the original hardcoded 50 here).
        if (i // batchsize) % valeval == 0:
            loss = model.loss(X[i:i + batchsize], Z[i:i + batchsize])
            if loss < best_loss:
                best_loss = loss
                # Copy in case LinReg updates _W in place during training.
                best_W = np.copy(model._W)
            print('best loss: {} -- current loss: {}'.format(best_loss, loss))
        i += batchsize
    model._W = best_W
def evaluate_loss(model, dataset, params):
    with tf.Session(config=tf.ConfigProto(
            inter_op_parallelism_threads=params.num_cores,
            intra_op_parallelism_threads=params.num_cores,
            gpu_options=tf.GPUOptions(allow_growth=True))) as session:
        tf.local_variables_initializer().run()
        tf.global_variables_initializer().run()
        # Restore the latest checkpoint before evaluating.
        saver = tf.train.Saver(tf.global_variables())
        ckpt = tf.train.get_checkpoint_state(params.model)
        saver.restore(session, ckpt.model_checkpoint_path)

        print('evaluating loss')
        avg_loss, loss_list = m.loss(
            model,
            dataset.batches('validation', params.batch_size, num_epochs=1),
            session)
        print('loss: {}'.format(avg_loss))
        plt.plot(loss_list)
        plt.show()
def evaluate_loss(model, dataset, params, session):
    print('evaluating loss')
    loss = m.loss(
        model,
        dataset.batches('validation', params.batch_size, num_epochs=1),
        session)
    print('loss: {}'.format(loss))
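# A minimal usage sketch for the session-taking variant above (hypothetical
# helper; assumptions: the model's graph has already been built and a
# checkpoint written under params.model, mirroring the restore logic of the
# first evaluate_loss variant).
def evaluate_from_checkpoint(model, dataset, params):
    with tf.Session() as session:
        tf.local_variables_initializer().run()
        tf.global_variables_initializer().run()
        saver = tf.train.Saver(tf.global_variables())
        ckpt = tf.train.get_checkpoint_state(params.model)
        saver.restore(session, ckpt.model_checkpoint_path)
        evaluate_loss(model, dataset, params, session)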
def train():
    """Train datasets for a number of steps."""
    with tf.Graph().as_default():
        global_step = tf.Variable(0, trainable=False)

        # Get images and labels for the model.
        images, labels = model.distorted_inputs()

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = model.inference(images)

        # Calculate loss.
        loss = model.loss(logits, labels)

        # Build a Graph that trains the model with one batch of examples and
        # updates the model parameters.
        train_op = model.train(loss, global_step)

        # Create a saver.
        saver = tf.train.Saver(tf.global_variables())

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Build an initialization operation to run below.
        init = tf.global_variables_initializer()

        # Start running operations on the Graph.
        # log_device_placement=True prints the device each op runs on.
        sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=FLAGS.log_device_placement))
        sess.run(init)

        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)

        # Note: graph= replaces the deprecated graph_def= argument.
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for step in range(FLAGS.max_steps):
            start_time = time.time()
            _, loss_value = sess.run([train_op, loss])
            duration = time.time() - start_time

            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

            if step % 10 == 0:
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)
                format_str = ('%s: step %d, loss = %.2f '
                              '(%.1f examples/sec; %.3f sec/batch)')
                print(format_str % (datetime.now(), step, loss_value,
                                    examples_per_sec, sec_per_batch))

            if step % 100 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)

            # Save the model checkpoint periodically.
            if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
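# A minimal entry-point sketch in the style of the TensorFlow CIFAR-10
# tutorial this train() loop follows (assumption: FLAGS.train_dir and the
# other flags are defined via tf.app.flags elsewhere in the module).
def main(argv=None):
    # Start from a clean training directory.
    if tf.gfile.Exists(FLAGS.train_dir):
        tf.gfile.DeleteRecursively(FLAGS.train_dir)
    tf.gfile.MakeDirs(FLAGS.train_dir)
    train()


if __name__ == '__main__':
    tf.app.run()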