def __init__(self, gpu_id=GPU_ID):
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
    basedir = os.path.dirname(__file__)
    tf.Graph().as_default()
    self.imagePlaceholder = tf.placeholder(tf.uint8, shape=(None, CROP_SIZE, CROP_SIZE, 3))
    self.prevLstmState = tuple([
        tf.placeholder(tf.float32, shape=(None, LSTM_SIZE)) for _ in range(4)
    ])
    self.batch_size = tf.placeholder(tf.int32, shape=())
    self.outputs, self.state1, self.state2 = network.inference(
        self.imagePlaceholder, num_unrolls=1, batch_size=self.batch_size,
        train=False, prevLstmState=self.prevLstmState)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    self.sess = tf.Session(config=config)
    ckpt = tf.train.get_checkpoint_state(
        os.path.join(basedir, '..', LOG_DIR, 'checkpoints'))
    if ckpt is None:
        raise IOError(
            'Checkpoint model could not be found. '
            'Did you download the pretrained weights? '
            'Download them here: https://goo.gl/NWGXGM and read the Model section of the Readme.')
    tf_util.restore(self.sess, ckpt.model_checkpoint_path)
    self.tracked_data = {}
    self.time = 0
    self.total_forward_count = -1

def __init__(self,
             checkpoint_dir=os.path.join(os.path.dirname(__file__), '..', LOG_DIR, 'checkpoints'),
             gpu_id=GPU_ID,
             profiler: Profiler = None):
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
    tf.Graph().as_default()
    self.imagePlaceholder = tf.placeholder(tf.uint8, shape=(None, CROP_SIZE, CROP_SIZE, 3))
    self.prevLstmState = tuple([
        tf.placeholder(tf.float32, shape=(None, LSTM_SIZE)) for _ in range(4)
    ])
    self.batch_size = tf.placeholder(tf.int32, shape=())
    self.outputs, self.state1, self.state2 = network.inference(
        self.imagePlaceholder, num_unrolls=1, batch_size=self.batch_size,
        train=False, prevLstmState=self.prevLstmState)
    self.sess = tf_util.Session()
    self.sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    if ckpt is None:
        raise IOError(
            'Checkpoint model could not be found. '
            'Did you download the pretrained weights? '
            'Download them here: http://bit.ly/2L5deYF and read the Model section of the Readme.')
    tf_util.restore(self.sess, ckpt.model_checkpoint_path)
    self.tracked_data = {}
    self.time = 0
    self.total_forward_count = -1
    # Optional profiler hooks; the strings label the timed sections reported by the profiler.
    self._profiler: Profiler = profiler_pipe(profiler)
    self._re3_crop_profiler = "re3 cropping image"
    self._re3_crop2_profiler = "re3 cropping image 2"
    self._re3_sess_profiler = "re3 session run"
    self._re3_sess2_profiler = "re3 session run 2"

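# Usage sketch (not part of the original file): one forward step through the tensors
# built in __init__ above, starting from a zeroed LSTM state. The class name Re3Tracker,
# the dummy crops, and the zero-state initialization are assumptions for illustration;
# the (2, CROP_SIZE, CROP_SIZE, 3) layout follows the single-pair forward placeholders
# used elsewhere in this repo (previous crop and current crop for one tracked object).
import numpy as np

tracker = Re3Tracker()  # assumed class name for the __init__ above
dummy_crops = np.zeros((2, CROP_SIZE, CROP_SIZE, 3), dtype=np.uint8)
zero_state = [np.zeros((1, LSTM_SIZE), dtype=np.float32) for _ in range(4)]
feed_dict = {tracker.imagePlaceholder: dummy_crops, tracker.batch_size: 1}
feed_dict.update(dict(zip(tracker.prevLstmState, zero_state)))
raw_output, lstm_state1, lstm_state2 = tracker.sess.run(
    [tracker.outputs, tracker.state1, tracker.state2], feed_dict=feed_dict)
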
def __init__(self, sess, copy_vars, gpu=None):
    self.sess = sess
    self.imagePlaceholder = tf.placeholder(tf.uint8, shape=(None, CROP_SIZE, CROP_SIZE, 3))
    self.prevLstmState = tuple([
        tf.placeholder(tf.float32, shape=(None, LSTM_SIZE)) for _ in range(4)
    ])
    self.batch_size = tf.placeholder(tf.int32, shape=())
    network_scope = 'test_network'
    if gpu is not None:
        with tf.device('/gpu:' + str(gpu)):
            with tf.variable_scope(network_scope):
                self.outputs, self.state1, self.state2 = network.inference(
                    self.imagePlaceholder, num_unrolls=1, batch_size=self.batch_size,
                    train=False, prevLstmState=self.prevLstmState)
    else:
        with tf.variable_scope(network_scope):
            self.outputs, self.state1, self.state2 = network.inference(
                self.imagePlaceholder, num_unrolls=1, batch_size=self.batch_size,
                train=False, prevLstmState=self.prevLstmState)
    # Build an op that copies the training network's weights (copy_vars) into this
    # test-network scope, so the copied tracker can be refreshed before evaluation.
    local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=network_scope)
    self.sync_op = self.sync_from(copy_vars, local_vars)
    self.tracked_data = {}
    self.time = 0
    self.total_forward_count = -1

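# sync_from is defined elsewhere in the repo and is not shown here. A minimal sketch of
# the usual weight-copy pattern (an assumption, not the repo's exact implementation):
# pair each source variable with its local counterpart, in the order tf.get_collection
# returns them, and group the assigns into one op.
def sync_from(self, src_vars, dst_vars):
    # One assign per variable pair; running the grouped op copies all weights at once.
    assign_ops = [dst.assign(src) for src, dst in zip(src_vars, dst_vars)]
    return tf.group(*assign_ops)

# Typical use: after some training steps, refresh the copied network before evaluating:
#     self.sess.run(self.sync_op)
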
def __init__(self, gpu_id=GPU_ID):
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
    # Resolve the base directory both for frozen (e.g. PyInstaller) and normal execution.
    if getattr(sys, 'frozen', False):
        basedir = os.path.dirname(sys.executable)
    elif __file__:
        basedir = os.path.dirname(__file__)
    tf.Graph().as_default()
    self.imagePlaceholder = tf.placeholder(tf.uint8, shape=(None, CROP_SIZE, CROP_SIZE, 3))
    self.prevLstmState = tuple([
        tf.placeholder(tf.float32, shape=(None, LSTM_SIZE)) for _ in range(4)
    ])
    self.batch_size = tf.placeholder(tf.int32, shape=())
    self.outputs, self.state1, self.state2, self.conv_layers1 = network.inference(
        self.imagePlaceholder, num_unrolls=1, batch_size=self.batch_size,
        train=False, prevLstmState=self.prevLstmState)
    self.conv_layers = network.returnConvLayers(
        self.imagePlaceholder, num_unrolls=1, batch_size=self.batch_size,
        train=False, prevLstmState=self.prevLstmState)
    self.sess = tf_util.Session()
    self.sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(
        os.path.join(basedir, '..', LOG_DIR, 'checkpoints'))
    if ckpt is None:
        raise IOError(
            'Checkpoint model could not be found. '
            'Did you download the pretrained weights? '
            'Download them here: http://bit.ly/2L5deYF and read the Model section of the Readme.')
    tf_util.restore(self.sess, ckpt.model_checkpoint_path)
    self.tracked_data = {}
    self.time = 0
    self.total_forward_count = -1

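# Illustrative sketch (not part of the original class): a hypothetical helper method that
# fetches the conv_layers tensors built above for a pair of crops, using a zeroed LSTM
# state. The method name, the zero state, and the (2, CROP_SIZE, CROP_SIZE, 3) input
# layout are assumptions; numpy is assumed imported as np, as elsewhere in this repo.
def get_conv_activations(self, crops):
    # crops: uint8 array of shape (2, CROP_SIZE, CROP_SIZE, 3) -- previous and current crop.
    zero_state = [np.zeros((1, LSTM_SIZE), dtype=np.float32) for _ in range(4)]
    feed_dict = {self.imagePlaceholder: crops, self.batch_size: 1}
    feed_dict.update(dict(zip(self.prevLstmState, zero_state)))
    # Returns whatever structure network.returnConvLayers produced (e.g. a list of
    # intermediate activation arrays) for visualization or debugging.
    return self.sess.run(self.conv_layers, feed_dict=feed_dict)
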
    # Closes the detection-to-tracker association helper (see the sketch after this block).
    return matches, np.array(unmatched_detections), np.array(unmatched_trackers)


# Main script begins here.
tf_vars = {}
tf.Graph().as_default()
tf_vars['imagePlaceholder'] = tf.placeholder(tf.uint8, shape=(None, 227, 227, 3))
tf_vars['prevLstmState'] = tuple(
    [tf.placeholder(tf.float32, shape=(None, 1024)) for _ in range(4)])
tf_vars['batch_size'] = tf.placeholder(tf.int32, shape=())
tf_vars['outputs'], tf_vars['state1'], tf_vars['state2'] = network.inference(
    tf_vars['imagePlaceholder'], num_unrolls=1, batch_size=tf_vars['batch_size'],
    train=False, prevLstmState=tf_vars['prevLstmState'])
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
tf_vars['sess'] = tf.Session(config=config)
basedir = os.path.dirname(__file__)  # directory of this script, used to locate the checkpoint
ckpt = tf.train.get_checkpoint_state(
    os.path.join(basedir, '..', '../logs/', 'checkpoints'))
tf_util.restore(tf_vars['sess'], ckpt.model_checkpoint_path)
tf_vars['tracked_data'] = {}
tf_vars['total_forward_count'] = -1
image_paths = sorted(
    glob.glob(os.path.join(os.path.dirname(__file__), 'data', '*.jpg')))
seq = '/home/deepak/Desktop/Ass2/re3-tensorflow/MOT17-01-DPM'
seq_dets = np.loadtxt('%s/det/det.txt' % (seq), delimiter=',')

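# The "return matches, ..." line at the top of this block closes a SORT-style association
# helper whose body is not shown. A minimal sketch of that pattern (an assumption, not the
# original code): match detections to trackers by IoU using the Hungarian algorithm.
import numpy as np
from scipy.optimize import linear_sum_assignment


def iou_xyxy(a, b):
    # Intersection-over-union of two [x1, y1, x2, y2] boxes.
    xx1, yy1 = max(a[0], b[0]), max(a[1], b[1])
    xx2, yy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0., xx2 - xx1) * max(0., yy2 - yy1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter + 1e-9)


def associate_detections_to_trackers(detections, trackers, iou_threshold=0.3):
    if len(detections) == 0 or len(trackers) == 0:
        return [], np.arange(len(detections)), np.arange(len(trackers))
    iou_matrix = np.array([[iou_xyxy(d, t) for t in trackers] for d in detections])
    # Hungarian assignment on negated IoU = maximize total overlap.
    det_idx, trk_idx = linear_sum_assignment(-iou_matrix)
    matches, matched_d, matched_t = [], set(), set()
    for d, t in zip(det_idx, trk_idx):
        if iou_matrix[d, t] >= iou_threshold:
            matches.append((d, t))
            matched_d.add(d)
            matched_t.add(t)
    unmatched_detections = [d for d in range(len(detections)) if d not in matched_d]
    unmatched_trackers = [t for t in range(len(trackers)) if t not in matched_t]
    return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
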
def main(FLAGS):
    global PORT, delta, REPLAY_BUFFER_SIZE
    delta = FLAGS.delta
    batchSize = FLAGS.batch_size
    timing = FLAGS.timing
    debug = FLAGS.debug or FLAGS.output
    PORT = FLAGS.port
    os.environ['CUDA_VISIBLE_DEVICES'] = str(FLAGS.cuda_visible_devices)
    np.set_printoptions(suppress=True)
    np.set_printoptions(precision=4)

    # Tensorflow setup
    if not os.path.exists(LOG_DIR):
        os.makedirs(LOG_DIR)
    if not os.path.exists(LOG_DIR + '/checkpoints'):
        os.makedirs(LOG_DIR + '/checkpoints')

    tf.Graph().as_default()
    tf.logging.set_verbosity(tf.logging.INFO)
    sess = tf_util.Session()

    # Create the nodes for single image forward passes for learning to fix mistakes.
    # Parameters here are shared with the learned network.
    if ',' in FLAGS.cuda_visible_devices:
        with tf.device('/gpu:1'):
            forwardNetworkImagePlaceholder = tf.placeholder(
                tf.uint8, shape=(2, CROP_SIZE, CROP_SIZE, 3))
            prevLstmState = tuple([
                tf.placeholder(tf.float32, shape=(1, LSTM_SIZE)) for _ in range(4)
            ])
            initialLstmState = tuple([np.zeros((1, LSTM_SIZE)) for _ in range(4)])
            networkOutputs, state1, state2 = network.inference(
                forwardNetworkImagePlaceholder, num_unrolls=1, train=False,
                prevLstmState=prevLstmState, reuse=False)
    else:
        forwardNetworkImagePlaceholder = tf.placeholder(
            tf.uint8, shape=(2, CROP_SIZE, CROP_SIZE, 3))
        prevLstmState = tuple([
            tf.placeholder(tf.float32, shape=(1, LSTM_SIZE)) for _ in range(4)
        ])
        initialLstmState = tuple([np.zeros((1, LSTM_SIZE)) for _ in range(4)])
        networkOutputs, state1, state2 = network.inference(
            forwardNetworkImagePlaceholder, num_unrolls=1, train=False,
            prevLstmState=prevLstmState, reuse=False)

    tf_dataset_obj = tf_dataset.Dataset(sess, delta, batchSize * 2, PORT, debug=FLAGS.debug)
    tf_dataset_obj.initialize_tf_placeholders(
        forwardNetworkImagePlaceholder, prevLstmState, networkOutputs, state1, state2)
    tf_dataset_iterator = tf_dataset_obj.get_dataset(batchSize)
    imageBatch, labelsBatch = tf_dataset_iterator.get_next()
    imageBatch = tf.reshape(imageBatch, (batchSize * delta * 2, CROP_SIZE, CROP_SIZE, 3))
    labelsBatch = tf.reshape(labelsBatch, (batchSize * delta, -1))

    learningRate = tf.placeholder(tf.float32)
    imagePlaceholder = tf.placeholder(
        tf.uint8, shape=(batchSize, delta * 2, CROP_SIZE, CROP_SIZE, 3))
    labelPlaceholder = tf.placeholder(tf.float32, shape=(batchSize, delta, 4))

    if ',' in FLAGS.cuda_visible_devices:
        with tf.device('/gpu:0'):
            tfOutputs = network.inference(imageBatch, num_unrolls=delta, train=True, reuse=True)
            tfLossFull, tfLoss = network.loss(tfOutputs, labelsBatch)
            train_op = network.training(tfLossFull, learningRate)
    else:
        tfOutputs = network.inference(imageBatch, num_unrolls=delta, train=True, reuse=True)
        tfLossFull, tfLoss = network.loss(tfOutputs, labelsBatch)
        train_op = network.training(tfLossFull, learningRate)

    loss_summary_op = tf.summary.merge([
        tf.summary.scalar('loss', tfLoss),
        tf.summary.scalar('l2_regularizer', tfLossFull - tfLoss),
    ])

    train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    longSaver = tf.train.Saver()

    # Initialize the network and load saved parameters.
    sess.run(init)
    startIter = 0
    if FLAGS.restore:
        print('Restoring')
        startIter = tf_util.restore_from_dir(sess, os.path.join(LOG_DIR, 'checkpoints'))
    if not debug:
        tt = time.localtime()
        time_str = ('%04d_%02d_%02d_%02d_%02d_%02d' %
                    (tt.tm_year, tt.tm_mon, tt.tm_mday, tt.tm_hour, tt.tm_min, tt.tm_sec))
        summary_writer = tf.summary.FileWriter(
            LOG_DIR + '/train/' + time_str + '_n_' + str(delta) + '_b_' + str(batchSize),
            sess.graph)
        summary_full = tf.summary.merge_all()
        conv_var_list = [
            v for v in tf.trainable_variables()
            if 'conv' in v.name and 'weight' in v.name and
            (v.get_shape().as_list()[0] != 1 or v.get_shape().as_list()[1] != 1)
        ]
        for var in conv_var_list:
            tf_util.conv_variable_summaries(var, scope=var.name.replace('/', '_')[:-2])
        summary_with_images = tf.summary.merge_all()

    # Logging stuff
    robustness_ph = tf.placeholder(tf.float32, shape=[])
    lost_targets_ph = tf.placeholder(tf.float32, shape=[])
    mean_iou_ph = tf.placeholder(tf.float32, shape=[])
    avg_ph = tf.placeholder(tf.float32, shape=[])
    if FLAGS.run_val:
        val_gpu = None if FLAGS.val_device == '0' else FLAGS.val_device
        test_tracker = re3_tracker.CopiedRe3Tracker(sess, train_vars, val_gpu)
        test_runner = test_net.TestTrackerRunner(test_tracker)
        with tf.name_scope('test'):
            test_summary_op = tf.summary.merge([
                tf.summary.scalar('robustness', robustness_ph),
                tf.summary.scalar('lost_targets', lost_targets_ph),
                tf.summary.scalar('mean_iou', mean_iou_ph),
                tf.summary.scalar('avg_iou_robustness', avg_ph),
            ])

    if debug:
        cv2.namedWindow('debug', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('debug', OUTPUT_WIDTH, OUTPUT_HEIGHT)

    sess.graph.finalize()

    try:
        timeTotal = 0.000001
        numIters = 0
        iteration = startIter
        # Run training iterations in the main thread.
        while iteration < FLAGS.max_steps:
            if (iteration - 1) % 10 == 0:
                currentTimeStart = time.time()
            startSolver = time.time()
            if debug:
                _, outputs, lossValue, images, labels = sess.run(
                    [train_op, tfOutputs, tfLoss, imageBatch, labelsBatch],
                    feed_dict={learningRate: LEARNING_RATE})
                debug_feed_dict = {
                    imagePlaceholder: images,
                    labelPlaceholder: labels,
                }
            else:
                if iteration % 10 == 0:
                    _, lossValue, loss_summary = sess.run(
                        [train_op, tfLoss, loss_summary_op],
                        feed_dict={learningRate: LEARNING_RATE})
                    summary_writer.add_summary(loss_summary, iteration)
                else:
                    _, lossValue = sess.run(
                        [train_op, tfLoss],
                        feed_dict={learningRate: LEARNING_RATE})
            endSolver = time.time()

            numIters += 1
            iteration += 1

            timeTotal += (endSolver - startSolver)
            if timing and (iteration - 1) % 10 == 0:
                print('Iteration: %d' % (iteration - 1))
                print('Loss: %.3f' % lossValue)
                print('Average Time: %.3f' % (timeTotal / numIters))
                print('Current Time: %.3f' % (endSolver - startSolver))
                if numIters > 20:
                    print('Current Average: %.3f' % ((time.time() - currentTimeStart) / 10))
                print('')

            # Save a checkpoint and remove old ones.
            if iteration % 500 == 0 or iteration == FLAGS.max_steps:
                checkpoint_file = os.path.join(LOG_DIR, 'checkpoints', 'model.ckpt')
                saver.save(sess, checkpoint_file, global_step=iteration)
                print("Saving checkpoint at " + checkpoint_file)
                if FLAGS.clearSnapshots:
                    files = glob.glob(LOG_DIR + '/checkpoints/*')
                    for file in files:
                        basename = os.path.basename(file)
                        if (os.path.isfile(file) and str(iteration) not in file
                                and 'checkpoint' not in basename):
                            os.remove(file)

            # Every once in a while save a checkpoint that isn't ever removed except by hand.
            if iteration % 10000 == 0 or iteration == FLAGS.max_steps:
                if not os.path.exists(LOG_DIR + '/checkpoints/long_checkpoints'):
                    os.makedirs(LOG_DIR + '/checkpoints/long_checkpoints')
                checkpoint_file = os.path.join(LOG_DIR, 'checkpoints/long_checkpoints', 'model.ckpt')
                longSaver.save(sess, checkpoint_file, global_step=iteration)

            if not debug:
                if (numIters == 1 or iteration % 100 == 0
                        or iteration == FLAGS.max_steps):
                    # Write out the full graph sometimes.
                    if numIters == 1 or iteration == FLAGS.max_steps:
                        print('Running detailed summary')
                        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                        run_metadata = tf.RunMetadata()
                        _, summary_str = sess.run(
                            [train_op, summary_with_images],
                            options=run_options,
                            run_metadata=run_metadata,
                            feed_dict={learningRate: LEARNING_RATE})
                        summary_writer.add_run_metadata(run_metadata, 'step_%07d' % iteration)
                    elif iteration % 1000 == 0:
                        _, summary_str = sess.run(
                            [train_op, summary_with_images],
                            feed_dict={learningRate: LEARNING_RATE})
                        print('Running image summary')
                    else:
                        print('Running summary')
                        _, summary_str = sess.run(
                            [train_op, summary_full],
                            feed_dict={learningRate: LEARNING_RATE})
                    summary_writer.add_summary(summary_str, iteration)
                    summary_writer.flush()

                if FLAGS.run_val and (numIters == 1 or iteration % 500 == 0):
                    # Run a validation set eval in a separate thread.
                    def test_func(test_iter_on):
                        print('Starting test iter', test_iter_on)
                        test_runner.reset()
                        result = test_runner.run_test(dataset=FLAGS.val_dataset, display=False)
                        summary_str = sess.run(
                            test_summary_op,
                            feed_dict={
                                robustness_ph: result['robustness'],
                                lost_targets_ph: result['lostTarget'],
                                mean_iou_ph: result['meanIou'],
                                avg_ph: (result['meanIou'] + result['robustness']) / 2,
                            })
                        summary_writer.add_summary(summary_str, test_iter_on)
                        os.remove('results.json')
                        print('Ending test iter', test_iter_on)

                    test_thread = threading.Thread(target=test_func, args=(iteration,))
                    test_thread.start()

            if FLAGS.output:
                # Look at some of the outputs.
                print('new batch')
                images = debug_feed_dict[imagePlaceholder].astype(np.uint8).reshape(
                    (batchSize, delta, 2, CROP_SIZE, CROP_SIZE, 3))
                labels = debug_feed_dict[labelPlaceholder].reshape((batchSize, delta, 4))
                outputs = outputs.reshape((batchSize, delta, 4))
                for bb in range(batchSize):
                    for dd in range(delta):
                        image0 = images[bb, dd, 0, ...]
                        image1 = images[bb, dd, 1, ...]
                        label = labels[bb, dd, :]
                        xyxyLabel = label / 10
                        labelBox = xyxyLabel * CROP_PAD
                        output = outputs[bb, dd, ...]
                        xyxyPred = output / 10
                        outputBox = xyxyPred * CROP_PAD
                        drawing.drawRect(
                            image0,
                            bb_util.xywh_to_xyxy(np.full((4, 1), .5) * CROP_SIZE),
                            2, [255, 0, 0])
                        drawing.drawRect(image1, xyxyLabel * CROP_SIZE, 2, [0, 255, 0])
                        drawing.drawRect(image1, xyxyPred * CROP_SIZE, 2, [255, 0, 0])
                        plots = [image0, image1]
                        subplot = drawing.subplot(
                            plots, 1, 2,
                            outputWidth=OUTPUT_WIDTH, outputHeight=OUTPUT_HEIGHT, border=5)
                        cv2.imshow('debug', subplot[:, :, ::-1])
                        cv2.waitKey(1)
    except:
        # Save if error or killed by ctrl-c.
        if not debug:
            print('Saving...')
            checkpoint_file = os.path.join(LOG_DIR, 'checkpoints', 'model.ckpt')
            saver.save(sess, checkpoint_file, global_step=iteration)
        raise

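# tf_util.restore_from_dir is a repo helper whose body is not shown here. A plausible
# sketch, inferred from how it is used in main() above and from the checkpoint handling
# at the end of this section (an assumption, not the repo's exact code): restore the
# latest checkpoint in the directory and return the global step parsed from its name.
def restore_from_dir(sess, checkpoint_dir):
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    if ckpt is None or ckpt.model_checkpoint_path is None:
        return 0  # nothing to restore; training starts from iteration 0
    tf.train.Saver().restore(sess, ckpt.model_checkpoint_path)
    # Checkpoint files are named model.ckpt-<global_step>; recover the step count.
    return int(ckpt.model_checkpoint_path.split('-')[-1])
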
port = 9997
delta = 2
debug = False

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
dataset = Dataset(sess, delta, 1, port, debug)

forwardNetworkImagePlaceholder = tf.placeholder(tf.uint8, shape=(2, CROP_SIZE, CROP_SIZE, 3))
prevLstmState = tuple(
    [tf.placeholder(tf.float32, shape=(1, LSTM_SIZE)) for _ in range(4)])
initialLstmState = tuple([np.zeros((1, LSTM_SIZE)) for _ in range(4)])
networkOutputs, state1, state2 = network.inference(
    forwardNetworkImagePlaceholder, num_unrolls=1, train=False,
    prevLstmState=prevLstmState, reuse=False)
dataset.initialize_tf_placeholders(
    forwardNetworkImagePlaceholder, prevLstmState, networkOutputs, state1, state2)

init = tf.global_variables_initializer()
sess.run(init)
ckpt = tf.train.get_checkpoint_state(LOG_DIR + '/checkpoints')
if ckpt and ckpt.model_checkpoint_path:
    tf_util.restore(sess, ckpt.model_checkpoint_path)
    startIter = int(ckpt.model_checkpoint_path.split('-')[-1])
    print('Restored', startIter)

iteration = 0
while True:
    iteration += 1