def eval():
    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(FLAGS.gpu)):
            pointclouds_pl, labels_pl, global_pl = MODEL.placeholder_inputs(
                BATCH_SIZE, NUM_POINT, NFEATURES, NUM_GLOB)
            batch = tf.Variable(0, trainable=False)
            alpha = tf.compat.v1.placeholder(tf.float32, shape=())
            is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=())

            pred, max_pool = MODEL.get_model(pointclouds_pl,
                                             is_training=is_training_pl,
                                             global_pl=global_pl,
                                             num_class=NUM_CATEGORIES)
            # k cluster centroids in the embedding space
            mu = tf.Variable(tf.zeros(shape=(FLAGS.n_clusters, FLAGS.max_dim)),
                             name="mu", trainable=False)

            classify_loss = MODEL.get_focal_loss(pred, labels_pl, NUM_CATEGORIES)
            kmeans_loss, stack_dist = MODEL.get_loss_kmeans(
                max_pool, mu, FLAGS.max_dim, FLAGS.n_clusters, alpha)

            saver = tf.compat.v1.train.Saver()

        config = tf.compat.v1.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        #config.log_device_placement = False
        sess = tf.compat.v1.Session(config=config)

        # Restore the clustering checkpoint if full training was run,
        # otherwise fall back to the pretraining checkpoint.
        if FULL_TRAINING:
            saver.restore(sess, os.path.join(LOG_DIR, 'cluster.ckpt'))
        else:
            saver.restore(sess, os.path.join(LOG_DIR, 'model.ckpt'))
        print('model restored')

        ops = {
            'pointclouds_pl': pointclouds_pl,
            'labels_pl': labels_pl,
            'is_training_pl': is_training_pl,
            'global_pl': global_pl,
            'mu': mu,
            'stack_dist': stack_dist,
            'kmeans_loss': kmeans_loss,
            'pred': pred,
            'alpha': alpha,
            'max_pool': max_pool,
            'classify_loss': classify_loss,
        }

        eval_one_epoch(sess, ops)
def eval():
    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(FLAGS.gpu)):
            pointclouds_pl, truth_pl, labels_pl = MODEL.placeholder_inputs(
                BATCH_SIZE, NUM_POINT, NFEATURES)
            batch = tf.Variable(0, trainable=False)
            is_training_pl = tf.placeholder(tf.bool, shape=())

            pred = MODEL.get_model(pointclouds_pl,
                                   is_training=is_training_pl,
                                   params=params,
                                   num_class=NUM_CATEGORIES)
            loss_CE = MODEL.get_loss_CE(pred, labels_pl)
            pred = tf.nn.softmax(pred)
            # Distance term: weight each point's xyz coordinates by the
            # predicted probability of class index 2 before comparing to truth.
            loss = loss_CE + MODEL.get_loss_CD(
                tf.multiply(
                    tf.reshape(pred[:, :, 2], [BATCH_SIZE, NUM_POINT, 1]),
                    pointclouds_pl[:, :, :3]),
                truth_pl)

            saver = tf.train.Saver()

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        sess = tf.Session(config=config)

        # Restore the numbered checkpoint when a model number is given,
        # otherwise restore the default checkpoint.
        if FLAGS.modeln >= 0:
            saver.restore(
                sess,
                os.path.join(MODEL_PATH, 'model_{}.ckpt'.format(FLAGS.modeln)))
        else:
            saver.restore(sess, os.path.join(MODEL_PATH, 'model.ckpt'))
        print('model restored')

        ops = {
            'pointclouds_pl': pointclouds_pl,
            'labels_pl': labels_pl,
            'truth_pl': truth_pl,
            'is_training_pl': is_training_pl,
            'pred': pred,
            'loss': loss,
        }

        eval_one_epoch(sess, ops)
def train():
    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(GPU_INDEX)):
            pointclouds_pl, labels_pl, global_pl = MODEL.placeholder_inputs(
                BATCH_SIZE, NUM_POINT, NUM_FEAT, NUM_GLOB)
            is_training_pl = tf.placeholder(tf.bool, shape=())

            # Note the global_step=batch parameter to minimize.
            # That tells the optimizer to increment the 'batch' variable
            # for you every time it trains.
            batch = tf.Variable(0)
            alpha = tf.placeholder(dtype=tf.float32, shape=())
            bn_decay = get_bn_decay(batch)
            tf.summary.scalar('bn_decay', bn_decay)

            print("--- Get model and loss")
            pred, max_pool = MODEL.get_model(
                pointclouds_pl,
                is_training=is_training_pl,
                global_pl=global_pl,
                bn_decay=bn_decay,
                num_class=NUM_CLASSES,
                weight_decay=FLAGS.wd,
            )
            # k cluster centroids in the embedding space; trainable so they
            # are refined together with the network during full training.
            mu = tf.Variable(tf.zeros(shape=(FLAGS.n_clusters, FLAGS.max_dim)),
                             name="mu", trainable=True)

            classify_loss = MODEL.get_focal_loss(pred, labels_pl, NUM_CLASSES)
            kmeans_loss, stack_dist = MODEL.get_loss_kmeans(
                max_pool, mu, FLAGS.max_dim, FLAGS.n_clusters, alpha)
            full_loss = kmeans_loss + classify_loss

            print("--- Get training operator")
            # Get training operator
            learning_rate = get_learning_rate(batch)
            tf.summary.scalar('learning_rate', learning_rate)
            if OPTIMIZER == 'momentum':
                optimizer = tf.train.MomentumOptimizer(learning_rate,
                                                       momentum=MOMENTUM)
            elif OPTIMIZER == 'adam':
                optimizer = tf.train.AdamOptimizer(learning_rate)
            # Pretraining minimizes only the classification loss; full
            # training also minimizes the k-means clustering loss.
            train_op = optimizer.minimize(classify_loss, global_step=batch)
            train_op_full = optimizer.minimize(full_loss, global_step=batch)

            # Add ops to save and restore all the variables.
            saver = tf.train.Saver()

        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        # Init variables
        sess.run(tf.global_variables_initializer())

        # Add summary writers
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
                                             sess.graph)
        test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'),
                                            sess.graph)

        print(
            "Total number of weights for the model: ",
            np.sum([
                np.prod(v.get_shape().as_list())
                for v in tf.trainable_variables()
            ]))

        ops = {
            'pointclouds_pl': pointclouds_pl,
            'labels_pl': labels_pl,
            'global_pl': global_pl,
            'is_training_pl': is_training_pl,
            'max_pool': max_pool,
            'pred': pred,
            'alpha': alpha,
            'stack_dist': stack_dist,
            'classify_loss': classify_loss,
            'kmeans_loss': kmeans_loss,
            'train_op': train_op,
            'train_op_full': train_op_full,
            'merged': merged,
            'step': batch,
        }

        best_acc = -1
        if FLAGS.min == 'loss':
            early_stop = np.inf
        else:
            early_stop = 0
        earlytol = 0

        for epoch in range(MAX_EPOCH):
            log_string('**** EPOCH %03d ****' % (epoch))
            sys.stdout.flush()

            is_full_training = epoch > MAX_PRETRAIN

            lss = eval_one_epoch(sess, ops, test_writer, is_full_training)

            if is_full_training:
                save_path = saver.save(sess,
                                       os.path.join(LOG_DIR, 'cluster.ckpt'))
            else:
                save_path = saver.save(sess,
                                       os.path.join(LOG_DIR, 'model.ckpt'))
            log_string("Model saved in file: %s" % save_path)

            max_pool = train_one_epoch(sess, ops, train_writer,
                                       is_full_training)

            # Once pretraining ends, initialize the centroids with k-means
            # on the max-pooled embeddings of the training set.
            if epoch == MAX_PRETRAIN:
                centers = KMeans(n_clusters=FLAGS.n_clusters).fit(
                    np.squeeze(max_pool))
                centers = centers.cluster_centers_
                sess.run(tf.assign(mu, centers))
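

# Hedged sketch, not part of the original scripts: a hypothetical helper
# showing how hard cluster assignments could be derived from the distances
# returned by MODEL.get_loss_kmeans, assuming `stack_dist` evaluates to an
# array of shape (n_clusters, batch_size).
def hard_cluster_assignments(dist):
    """Return the index of the closest centroid for every sample in a batch."""
    return np.argmin(dist, axis=0)


# Example usage inside an epoch loop (hypothetical feed_dict):
#   dist = sess.run(ops['stack_dist'], feed_dict=feed_dict)
#   assignments = hard_cluster_assignments(dist)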