def test_one_epoch(ops, pls, sess, saver, testset, epoch_num):
    """Run the classifier over testset, log IoU/accuracy, and save a checkpoint.

    The checkpoint filename embeds the epoch number and the achieved mean IoU.

    :param ops:       dict of TF ops/tensors ('preds')
    :param pls:       dict of TF placeholders ('feats', 'is_training')
    :param sess:      TF session
    :param saver:     tf.train.Saver used to write the checkpoint
    :param testset:   iterable yielding packed (feats, labels) batches
    :param epoch_num: current epoch index, used in the log and filename
    """
    feed_dict = {}
    begin_time = time.time()
    all_preds, all_labels = [], []
    for i, feed_in in enumerate(testset):
        feats, labels = unpack_feats_labels(feed_in, FLAGS.num_gpus)
        feed_dict[pls['feats']] = feats
        feed_dict[pls['is_training']] = False
        preds = sess.run(ops['preds'], feed_dict)
        all_preds.append(preds)
        all_labels.append(labels)

    all_preds = np.concatenate(all_preds, axis=0)
    all_labels = np.concatenate(all_labels, axis=0)
    iou, miou, oiou, acc, macc, oacc = compute_iou(all_labels, all_preds)

    # Fix: '{:5}' is a minimum field width, not a precision; use '{:.5}' so the
    # floats are rounded to 5 significant digits like the first placeholder.
    log_str(
        'mean iou {:.5} overall iou {:.5} \n mean acc {:.5} overall acc {:.5} cost {} s'
        .format(miou, oiou, macc, oacc, time.time() - begin_time),
        FLAGS.log_file)

    checkpoint_path = os.path.join(
        FLAGS.save_dir, 'model{}_{:.3}.ckpt'.format(epoch_num, miou))
    saver.save(sess, checkpoint_path)
def train_one_epoch(ops, pls, sess, summary_writer, trainset, epoch_num, feed_dict):
    """Run one unsupervised training epoch, logging loss and throughput
    every FLAGS.log_step batches.

    :param feed_dict: caller-owned feed dict; this function mutates it in place
                      and relies on any entries the caller pre-populated.
    """
    example_count = 0
    window_start = time.time()
    for step, batch in enumerate(trainset):
        points_list, covars_list = unpack_feats_labels(batch, FLAGS.num_gpus)
        # Only xyz coordinates are fed as points; covariances go in separately.
        feed_dict[pls['points']] = points_list[:, :, :3]
        feed_dict[pls['covars']] = covars_list
        example_count += points_list.shape[0]

        sess.run(ops['apply_grad'], feed_dict)

        if step % FLAGS.log_step == 0:
            total_loss, summary, global_step = sess.run(
                [ops['total_loss'], ops['summary'], ops['global_step']],
                feed_dict)
            log_str(
                'epoch {} step {} loss {:.5} | {:.5} examples/s'.format(
                    epoch_num, step, total_loss / FLAGS.num_gpus,
                    float(example_count) / (time.time() - window_start)),
                FLAGS.log_file)
            summary_writer.add_summary(summary, global_step)
            # Reset the throughput window after each log line.
            example_count = 0
            window_start = time.time()
def test_one_epoch(ops, pls, sess, saver, testset, epoch_num, feed_dict):
    """Evaluate mean reconstruction loss over testset, dump up to
    FLAGS.dump_num generated samples, then write a checkpoint."""
    model_count = 0
    start = time.time()
    recon_losses = []
    dump_budget = FLAGS.dump_num
    for batch in testset:
        points_list, covars_list, labels_list, voxel_state_list = \
            unpack_feats_labels(batch, FLAGS.num_gpus)
        feed_dict[pls['points']] = points_list
        feed_dict[pls['covars']] = covars_list
        # NOTE(review): is_training is fed as True during testing — confirm this
        # is intentional (e.g. keeping batch-norm/dropout in train mode for the
        # generator) rather than a copy-paste slip.
        feed_dict[pls['is_training']] = True
        feed_dict[pls['voxel_state']] = voxel_state_list
        model_count += points_list.shape[0]

        recon_loss_val, gen_state = sess.run(
            [ops['total_recon_loss'], ops['voxel_state']], feed_dict)
        recon_losses.append(recon_loss_val)

        # Dump generated samples until the budget is exhausted; dump_budget also
        # serves as a unique index in the output filenames.
        if dump_budget > 0:
            for k in range(min(dump_budget, points_list.shape[0])):
                output_gen_points(points_list[k], voxel_state_list[k],
                                  gen_state[k], dump_budget, epoch_num,
                                  FLAGS.dump_dir)
                dump_budget -= 1

    log_str(
        'test epoch {} recon_loss {:.5} | {:.5} examples/s'.format(
            epoch_num, np.mean(np.asarray(recon_losses)),
            float(model_count) / (time.time() - start)),
        FLAGS.log_file)
    checkpoint_path = os.path.join(FLAGS.save_dir,
                                   'unsupervise{}.ckpt'.format(epoch_num))
    saver.save(sess, checkpoint_path)
def train_one_epoch(ops, pls, sess, summary_writer, trainset, epoch_num):
    """Supervised training epoch: apply gradients per batch and log a
    windowed running accuracy every FLAGS.log_step batches."""
    feed_dict = {}
    num_correct, num_seen = 0, 0
    window_start = time.time()
    for step, batch in enumerate(trainset):
        feats, labels = unpack_feats_labels(batch, FLAGS.num_gpus)
        feed_dict[pls['feats']] = feats
        feed_dict[pls['labels']] = labels.flatten()
        feed_dict[pls['is_training']] = True

        _, correct_num = sess.run([ops['apply_grad'], ops['correct_num']],
                                  feed_dict)
        num_correct += correct_num
        num_seen += labels.shape[0]

        if step % FLAGS.log_step == 0:
            total_loss, summary, global_step = sess.run(
                [ops['total_loss'], ops['summary'], ops['global_step']],
                feed_dict)
            accuracy = float(num_correct) / num_seen
            log_str(
                'epoch {} step {} loss {:.5} accuracy {:.5} | {:.5} examples/s'
                .format(epoch_num, step, total_loss, accuracy,
                        float(num_seen) / (time.time() - window_start)),
                FLAGS.log_file)
            summary_writer.add_summary(summary, global_step)
            # Start a fresh accuracy/throughput window.
            num_correct, num_seen = 0, 0
            window_start = time.time()
def train_one_epoch(ops, pls, sess, summary_writer, trainset, epoch_num, feed_dict):
    """Unsupervised reconstruction training epoch with windowed loss logging.

    :param feed_dict: caller-owned feed dict, mutated in place each batch.
    """
    total_model = 0
    begin_time = time.time()
    total_recon_losses = []
    for i, feed_in in enumerate(trainset):
        points_list, covars_list, labels_list, voxel_state_list = \
            unpack_feats_labels(feed_in, FLAGS.num_gpus)
        feed_dict[pls['points']] = points_list
        feed_dict[pls['covars']] = covars_list
        feed_dict[pls['is_training']] = True
        feed_dict[pls['voxel_state']] = voxel_state_list
        total_model += points_list.shape[0]

        _, recon_loss_val = sess.run(
            [ops['apply_grad'], ops['total_recon_loss']], feed_dict)
        total_recon_losses.append(recon_loss_val)

        if i % FLAGS.log_step == 0:
            # Fix: the generated voxel state (ops['voxel_state']) was also
            # fetched here but never used; fetching only what is needed avoids
            # an extra forward fetch/device-to-host copy.
            summary, global_step = sess.run(
                [ops['summary'], ops['global_step']], feed_dict)
            log_str(
                'epoch {} step {} recon_loss {:.5} | {:.5} examples/s'.format(
                    epoch_num, i, np.mean(np.asarray(total_recon_losses)),
                    float(total_model) / (time.time() - begin_time)),
                FLAGS.log_file)
            summary_writer.add_summary(summary, global_step)
            # Reset the logging window.
            total_model = 0
            begin_time = time.time()
            total_recon_losses = []
def test_one_epoch(ops, pls, sess, saver, testset, epoch_num, feed_dict): total = 0 begin_time = time.time() test_loss, test_recon_loss = [], [] all_preds, all_labels = [], [] left_size = FLAGS.dump_num for i, feed_in in enumerate(testset): points_list, covars_list, rpoints_list, labels_list, voxel_state_list, voxel_color_list=\ unpack_feats_labels(feed_in,FLAGS.num_gpus) feed_dict[pls['points']] = points_list feed_dict[pls['covars']] = covars_list feed_dict[pls['rpoints']] = rpoints_list feed_dict[pls['labels']] = labels_list feed_dict[pls['voxel_state']] = voxel_state_list feed_dict[pls['voxel_color']] = voxel_color_list total += points_list.shape[0] _, loss, recon_loss, preds, gen_color, gen_state = sess.run([ ops['apply_recon_grad'], ops['total_loss'], ops['total_recon_loss'], ops['preds'], ops['voxel_color'], ops['voxel_state'] ], feed_dict) test_loss.append(loss) test_recon_loss.append(recon_loss) all_preds.append(preds.flatten()) all_labels.append(labels_list.flatten()) if left_size > 0 and random.random() < 0.3: output_gen_points(points_list, voxel_state_list, voxel_color_list, gen_state, gen_color, left_size, epoch_num) left_size -= 1 all_preds = np.concatenate(all_preds, axis=0) all_labels = np.concatenate(all_labels, axis=0) test_loss = np.mean(np.asarray(test_loss)) test_recon_loss = np.mean(np.asarray(test_recon_loss)) iou, miou, oiou, acc, macc, oacc = compute_iou(all_labels, all_preds) if not FLAGS.eval: log_str( 'mean iou {:.5} overall iou {:.5} loss {:.5} recon_loss {:.5} \n mean acc {:.5} overall acc {:.5} cost {:.3} s' .format(miou, oiou, test_loss, test_recon_loss, macc, oacc, time.time() - begin_time), FLAGS.log_file) checkpoint_path = os.path.join(FLAGS.save_dir, 'unsupervise{}.ckpt'.format(epoch_num)) saver.save(sess, checkpoint_path) else: print 'mean iou {:.5} overall iou {:5} loss {:5} \n mean acc {:5} overall acc {:5} cost {:3} s'.format( miou, oiou, test_loss, macc, oacc, time.time() - begin_time) names = get_class_names() for name, 
iou_val in zip(names, iou): print '{} : {}'.format(name, iou_val)
def test_one_epoch(ops, pls, sess, saver, testset, epoch_num, feed_dict):
    """Measure the reconstruction test loss and dump a few random
    (true, reconstructed) point-cloud pairs, then checkpoint."""
    seen = 0
    start = time.time()
    losses = []
    dump_budget = FLAGS.dump_num
    for batch in testset:
        points_list, covars_list = unpack_feats_labels(batch, FLAGS.num_gpus)
        feed_dict[pls['points']] = points_list[:, :, :3]
        feed_dict[pls['covars']] = covars_list
        seen += points_list.shape[0]

        loss, gen_pts = sess.run([ops['total_loss'], ops['gen_pts']],
                                 feed_dict)
        losses.append(loss / FLAGS.num_gpus)

        # Dump up to three randomly chosen samples per batch while the budget
        # lasts; dump_budget doubles as a unique filename index.
        for _ in range(3):
            if dump_budget > 0 and random.random() < 0.8:
                idx = np.random.randint(0, points_list.shape[0], dtype=np.int)
                fn = os.path.join(
                    FLAGS.dump_dir,
                    '{}_{}_true.txt'.format(epoch_num, dump_budget))
                output_points(fn, points_list[idx, :, :3])
                fn = os.path.join(
                    FLAGS.dump_dir,
                    '{}_{}_recon.txt'.format(epoch_num, dump_budget))
                output_points(fn, gen_pts[idx, :, :3])
                dump_budget -= 1

    mean_loss = np.mean(np.asarray(losses))
    log_str(
        'epoch {} test_loss {} cost {} s'.format(epoch_num, mean_loss,
                                                 time.time() - start),
        FLAGS.log_file)
    checkpoint_path = os.path.join(FLAGS.save_dir,
                                   'unsupervise{}.ckpt'.format(epoch_num))
    saver.save(sess, checkpoint_path)
def train_one_epoch(ops, pls, sess, summary_writer, trainset, epoch_num, feed_dict):
    """Supervised multi-tower training epoch: feed each GPU tower its slice of
    the batch, apply gradients, and log windowed loss/accuracy/throughput."""
    epoch_begin = time.time()
    n_correct, n_block, n_points = 0, 0, 0
    window_start = time.time()
    window_losses = []
    for step, batch in enumerate(trainset):
        points_list, labels_list, nidxs_list, nidxs_lens_list, nidxs_bgs_list = \
            unpack_feats_labels(batch, FLAGS.num_gpus)
        # Per-GPU placeholders: one entry of each list per tower.
        for k in xrange(FLAGS.num_gpus):
            feed_dict[pls['points'][k]] = points_list[k]
            feed_dict[pls['labels'][k]] = labels_list[k].flatten()
            feed_dict[pls['nidxs'][k]] = nidxs_list[k]
            feed_dict[pls['nidxs_lens'][k]] = nidxs_lens_list[k]
            feed_dict[pls['nidxs_bgs'][k]] = nidxs_bgs_list[k]
            n_points += labels_list[k].shape[0]
        feed_dict[pls['is_training']] = True
        n_block += FLAGS.num_gpus

        _, loss_val, correct_num = sess.run(
            [ops['apply_grad'], ops['total_loss'], ops['correct_num']],
            feed_dict)
        window_losses.append(loss_val)
        n_correct += correct_num

        if step % FLAGS.log_step == 0:
            summary, global_step = sess.run(
                [ops['summary'], ops['global_step']], feed_dict)
            log_str(
                'epoch {} step {} loss {:.5} acc {:.5} | {:.5} examples/s'.
                format(epoch_num, step, np.mean(np.asarray(window_losses)),
                       float(n_correct) / n_points,
                       float(n_block) / (time.time() - window_start)),
                FLAGS.log_file)
            summary_writer.add_summary(summary, global_step)
            # Start a fresh logging window.
            n_correct, n_block, n_points = 0, 0, 0
            window_start = time.time()
            window_losses = []

    log_str('epoch {} cost {} s'.format(epoch_num, time.time() - epoch_begin),
            FLAGS.log_file)
def test_one_epoch(ops, pls, sess, saver, testset, epoch_num, feed_dict): begin_time = time.time() test_loss = [] all_preds, all_labels = [], [] for i, feed_in in enumerate(testset): points_list, labels_list = unpack_feats_labels(feed_in, FLAGS.num_gpus) for k in xrange(FLAGS.num_gpus): feed_dict[pls['points'][k]] = np.expand_dims(points_list[k], axis=0) feed_dict[pls['labels'][k]] = np.expand_dims( labels_list[k].flatten(), axis=0) all_labels.append(labels_list[k].flatten()) feed_dict[pls['is_training']] = False loss, preds = sess.run([ops['total_loss'], ops['preds']], feed_dict) test_loss.append(loss) all_preds.append(preds) all_preds = np.concatenate(all_preds, axis=0) all_labels = np.concatenate(all_labels, axis=0) test_loss = np.mean(np.asarray(test_loss)) iou, miou, oiou, acc, macc, oacc = compute_iou(all_labels, all_preds) if not FLAGS.eval: log_str( 'mean iou {:.5} overall iou {:5} loss {:5} \n mean acc {:5} overall acc {:5} cost {:3} s' .format(miou, oiou, test_loss, macc, oacc, time.time() - begin_time), FLAGS.log_file) checkpoint_path = os.path.join(FLAGS.save_dir, 'unsupervise{}.ckpt'.format(epoch_num)) saver.save(sess, checkpoint_path) else: print 'mean iou {:.5} overall iou {:5} loss {:5} \n mean acc {:5} overall acc {:5} cost {:3} s'.format( miou, oiou, test_loss, macc, oacc, time.time() - begin_time) names = get_class_names() for name, iou_val in zip(names, iou): print '{} : {}'.format(name, iou_val)
def test_one_epoch(ops, pls, sess, saver, testset, epoch_num, feed_dict):
    """Evaluate classification accuracy and reconstruction loss, dump a few
    generated samples, log the results, and write a checkpoint."""
    seen = 0
    start = time.time()
    cls_losses, recon_losses = [], []
    pred_chunks, label_chunks = [], []
    dump_budget = FLAGS.dump_num
    for batch in testset:
        points_list, covars_list, labels_list, voxel_state_list = \
            unpack_feats_labels(batch, FLAGS.num_gpus)
        feed_dict[pls['points']] = points_list
        feed_dict[pls['covars']] = covars_list
        # One label per model: feed the first column only.
        feed_dict[pls['labels']] = labels_list[:, 0]
        feed_dict[pls['is_training']] = False
        feed_dict[pls['voxel_state']] = voxel_state_list
        seen += points_list.shape[0]

        loss, recon_loss, preds, gen_state = sess.run(
            [ops['total_loss'], ops['total_recon_loss'], ops['preds'],
             ops['voxel_state']], feed_dict)
        cls_losses.append(loss)
        recon_losses.append(recon_loss)
        pred_chunks.append(preds.flatten())
        label_chunks.append(labels_list.flatten())

        # Randomly dump generated samples until the budget is spent.
        if dump_budget > 0 and random.random() < 0.3:
            output_gen_points(points_list, voxel_state_list, gen_state,
                              dump_budget, epoch_num)
            dump_budget -= 1

    all_preds = np.concatenate(pred_chunks, axis=0)
    all_labels = np.concatenate(label_chunks, axis=0)
    mean_loss = np.mean(np.asarray(cls_losses))
    mean_recon = np.mean(np.asarray(recon_losses))
    acc, macc, oacc = compute_acc(all_labels, all_preds, FLAGS.num_classes)

    log_str('mean acc {:.5} overall acc {:.5} loss {:.5} recon_loss {:.5} cost {:.3} s'.format(
        macc, oacc, mean_loss, mean_recon, time.time() - start
    ), FLAGS.log_file)

    checkpoint_path = os.path.join(FLAGS.save_dir,
                                   'unsupervise{}.ckpt'.format(epoch_num))
    saver.save(sess, checkpoint_path)
def train_one_epoch(ops, pls, sess, summary_writer, trainset, epoch_num, feed_dict):
    """Joint segmentation + reconstruction training epoch with windowed
    loss/accuracy/throughput logging every FLAGS.log_step batches."""
    n_correct, n_block, n_points = 0, 0, 0
    window_start = time.time()
    window_recon, window_loss = [], []
    for step, batch in enumerate(trainset):
        points_list, covars_list, rpoints_list, labels_list, voxel_state_list, voxel_color_list = \
            unpack_feats_labels(batch, FLAGS.num_gpus)
        feed_dict[pls['points']] = points_list
        feed_dict[pls['covars']] = covars_list
        feed_dict[pls['rpoints']] = rpoints_list
        feed_dict[pls['labels']] = labels_list
        feed_dict[pls['voxel_state']] = voxel_state_list
        feed_dict[pls['voxel_color']] = voxel_color_list
        # Blocks processed and total points (blocks * points-per-block).
        n_block += points_list.shape[0]
        n_points += points_list.shape[0] * points_list.shape[1]

        _, loss_val, recon_loss_val, correct_num = sess.run([
            ops['apply_grad'], ops['total_loss'], ops['total_recon_loss'],
            ops['correct_num']
        ], feed_dict)
        window_loss.append(loss_val)
        window_recon.append(recon_loss_val)
        n_correct += correct_num

        if step % FLAGS.log_step == 0:
            summary, global_step = sess.run(
                [ops['summary'], ops['global_step']], feed_dict)
            log_str(
                'epoch {} step {} loss {:.5} recon_loss {:.5} acc {:.5} | {:.5} examples/s'
                .format(epoch_num, step, np.mean(np.asarray(window_loss)),
                        np.mean(np.asarray(window_recon)),
                        float(n_correct) / n_points,
                        float(n_block) / (time.time() - window_start)),
                FLAGS.log_file)
            summary_writer.add_summary(summary, global_step)
            # Start a fresh logging window.
            n_correct, n_block, n_points = 0, 0, 0
            window_start = time.time()
            window_recon, window_loss = [], []
def test_one_epoch(ops, pls, sess, saver, testset, epoch_num, feed_dict):
    """Evaluate the voxel reconstruction loss over testset, dump true/predicted
    state and color voxel grids for a few random samples, and checkpoint."""
    total = 0
    begin_time = time.time()
    test_loss = []
    left_size = FLAGS.dump_num
    for i, feed_in in enumerate(testset):
        points_list, covars_list, voxel_state_list, voxel_color_list = \
            unpack_feats_labels(feed_in, FLAGS.num_gpus)
        feed_dict[pls['points']] = points_list
        feed_dict[pls['covars']] = covars_list
        feed_dict[pls['voxel_state']] = voxel_state_list
        feed_dict[pls['voxel_color']] = voxel_color_list
        total += points_list.shape[0]

        loss, gen_state, gen_color = sess.run(
            [ops['total_loss'], ops['voxel_state'], ops['voxel_color']],
            feed_dict)
        test_loss.append(loss / FLAGS.num_gpus)

        # Dump up to three random samples per batch.
        # Fix: the inner loop previously reused `i`, shadowing the enumerate
        # index of the outer dataset loop.
        for _ in range(3):
            if left_size > 0 and random.random() < 0.9:
                idx = np.random.randint(0, points_list.shape[0], dtype=np.int)
                # Fix: indexing with a scalar idx yields a *view*, so the
                # in-place shift/scale below used to mutate points_list itself
                # (corrupting any later dump of the same sample). Copy first.
                pts = points_list[idx, :, :].copy()
                pts[:, :2] += 0.5       # shift xy for dumping
                pts[:, 3:] += 1.0       # rescale color channels for dumping
                pts[:, 3:] *= 127
                fn = os.path.join(
                    FLAGS.dump_dir,
                    '{}_{}_points.txt'.format(epoch_num, left_size))
                output_points(fn, pts)

                true_state_pts = voxel2points(voxel_state_list[idx])
                fn = os.path.join(
                    FLAGS.dump_dir,
                    '{}_{}_state_true.txt'.format(epoch_num, left_size))
                output_points(fn, true_state_pts)

                # Clamp predictions to [0, 1] before converting to points.
                gen_state[idx][gen_state[idx] < 0.0] = 0.0
                gen_state[idx][gen_state[idx] > 1.0] = 1.0
                pred_state_pts = voxel2points(gen_state[idx])
                fn = os.path.join(
                    FLAGS.dump_dir,
                    '{}_{}_state_pred.txt'.format(epoch_num, left_size))
                output_points(fn, pred_state_pts)

                true_color_pts = voxel2points(voxel_color_list[idx])
                fn = os.path.join(
                    FLAGS.dump_dir,
                    '{}_{}_color_true.txt'.format(epoch_num, left_size))
                output_points(fn, true_color_pts)

                gen_color[idx][gen_color[idx] < 0.0] = 0.0
                gen_color[idx][gen_color[idx] > 1.0] = 1.0
                pred_color_pts = voxel2points(gen_color[idx])
                fn = os.path.join(
                    FLAGS.dump_dir,
                    '{}_{}_color_pred.txt'.format(epoch_num, left_size))
                output_points(fn, pred_color_pts)

                left_size -= 1

    test_loss = np.mean(np.asarray(test_loss))
    log_str(
        'epoch {} test_loss {} cost {} s'.format(epoch_num, test_loss,
                                                 time.time() - begin_time),
        FLAGS.log_file)
    checkpoint_path = os.path.join(FLAGS.save_dir,
                                   'unsupervise{}.ckpt'.format(epoch_num))
    saver.save(sess, checkpoint_path)
def test_one_epoch(ops,pls,sess,saver,testset,epoch_num,feed_dict): total=0 begin_time=time.time() test_loss=[] all_preds,all_labels=[],[] all_error_models,all_error_preds,all_error_gts=[],[],[] for i,feed_in in enumerate(testset): points_list, covars_list, labels_list= unpack_feats_labels(feed_in,FLAGS.num_gpus) feed_dict[pls['points']]=points_list feed_dict[pls['covars']]=covars_list feed_dict[pls['labels']]=labels_list[:,0] feed_dict[pls['is_training']]=False total+=points_list.shape[0] loss,preds=sess.run([ops['total_loss'],ops['preds']],feed_dict) test_loss.append(loss) preds=preds.flatten() labels_list=labels_list.flatten() all_preds.append(preds) all_labels.append(labels_list) mask=preds!=labels_list all_error_models.append(points_list[mask]) # n,k,3 all_error_preds.append(preds[mask]) # n all_error_gts.append(labels_list[mask]) # n all_preds=np.concatenate(all_preds,axis=0) all_labels=np.concatenate(all_labels,axis=0) test_loss=np.mean(np.asarray(test_loss)) acc,macc,oacc=compute_acc(all_labels,all_preds,FLAGS.num_classes) if not FLAGS.eval: log_str('mean acc {:.5} overall acc {:.5} loss {:.5} cost {:.3} s'.format( macc, oacc, test_loss, time.time()-begin_time ),FLAGS.log_file) checkpoint_path = os.path.join(FLAGS.save_dir, 'unsupervise{}.ckpt'.format(epoch_num)) saver.save(sess,checkpoint_path) else: print 'mean acc {:.5} overall acc {:.5} loss {:.5} cost {:.3} s'.format( macc, oacc, test_loss, time.time()-begin_time ) names=get_classes_name() for name,accuracy in zip(names,acc): print '{} : {}'.format(name,accuracy) if FLAGS.confusion_matrix: from s3dis.draw_util import plot_confusion_matrix plot_confusion_matrix(all_preds,all_labels,names,save_path=FLAGS.confusion_matrix_path) if FLAGS.output_error_models: from s3dis.draw_util import output_points all_error_models=np.concatenate(all_error_models,axis=0) all_error_preds=np.concatenate(all_error_preds,axis=0) all_error_gts=np.concatenate(all_error_gts,axis=0) error_num=all_error_gts.shape[0] assert 
np.sum(all_labels!=all_preds)==error_num for k in xrange(error_num): output_points(FLAGS.output_error_path+'{}_{}_{}.txt'.format( names[all_error_gts[k]],names[all_error_preds[k]],k),all_error_models[k])