# whole_train_data_idx = np.concatenate(train_data_idx) # whole_num_train = num_train for rd in range(1, num_round): #print('Round {}'.format(rd)) ##### Start Training Epochs for epoch in range(0, args.Epoch): #for epoch in range(0,1): if args.ExpSum: fid = open(summary_filepath, 'a') else: fid = None printout('\n\nstart {:d}-th round {:d}-th epoch at {}\n'.format( rd, epoch, time.ctime()), write_flag=args.ExpSum, fid=fid) #### Shuffle Training Data --- close data_sort = Loader.Shuffle_TrainSet() #### Train One Epoch train_avg_loss, train_avg_acc = TrainOp.TrainOneEpoch( Loader, file_idx_list, data_idx_list, pts_idx_list) printout('\nTrainingSet Avg Loss {:.4f} Avg Acc {:.2f}%'.format( train_avg_loss, 100 * train_avg_acc), write_flag=args.ExpSum, fid=fid) #### Evaluate One Epoch
##### Build the training-operations object and set its optimizer hyperparameters.
TrainOp = util.ShapeNet_IncompleteSup()
TrainOp.SetLearningRate(LearningRate=args.LearningRate,
                        BatchSize=args.batchsize)

##### Instantiate the DGCNN semi-supervised network graph (3000 points per sample).
TrainOp.DGCNN_SemiSup(batch_size=args.batchsize, point_num=3000)

##### Restore the checkpoint tagged 'best' from the checkpoint directory.
best_filepath = os.path.join(CHECKPOINT_PATH,
                             'Checkpoint_epoch-{}'.format('best'))
TrainOp.RestoreCheckPoint(best_filepath)

##### Run inference over the evaluation set.
printout('\n\nstart Inference at {}\n'.format(time.ctime()))

#### Evaluate: average loss, accuracy, per-sample mIoU and per-category mIoU.
avg_loss, avg_acc, perdata_miou, pershape_miou = TrainOp.Test(Loader, Eval)

# Summary line (no trailing newline so the per-shape breakdown follows on screen).
summary = (
    '\nAvg Loss {:.4f} Avg Acc {:.3f}% Avg PerData IoU {:.3f}% Avg PerCat IoU {:.3f}%'
    .format(avg_loss, 100 * avg_acc,
            100 * np.mean(perdata_miou), 100 * np.mean(pershape_miou)))
print(summary, end='')

# Per-shape IoU breakdown: one ' XX.XX%' entry per category.
string = '\nEval PerShape IoU:' + ''.join(
    ' {:.2f}%'.format(100 * iou) for iou in pershape_miou)
print(string)
pts_idx_list = [] for b_i in range(tmp['pts_idx_list'].shape[1]): pts_idx_list.append(tmp['pts_idx_list'][0, b_i][0]) else: pts_idx_list = tmp['pts_idx_list'] ##### Start Training Epochs for epoch in range(0, args.Epoch): if args.ExpRslt: fid = open(summary_filepath, 'a') else: fid = None printout('\n\nstart training {:d}-th epoch at {}\n'.format( epoch, time.ctime()), write_flag=args.ExpRslt, fid=fid) #### Shuffle Training Data Loader.Shuffle_TrainSet() #### Train One Epoch if args.Style == 'Full': train_avg_loss, train_avg_acc = TrainOp.TrainOneEpoch_Full( Loader, pts_idx_list, args.batchsize) elif args.Style == 'Plain': train_avg_loss, train_avg_acc = TrainOp.TrainOneEpoch( Loader, pts_idx_list, args.batchsize) printout('\nTrainingSet Avg Loss {:.4f} Avg Acc {:.2f}%'.format( train_avg_loss, 100 * train_avg_acc),