def load_bop_state(self, bop_name, row):
    """Refresh the board entry for *bop_name* from one replay row.

    Flag-like columns are binarised (any non-zero value becomes 1),
    bounded counters are clamped to their category range, and the piece
    position is converted to hex-offset coordinates.
    """
    state = self.board[bop_name]["state"]
    state["living"] = 1
    # Binarise the pass/hide/keep flags: anything non-zero counts as set.
    state["ObjPass"] = 0 if row.ObjPass == 0 else 1
    state["ObjHide"] = 0 if row.ObjHide == 0 else 1
    state["ObjKeep"] = 0 if row.ObjKeep == 0 else 1
    # Fatigue is capped at 2 and stored one-hot over 3 categories.
    state["ObjTire"] = to_categorical(min(row.ObjTire, 2), 3)
    # Round / attack counters are capped at 1 and kept as plain ints.
    state["ObjRound"] = min(row.ObjRound, 1)
    state["ObjAttack"] = min(row.ObjAttack, 1)
    state["ObjSon"] = 0 if row.ObjSon == 0 else 1
    state["ObjInto"] = 0 if row.ObjInto == 0 else 1
    # Blood (4 levels) and remaining steps (8 levels) stay one-hot encoded.
    state["ObjBlood"] = to_categorical(row.ObjBlood, 4)
    state["ObjStep"] = to_categorical(row.ObjStep, 8)
    self.board[bop_name]["Pos"] = cvtInt6loc2HexOffset(row.ObjNewPos)
def train_epoch(train_loader, model, opt, scheduler, epoch, num_part, num_classes, io):
    """Run one training epoch over *train_loader* and log loss/acc/ins-IoU.

    Parameters
    ----------
    train_loader : DataLoader yielding (points, label, target, norm_plt)
    model : network returning (seg_pred, loss) when given a target
    opt, scheduler : optimizer and LR scheduler (stepped once per epoch)
    epoch : 0-based epoch index (printed 1-based)
    num_part : number of part classes (segmentation logits width)
    num_classes : number of object categories (one-hot size for label)
    io : logger exposing cprint()

    NOTE(review): reads the module-level ``args`` for the scheduler type —
    confirm ``args`` is defined at file scope.
    """
    train_loss = 0.0
    count = 0.0
    accuracy = []
    shape_ious = 0.0
    metrics = defaultdict(lambda: list())
    model.train()

    for batch_id, (points, label, target, norm_plt) in tqdm(enumerate(train_loader), total=len(train_loader), smoothing=0.9):
        batch_size, num_point, _ = points.size()
        points, label, target, norm_plt = Variable(points.float()), Variable(label.long()), Variable(target.long()), \
                                          Variable(norm_plt.float())
        # Channels-first layout for the network: b,3,n
        points = points.transpose(2, 1)
        norm_plt = norm_plt.transpose(2, 1)
        points, label, target, norm_plt = points.cuda(non_blocking=True), label.squeeze(1).cuda(non_blocking=True), \
                                          target.cuda(non_blocking=True), norm_plt.cuda(non_blocking=True)  # target: b,n
        seg_pred, loss = model(points, norm_plt, to_categorical(label, num_classes), target)  # seg_pred: b,n,50

        # instance iou without considering the class average at each batch_size:
        batch_shapeious = compute_overall_iou(seg_pred, target, num_part)  # list of current batch_iou: [iou1,...,iou#b_size]
        # total iou of current batch in each process:
        batch_shapeious = seg_pred.new_tensor([np.sum(batch_shapeious)], dtype=torch.float64)  # same device with seg_pred!!!

        # Loss backward
        loss = torch.mean(loss)
        opt.zero_grad()
        loss.backward()
        opt.step()

        # accuracy
        seg_pred = seg_pred.contiguous().view(-1, num_part)  # b*n,50
        target = target.view(-1, 1)[:, 0]  # b*n
        pred_choice = seg_pred.contiguous().data.max(1)[1]  # b*n
        correct = pred_choice.eq(target.contiguous().data).sum()  # torch.int64: total number of correct-predict pts

        # running sums over the epoch
        shape_ious += batch_shapeious.item()  # sum of per-sample ious in each iteration
        count += batch_size  # total number of samples seen so far
        train_loss += loss.item() * batch_size
        accuracy.append(correct.item() / (batch_size * num_point))  # per-iteration point accuracy

    # Note: We do not need to calculate per_class iou during training

    if args.scheduler == 'cos':
        scheduler.step()
    elif args.scheduler == 'step':
        # Step the LR scheduler but never let the LR fall below 0.9e-5.
        if opt.param_groups[0]['lr'] > 0.9e-5:
            scheduler.step()
        if opt.param_groups[0]['lr'] < 0.9e-5:
            for param_group in opt.param_groups:
                param_group['lr'] = 0.9e-5
    io.cprint('Learning rate: %f' % opt.param_groups[0]['lr'])

    metrics['accuracy'] = np.mean(accuracy)
    metrics['shape_avg_iou'] = shape_ious * 1.0 / count

    outstr = 'Train %d, loss: %f, train acc: %f, train ins_iou: %f' % (epoch+1, train_loss * 1.0 / count, metrics['accuracy'], metrics['shape_avg_iou'])
    io.cprint(outstr)
def to_vector(self):
    """Flatten the full game state into a single 1-D numpy feature vector."""
    # 1-based stage index becomes a 20-way one-hot vector.
    stage_onehot = to_categorical(self.stage - 1, 20)
    friendly = self.__dict_to_array__(self.friendly_units)
    enemy = self.__dict_to_array__(self.enemy_units, enemy=True)
    return np.hstack([stage_onehot, self.city, friendly, enemy, self.red_win])
def get_bop_init(self, Pos):
    """Return the default state dict and hex-offset position for a fresh piece.

    Mirrors the keys written by load_bop_state: flags start cleared, blood
    starts at the top category (3 of 4) and movement at 6 of 8 steps.
    """
    bop_state = {
        "living": 1,
        "ObjPass": 0,
        "ObjHide": 0,
        "ObjKeep": 0,
        "ObjTire": to_categorical(0, 3),   # no fatigue yet
        "ObjRound": 0,
        "ObjAttack": 0,
        "ObjSon": 0,
        "ObjInto": 0,
        "ObjBlood": to_categorical(3, 4),  # full health
        "ObjStep": to_categorical(6, 8),   # full movement allowance
    }
    return bop_state, cvtInt6loc2HexOffset(Pos)
def Attack(self, row, bop_name, stageid, group):
    """Apply one attack event: update attacker, target (and any carried
    soldier), the score, and mark the attacker's shoot action.

    Parameters
    ----------
    row : replay row describing the attack (AttackID, TarID, TarBlood, ...)
    bop_name : board key of the attacking piece
    stageid : current stage; only target rows up to this stage are considered
    group : pandas DataFrame of rows for all pieces

    Raises if the row is not an attack, or if no row for the target piece
    exists at or before *stageid*.
    """
    if not row.AttackID:
        raise Exception('This row is not attacking happened')
    # update the attribution of the bops
    self.load_bop_state(bop_name, row)
    target_bop_name = self.Get_bop_name(row.TarID)
    # Latest known row for the target piece (by DateAndTime) up to this stage;
    # its ObjSon column gives the id of a carried soldier (0 = none).
    TarBop_select = group.loc[(group["StageID"] <= stageid)
                              & (group["ObjID"] == row.TarID)]
    if not TarBop_select.empty:
        SoldierID = int(
            TarBop_select.sort_values(by="DateAndTime", axis=0,
                                      ascending=True).iloc[-1]['ObjSon'])
    else:
        raise Exception
    if row.TarBlood == 0:
        # Target destroyed: mark it dead; a carried soldier dies with it.
        self.board[target_bop_name]["state"], self.board[target_bop_name][
            "Pos"] = self.get_bop_death()
        if SoldierID:
            soldier_name = self.Get_bop_name(SoldierID)
            self.board[soldier_name]["state"], self.board[soldier_name][
                "Pos"] = self.get_bop_death()
    else:
        # Target survives: record its new blood level (one-hot over 4).
        self.board[target_bop_name]["state"]["ObjBlood"] = to_categorical(
            row.TarBlood, 4)
        if row.TarKeep != 0:
            self.board[target_bop_name]["state"]["ObjKeep"] = 1
        if SoldierID:
            # The carried soldier takes TarLost damage of its own.
            soldier_name = self.Get_bop_name(SoldierID)
            soldier_blood = from_categorical(
                self.board[soldier_name]["state"]
                ["ObjBlood"]) - row.TarLost
            if soldier_blood == 0:
                self.board[soldier_name]["state"], self.board[
                    soldier_name]["Pos"] = self.get_bop_death()
            else:
                self.board[soldier_name]["state"][
                    "ObjBlood"] = to_categorical(soldier_blood, 4)
    # update the score
    self.get_score(row, SoldierID)
    # update the action
    self.init_acts()
    self.acts[bop_name]["shoot"] = 1
def test(args, io):
    """Evaluate a trained part-segmentation checkpoint on the test split.

    Loads the best checkpoint for args.model/args.model_type, runs one pass
    over the test set, and logs point accuracy, per-class IoU, class mIoU
    and instance mIoU via io.cprint().
    """
    # Dataloader
    test_data = PartNormalDataset(npoints=2048, split='test', normalize=False)
    # BUG FIX: the original passed len(test_data) as a second print() argument,
    # so the %d placeholder was never filled; format the string instead.
    print("The number of test data is:%d" % len(test_data))
    test_loader = DataLoader(test_data, batch_size=args.test_batch_size, shuffle=False,
                             num_workers=args.workers, drop_last=False)

    # Try to load models (num_part was assigned twice before; once is enough)
    num_part = 50
    num_classes = 16
    device = torch.device("cuda" if args.cuda else "cpu")
    model = models.__dict__[args.model](num_part).to(device)
    io.cprint(str(model))

    from collections import OrderedDict
    state_dict = torch.load("checkpoints/%s/best_%s_model.pth" % (args.exp_name, args.model_type),
                            map_location=torch.device('cpu'))['model']
    # Checkpoints saved from DataParallel prefix keys with "module." — strip it.
    new_state_dict = OrderedDict()
    for layer in state_dict:
        new_state_dict[layer.replace('module.', '')] = state_dict[layer]
    model.load_state_dict(new_state_dict)
    model.eval()

    metrics = defaultdict(lambda: list())
    hist_acc = []
    shape_ious = []
    total_per_cat_iou = np.zeros((16)).astype(np.float32)
    total_per_cat_seen = np.zeros((16)).astype(np.int32)

    for batch_id, (points, label, target, norm_plt) in tqdm(enumerate(test_loader), total=len(test_loader), smoothing=0.9):
        batch_size, num_point, _ = points.size()
        points, label, target, norm_plt = Variable(points.float()), Variable(label.long()), Variable(target.long()), Variable(norm_plt.float())
        points = points.transpose(2, 1)
        norm_plt = norm_plt.transpose(2, 1)
        points, label, target, norm_plt = points.cuda(non_blocking=True), label.squeeze().cuda(
            non_blocking=True), target.cuda(non_blocking=True), norm_plt.cuda(non_blocking=True)

        with torch.no_grad():
            seg_pred = model(points, norm_plt, to_categorical(label, num_classes))  # b,n,50

        # instance iou without considering the class average at each batch_size:
        batch_shapeious = compute_overall_iou(seg_pred, target, num_part)  # [b]
        shape_ious += batch_shapeious  # iou +=, equals to .append

        # per category iou at each batch_size:
        for shape_idx in range(seg_pred.size(0)):  # sample_idx
            cur_gt_label = label[shape_idx]  # label[sample_idx]
            total_per_cat_iou[cur_gt_label] += batch_shapeious[shape_idx]
            total_per_cat_seen[cur_gt_label] += 1

        # accuracy:
        seg_pred = seg_pred.contiguous().view(-1, num_part)
        target = target.view(-1, 1)[:, 0]
        pred_choice = seg_pred.data.max(1)[1]
        correct = pred_choice.eq(target.data).cpu().sum()
        metrics['accuracy'].append(correct.item() / (batch_size * num_point))

    hist_acc += metrics['accuracy']
    metrics['accuracy'] = np.mean(hist_acc)
    metrics['shape_avg_iou'] = np.mean(shape_ious)
    for cat_idx in range(16):
        if total_per_cat_seen[cat_idx] > 0:
            total_per_cat_iou[cat_idx] = total_per_cat_iou[cat_idx] / total_per_cat_seen[cat_idx]

    # First we need to calculate the iou of each class and the avg class iou:
    class_iou = 0
    for cat_idx in range(16):
        class_iou += total_per_cat_iou[cat_idx]
        io.cprint(classes_str[cat_idx] + ' iou: ' + str(total_per_cat_iou[cat_idx]))  # print the iou of each class
    avg_class_iou = class_iou / 16
    outstr = 'Test :: test acc: %f test class mIOU: %f, test instance mIOU: %f' % (metrics['accuracy'], avg_class_iou, metrics['shape_avg_iou'])
    io.cprint(outstr)
def test_epoch(test_loader, model, epoch, num_part, num_classes, io):
    """Evaluate *model* for one epoch; return (metrics, per-category IoU).

    Parameters
    ----------
    test_loader : DataLoader yielding (points, label, target, norm_plt)
    model : network returning per-point log-probabilities (b,n,num_part)
    epoch : 0-based epoch index (printed 1-based)
    num_part : number of part classes
    num_classes : number of object categories (one-hot size for label)
    io : logger exposing cprint()

    Returns metrics dict ('accuracy', 'shape_avg_iou') and a float32 array of
    the 16 per-category average IoUs.

    NOTE(review): the forward pass is not wrapped in torch.no_grad() even
    though model.eval() is set — confirm whether gradient tracking here is
    intentional.
    """
    test_loss = 0.0
    count = 0.0
    accuracy = []
    shape_ious = 0.0
    final_total_per_cat_iou = np.zeros(16).astype(np.float32)
    final_total_per_cat_seen = np.zeros(16).astype(np.int32)
    metrics = defaultdict(lambda: list())
    model.eval()

    # label_size: b, means each sample has one corresponding class
    for batch_id, (points, label, target, norm_plt) in tqdm(enumerate(test_loader), total=len(test_loader), smoothing=0.9):
        batch_size, num_point, _ = points.size()
        points, label, target, norm_plt = Variable(points.float()), Variable(label.long()), Variable(target.long()), \
                                          Variable(norm_plt.float())
        # Channels-first layout for the network: b,3,n
        points = points.transpose(2, 1)
        norm_plt = norm_plt.transpose(2, 1)
        points, label, target, norm_plt = points.cuda(non_blocking=True), label.squeeze(1).cuda(non_blocking=True), \
                                          target.cuda(non_blocking=True), norm_plt.cuda(non_blocking=True)
        seg_pred = model(points, norm_plt, to_categorical(label, num_classes))  # b,n,50

        # instance iou without considering the class average at each batch_size:
        batch_shapeious = compute_overall_iou(seg_pred, target, num_part)  # [b]

        # per category iou at each batch_size:
        for shape_idx in range(seg_pred.size(0)):  # sample_idx
            cur_gt_label = label[shape_idx]  # label[sample_idx], denotes current sample belongs to which cat
            final_total_per_cat_iou[cur_gt_label] += batch_shapeious[shape_idx]  # add the iou belongs to this cat
            final_total_per_cat_seen[cur_gt_label] += 1  # count the number of this cat is chosen

        # total iou of current batch in each process:
        batch_ious = seg_pred.new_tensor([np.sum(batch_shapeious)], dtype=torch.float64)  # same device with seg_pred!!!

        # prepare seg_pred and target for later calculating loss and acc:
        seg_pred = seg_pred.contiguous().view(-1, num_part)
        target = target.view(-1, 1)[:, 0]

        # Loss (seg_pred is assumed to hold log-probabilities — nll_loss)
        loss = F.nll_loss(seg_pred.contiguous(), target.contiguous())

        # accuracy:
        pred_choice = seg_pred.data.max(1)[1]  # b*n
        correct = pred_choice.eq(target.data).sum()  # torch.int64: total number of correct-predict pts
        loss = torch.mean(loss)
        shape_ious += batch_ious.item()  # count the sum of ious in each iteration
        count += batch_size  # count the total number of samples in each iteration
        test_loss += loss.item() * batch_size
        accuracy.append(correct.item() / (batch_size * num_point))  # append the accuracy of each iteration

    for cat_idx in range(16):
        if final_total_per_cat_seen[cat_idx] > 0:  # indicating this cat is included during previous iou appending
            final_total_per_cat_iou[cat_idx] = final_total_per_cat_iou[cat_idx] / final_total_per_cat_seen[cat_idx]  # avg class iou across all samples

    metrics['accuracy'] = np.mean(accuracy)
    metrics['shape_avg_iou'] = shape_ious * 1.0 / count

    outstr = 'Test %d, loss: %f, test acc: %f test ins_iou: %f' % (epoch + 1, test_loss * 1.0 / count, metrics['accuracy'], metrics['shape_avg_iou'])
    io.cprint(outstr)

    # Write to tensorboard
    # writer.add_scalar('loss_train', test_loss * 1.0 / count, epoch + 1)
    # writer.add_scalar('Acc_train', metrics['accuracy'], epoch + 1)
    # writer.add_scalar('ins_iou', metrics['shape_avg_iou'])

    return metrics, final_total_per_cat_iou
def test(args, io):
    """Multi-scale voting evaluation of a trained PAConv checkpoint.

    Repeats the full test pass NUM_REPEAT times; within each pass every
    cloud is classified NUM_VOTE times (votes > 0 use a randomly rescaled
    copy as the feature input) and the softmaxed predictions are averaged.
    The best class mIoU / instance mIoU across repeats is reported.
    """
    # Try to load models
    num_part = 50
    num_classes = 16
    device = torch.device("cuda" if args.cuda else "cpu")
    model = PAConv(args, num_part).to(device)
    io.cprint(str(model))

    from collections import OrderedDict
    state_dict = torch.load("checkpoints/%s/best_insiou_model.pth" % args.exp_name,
                            map_location=torch.device('cpu'))['model']
    # Checkpoints saved from DataParallel prefix keys with "module." — strip it.
    new_state_dict = OrderedDict()
    for layer in state_dict:
        new_state_dict[layer.replace('module.', '')] = state_dict[layer]
    model.load_state_dict(new_state_dict)

    # Dataloader
    test_data = PartNormalDataset(npoints=2048, split='test', normalize=False)
    # BUG FIX: the original passed len(test_data) as a second print() argument,
    # so the %d placeholder was never filled; format the string instead.
    print("The number of test data is:%d" % len(test_data))
    test_loader = DataLoader(test_data, batch_size=args.test_batch_size, shuffle=False,
                             num_workers=args.workers, drop_last=False)

    NUM_REPEAT = 100  # renamed from NUM_PEPEAT (typo); value unchanged
    NUM_VOTE = 10
    global_Class_mIoU, global_Inst_mIoU = 0, 0
    global_total_per_cat_iou = np.zeros((16)).astype(np.float32)
    pointscale = PointcloudScale(scale_low=0.87, scale_high=1.15)
    model.eval()

    for i in range(NUM_REPEAT):
        metrics = defaultdict(lambda: list())
        shape_ious = []
        total_per_cat_iou = np.zeros((16)).astype(np.float32)
        total_per_cat_seen = np.zeros((16)).astype(np.int32)

        for batch_id, (points, label, target, norm_plt) in tqdm(enumerate(test_loader), total=len(test_loader), smoothing=0.9):
            batch_size, num_point, _ = points.size()
            points, label, target, norm_plt = Variable(points.float()), Variable(label.long()), Variable(
                target.long()), Variable(norm_plt.float())
            # points stays b,n,3 here; it is transposed at the model call below.
            norm_plt = norm_plt.transpose(2, 1)
            points, label, target, norm_plt = points.cuda(non_blocking=True), label.squeeze().cuda(
                non_blocking=True), target.cuda(non_blocking=True), norm_plt.cuda(non_blocking=True)
            seg_pred = 0
            # Scratch buffer for the rescaled copies of the cloud. The original
            # used Variable(..., volatile=True), which was removed in
            # torch >= 0.4; the torch.no_grad() block below replaces it.
            new_points = torch.zeros(points.size()[0], points.size()[1], points.size()[2]).cuda()
            for v in range(NUM_VOTE):
                if v > 0:
                    new_points.data = pointscale(points.data)
                # NOTE(review): on the first vote (v == 0) new_points is all
                # zeros, so only the xyz branch carries signal — confirm this
                # is the intended voting scheme.
                with torch.no_grad():
                    seg_pred += F.softmax(
                        model(points.contiguous().transpose(2, 1), new_points.contiguous().transpose(2, 1),
                              norm_plt, to_categorical(label, num_classes)), dim=2)  # xyz,x: only scale feature input
            seg_pred /= NUM_VOTE

            # instance iou without considering the class average at each batch_size:
            batch_shapeious = compute_overall_iou(seg_pred, target, num_part)  # [b]
            shape_ious += batch_shapeious  # iou +=, equals to .append

            # per category iou at each batch_size:
            for shape_idx in range(seg_pred.size(0)):  # sample_idx
                cur_gt_label = label[shape_idx]  # label[sample_idx]
                total_per_cat_iou[cur_gt_label] += batch_shapeious[shape_idx]
                total_per_cat_seen[cur_gt_label] += 1

            # accuracy:
            seg_pred = seg_pred.contiguous().view(-1, num_part)
            target = target.view(-1, 1)[:, 0]
            pred_choice = seg_pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()
            metrics['accuracy'].append(correct.item() / (batch_size * num_point))

        metrics['shape_avg_iou'] = np.mean(shape_ious)
        for cat_idx in range(16):
            if total_per_cat_seen[cat_idx] > 0:
                total_per_cat_iou[cat_idx] = total_per_cat_iou[cat_idx] / total_per_cat_seen[cat_idx]

        print('\n------ Repeat %3d ------' % (i + 1))
        # First we need to calculate the iou of each class and the avg class iou:
        class_iou = 0
        for cat_idx in range(16):
            class_iou += total_per_cat_iou[cat_idx]
            io.cprint(classes_str[cat_idx] + ' iou: ' + str(total_per_cat_iou[cat_idx]))  # print the iou of each class
        avg_class_iou = class_iou / 16
        outstr = 'Test :: test class mIOU: %f, test instance mIOU: %f' % (avg_class_iou, metrics['shape_avg_iou'])
        io.cprint(outstr)

        # Track the best repeat for each metric independently.
        if avg_class_iou > global_Class_mIoU:
            global_Class_mIoU = avg_class_iou
            global_total_per_cat_iou = total_per_cat_iou
        if metrics['shape_avg_iou'] > global_Inst_mIoU:
            global_Inst_mIoU = metrics['shape_avg_iou']

    # final avg print:
    final_out_str = 'Best voting result :: test class mIOU: %f, test instance mIOU: %f' % (global_Class_mIoU, global_Inst_mIoU)
    io.cprint(final_out_str)

    # final per cat print:
    for cat_idx in range(16):
        io.cprint(classes_str[cat_idx] + ' iou: ' + str(global_total_per_cat_iou[cat_idx]))  # print iou of each class