Example #1
0
 def advance(self):
     """Advance to the next level, raising danger and mystery.

     Fixed per-level increments come from ``self.base`` and ``self.spec``;
     a further random amount (``total``) is then split between danger and
     mystery by a dice roll, flipping the split if either stat would go
     negative.
     """
     self.level += 1
     self.danger += self.base['danger_step'] + self.spec.danger_step
     self.mystery += self.base['mystery_step'] + self.spec.mystery_step
     
     # Random portion to distribute between danger and mystery.
     total = self.base['random_step'] + self.spec.random_step
     sides = total + 1 - 2*(total//4) # Must have opposite parity to total.
     # Presumably dice(n, s) sums n rolls of an s-sided die — TODO confirm.
     # The shift centers the roll: mean of dice(3, sides) is 3*(sides+1)/2,
     # so step averages total/2.
     # NOTE(review): `/` yields a float in Python 3; the numerator is even
     # (sides+1 has the same parity as total) so the value is whole — confirm
     # callers tolerate a float step or whether `//` was intended.
     step = dice(3, sides) + (total - 3*(sides + 1))/2
     # Flip the split if either stat would drop below zero.
     if self.danger + step < 0 or self.mystery + total - step < 0:
         step = total - step
     self.danger += step
     self.mystery += total - step
Example #2
0
def val_step(step, patch, groundtruth):
    """Run one validation step: forward pass, metric updates, image logging.

    No gradients are computed; the model runs with ``training=False``.
    """
    transformed, segmentation = model(patch, training=False)
    loss_value = dice_loss(groundtruth, segmentation)

    # Record the validation loss and accuracy.
    val_loss.update_state(loss_value)
    val_acc.update_state(dice(groundtruth, segmentation))

    # Write the input, transformed image, mask, and prediction to the summary.
    for tag, image in (
        ("image", patch),
        ("image transform", transformed),
        ("groundtruth", groundtruth * 255),
        ("pred", segmentation),
    ):
        tf.summary.image(tag, image, step=step)
    log_writer.flush()
Example #3
0
def train_step(step, patch, groundtruth):
    """Run one training step: forward pass, backprop, metric updates."""
    with tf.GradientTape() as tape:
        transformed, segmentation = model(patch, training=True)
        loss_value = dice_loss(groundtruth, segmentation)

    # Compute gradients of the loss and apply them (backpropagation).
    gradients = tape.gradient(loss_value, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    # Record the training loss and accuracy.
    train_loss.update_state(loss_value)
    train_acc.update_state(dice(groundtruth, segmentation))
Example #4
0
 def damage(self):
     """Roll damage from the owner's DEX, scaled by this item's level.

     Returns None when the item has no owner.
     NOTE(review): `/` gives floats in Python 3 — confirm dice() accepts them.
     """
     if not self.owner:
         return None
     roll_count = self.level / 5 + 1
     face_count = (self.owner.fighter.DEX + self._damage) / 2
     flat_bonus = self.level / 5
     return dice(roll_count, face_count, flat_bonus)
 def damage(self):
     """Roll damage from the owner's STR, scaled by this item's level.

     Returns None when the item has no owner.
     NOTE(review): `/` gives floats in Python 3 — confirm dice() accepts them.
     """
     if not self.owner:
         return None
     roll_count = self.level / 3 + 1
     face_count = (self.owner.fighter.STR + self._damage) / 2
     flat_bonus = self.level / 3
     return dice(roll_count, face_count, flat_bonus)
 def damage(self):
     """Roll damage from the owner's INT, scaled by this item's level.

     Returns None when the item has no owner.
     NOTE(review): `/` gives floats in Python 3 — confirm dice() accepts them.
     """
     if not self.owner:
         return None
     return dice(
         self.level / 3 + 1,
         self.owner.fighter.INT / 2 + self._damage,
         self.level / 2,
     )
Example #7
0
    def skill_process(self, skill):
        """Resolve *skill* being used against this fighter's owner.

        Applies counter checks, hit/evade chance, critical hits, physical
        defense reduction, and elemental resistance, then returns a list
        of result dicts (messages, damage pop-ups, HP changes) for the
        caller to render.
        """

        owner = skill.owner   # entity using the skill
        target = self.owner   # this fighter's entity is the target

        message = ""
        results = []
        target_name = target.name.capitalize()
        skill_name = skill.name

        damage = skill.damage
        attr = skill.attr
        hit_rate = skill.hit_rate

        # Counter check: the target may react before the skill resolves.
        check = self.other_counter_check(skill, owner, target)
        if check:
            results.extend(check)
            # Death check: stop here if the counter killed the target or
            # ended its turn.
            if target.fighter.hp < 1 or target.state == state.TURN_END:
                return results

        # Hit chance formula: (hit%) = (α/100) * (1 - (β/100)) * 100,
        # e.g. hit rate (α) = 95, evasion rate (β) = 5.
        if target.state != state.STUN and Tag.player in target.tag:
            target.form = form.DEFENSE
        if hit_rate:
            hit_chance = ((hit_rate - self.DEX + owner.fighter.DEX) /
                          100) * (1 - (self.evasion / 100)) * 100
            if random.randrange(1, 100) <= hit_chance:
                # Hit.
                message = f"Hit"
                hit_particle(target=target)

                # Critical check: attacker DEX raises the odds, target DEX
                # lowers them; a critical doubles the base damage.
                if random.randrange(
                        1, (100 + target.fighter.DEX)) < 3 + owner.fighter.DEX:
                    damage = skill.damage * 2
                    message += " CRITICAL!"

                # Physical defense: reduce damage by a defense dice roll.
                # NOTE(review): skipped on a critical hit because of the
                # `elif` — confirm that is intended.
                elif attr == "physical":
                    defens_p = target.fighter.level // 3
                    damage = damage - dice(defens_p,
                                           defens_p + target.fighter.defense,
                                           target.fighter.level)

            else:
                # Evaded: report the miss and stop.
                results.append({"damage_pop": target, "damage": "MISS"})
                results.append(
                    {"message": f"{target_name} Avoided {skill_name}"})
                return results

        # Elemental resistance: a resist value <= 0 marks a weakness.
        if target.fighter.resist[attr] <= 0:
            damage *= 2.5  # weakness damage
        else:
            damage = damage / target.fighter.resist[attr]

        # Fully guarded: no damage got through.
        if damage < 1:
            message += f" But {target_name} was undamaged."
            results.extend([{
                "message": message
            }, {
                "damage_pop": target,
                "damage": "Guard!"
            }])
            return results

        elif damage >= 1:
            # Play the hit animation only for non-ranged attacks.
            if skill.anime and Tag.range_attack not in skill.tag:
                Hit_Anime(skill, target)
            message += f" {target_name} took {int(damage)} damage!"
            results.append({"message": f"from {skill_name}"})
            results.append({"message": message})

            damage = int(damage)

            results.extend(target.fighter.change_hp(damage, target))

            # Roll for any status effect attached to the skill.
            if skill.effect:
                owner.fighter.effect_hit_chance(skill.effect, target)

        return results
Example #8
0
def eval_function(test_groundtruth_path_list):
    """Evaluate saved prediction PNGs against ground-truth vessel masks.

    For each ground-truth path, loads the matching prediction from
    ``test_save_dir`` (matched by the numeric image id embedded in the
    file name), accumulates per-image Dice, ROC-AUC, PR-AUC, sensitivity
    (TPR) and specificity (TNR), prints the averages, and returns the
    summed confusion-matrix counts.

    Args:
        test_groundtruth_path_list: paths to ground-truth mask images.

    Returns:
        Tuple ``(sum_tp, sum_fn, sum_fp, sum_tn)`` of confusion-matrix
        totals accumulated over all images.
    """
    dice_list = []
    roc_list = []
    pr_list = []
    tpr_list = []
    tnr_list = []
    sum_tp = 0
    sum_tn = 0
    sum_fp = 0
    sum_fn = 0

    roc = tf.keras.metrics.AUC(num_thresholds=200, curve="ROC")
    pr = tf.keras.metrics.AUC(num_thresholds=200, curve="PR")
    tn = tf.keras.metrics.TrueNegatives()
    tp = tf.keras.metrics.TruePositives()
    fn = tf.keras.metrics.FalseNegatives()
    fp = tf.keras.metrics.FalsePositives()
    # All stateful metrics are reset and updated together for every image.
    # (The original also built a tf.keras.metrics.Recall() that was never
    # used; it has been removed.)
    metrics = (roc, pr, tn, tp, fn, fp)

    for groundtruth_path in test_groundtruth_path_list:
        # File name layout: .../<id>_<suffix>.<ext> — extract the numeric id.
        name = groundtruth_path.split("/")[-1].split(".")[0].split("_")[0]
        for metric in metrics:
            metric.reset_states()

        groundtruth = plt.imread(groundtruth_path)
        preds = plt.imread(test_save_dir + str(int(name)) + ".png")
        # Scale the mask from [0, 255] to [0, 1].
        groundtruth = np.array(groundtruth, dtype=np.float32) / 255.0

        pred = preds[:, :, 0]  # png image is 4-channel; channel 0 is the mask
        for metric in metrics:
            metric.update_state(groundtruth, pred)

        dice_list.append(dice(groundtruth, pred).numpy())
        roc_list.append(roc.result().numpy())
        pr_list.append(pr.result().numpy())

        current_tn = tn.result().numpy()
        current_tp = tp.result().numpy()
        current_fn = fn.result().numpy()
        current_fp = fp.result().numpy()

        sum_tp += current_tp
        sum_tn += current_tn
        sum_fp += current_fp
        sum_fn += current_fn

        # Per-image sensitivity (TPR) and specificity (TNR).
        tpr_list.append(current_tp / (current_tp + current_fn))
        tnr_list.append(current_tn / (current_tn + current_fp))

    print("average dice score for all predict vessel masks:",
          np.mean(dice_list))
    print("average AUC for all predict vessel masks:", np.mean(roc_list))
    print("average PR for all predict vessel masks:", np.mean(pr_list))
    print("average recall(sensitivity) for all predict vessel masks:",
          np.mean(tpr_list))
    print("average specificity for all predict vessel masks:",
          np.mean(tnr_list))

    return sum_tp, sum_fn, sum_fp, sum_tn