Code example #1
    def add_claim(self, goal, claim, explanation):
        """
        Add a claim to the goal in the graph. This updates the `claims` field
        of the :class:`etb.datalog.graph.Annotation` corresponding to `goal`.

        :parameters:
            - `goal`: the internal representation of the goal to which the
              matching claim should be added
            - `claim`: the internal representation of a claim literal that
              should be added to the `goal`
            - `explanation`: the explanation to record alongside the claim

        :returntype:
            `None`
        """
        frozen = model.freeze(goal)
        annotation_goal = self.get_annotation(frozen)
        if not annotation_goal:
            # create annotation if not existing yet
            self.add_goal(goal)
            annotation_goal = self.get_annotation(frozen)

        if isinstance(claim, list):
            fclaim = model.freeze(claim)
        elif isinstance(claim, PendingRule):
            fclaim = claim.clause
        else:
            fclaim = claim
        if fclaim not in annotation_goal.claims:
            assert isinstance(fclaim, tuple)
            annotation_goal.claims.append(fclaim)
            self.log.info(
                'graph.add_claim: explanation = {0}'.format(explanation))
            annotation_goal.explanations.append(explanation)
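
Throughout these examples, `model.freeze` turns a mutable goal (a list of integer-encoded literals) into a hashable key for the annotation and dependency tables. A minimal sketch of that idea, assuming the list-of-integers representation the docstrings describe (illustrative only, not the actual etb.datalog implementation):

    def freeze(term):
        # Recursively convert lists to tuples so terms can be used as dict keys.
        if isinstance(term, list):
            return tuple(freeze(t) for t in term)
        return term

    goal = [[1, 2, 3]]             # an internal goal: a list of integer-encoded literals
    frozen = freeze(goal)          # ((1, 2, 3),) -- hashable, usable as a graph node
    assert frozen == ((1, 2, 3),)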
Code example #2
    def __init__(self, hparams):
        super().__init__(hparams)
        self.M_maps = []
        # Load the pre-trained teacher and freeze it; only the student trains.
        self.teacher_model: ResNet_CIFAR = get_classifier(
            hparams["backbone"], hparams["dataset"])
        freeze(self.teacher_model.eval())

        # The student is built layer by layer, starting from the input channels.
        self.plane_model = nn.ModuleList()
        self.teacher_start_layer = 0
        self.last_channel = self.params['input_channel']
        self.init_student()
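
The `freeze` helper used here presumably stops gradient updates on the teacher so only the student trains. A minimal sketch of such a helper in plain PyTorch (an assumption about this project's helper, not its actual code):

    import torch.nn as nn

    def freeze(module: nn.Module) -> nn.Module:
        # Disable gradients for every parameter of `module`.
        for param in module.parameters():
            param.requires_grad = False
        return module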
Code example #3
    def propagate_claims(self, subgoal, prule):
        """
        Apply the propagation rule: propagate any existing claims of an
        already existing subgoal to the rule, creating new pending rules.
        """
        self.log.debug('inference.propagate_claims: subgoal: {0}'
                       .format(self.term_factory.close_literal(subgoal)))
        self.log.debug('inference.propagate_claims: rule: {0}'
                       .format(self.term_factory.close_literals(prule.clause)))
        fsubgoal = model.freeze(subgoal)
        annotation_subgoal = self.logical_state.db_get_annotation(fsubgoal)
        subgoal_index = self.logical_state.goal_dependencies.get_subgoal_index(prule)
        self.log.debug('inference.propagate_claims: annotation_subgoal = {0}, '
                       'subgoal_index = {1}'.format(annotation_subgoal, subgoal_index))
        if annotation_subgoal:
            subgoal_claims = annotation_subgoal.claims
            subgoal_explanations = annotation_subgoal.explanations
            max_subgoal_claims = len(subgoal_claims)
            assert len(subgoal_explanations) == max_subgoal_claims, \
                'length mismatch: {0} != {1}'.format(
                    max_subgoal_claims, len(subgoal_explanations))
            self.log.debug('inference.propagate_claims: subgoal_claims = {0}'
                           .format([self.term_factory.close_literals(
                               sc.clause if isinstance(sc, graph.PendingRule) else sc)
                                    for sc in subgoal_claims]))
            self.log.debug('inference.propagate_claims: max_subgoal_claims = {0}'
                           .format(max_subgoal_claims))
            # Propagate only the claims this rule has not yet consumed.
            for i in range(subgoal_index, max_subgoal_claims):
                self.propagate_claim_to_pending_clause(
                    subgoal_claims[i], subgoal_explanations[i], prule)
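
The loop above resumes from `subgoal_index`, a per-rule cursor into the subgoal's claim list, so each claim is propagated to a given rule at most once. The pattern in isolation (names here are hypothetical):

    def propagate_new_claims(claims, cursor, handle):
        # Process only the claims this consumer has not seen, then advance the cursor.
        for claim in claims[cursor:]:
            handle(claim)
        return len(claims)

    claims = ['c1', 'c2', 'c3']
    cursor = 1                                            # 'c1' was already propagated
    cursor = propagate_new_claims(claims, cursor, print)  # prints c2 and c3; cursor == 3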
Code example #4
    def add_pending_rule_to_goal(self, prule, goal):
        """
        Add a dependency from a pending rule to a goal to the dependency graph.

        :parameters:
            - `prule`: an internal representation of a pending rule (a clause)
            - `goal`: an internal representation of a goal

        :returntype:
            `None`
        """
        assert isinstance(prule, PendingRule)
        assert self.get_annotation(prule)
        self.add_goal(goal)
        self.graph[prule].append(model.freeze(goal))
        self.parents[model.freeze(goal)].append(prule)
Code example #5
    def add_goal_to_pending_rule(self, goal, prule):
        """
        Add an dependency from a pending rule to a goal (a supergoal of the
        pending rule) to the dependency graph.

        :parameters:
            - `goal`: an internal representation of a goal
            - `rule`: an internal representation of a clause

        :returntype:
            `None`
        """
        self.add_goal(goal)
        assert isinstance(prule, PendingRule)
        assert self.get_annotation(prule)
        self.graph[model.freeze(goal)].append(prule)
        if not prule in self.parents or not self.parents[prule]:
            self.parents[prule] = [model.freeze(goal)]
        else:
            raise
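
These two helpers maintain mirrored adjacency lists: `graph` maps a node to its children and `parents` maps a node back to the nodes pointing at it. The bookkeeping in isolation, assuming plain dictionaries of lists:

    from collections import defaultdict

    graph = defaultdict(list)    # node -> children (forward edges)
    parents = defaultdict(list)  # node -> parents (reverse edges)

    def add_edge(src, dst):
        graph[src].append(dst)
        parents[dst].append(src)

    add_edge('goal', 'prule')
    assert graph['goal'] == ['prule'] and parents['prule'] == ['goal']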
Code example #6
    def resolve_claim(self, prule, explanation):
        """
        We resolve a `claim` in 3 steps:
            - Collect all pending rules for which the first body literal is a
              generalization of this claim; resolve the rule with that claim
              (apply the substitution that unifies the claim and the first body
              literal to the rest of the pending rule). If that resolved
              pending rule is a fact, add it as a claim, otherwise add it as a
              new pending rule. We consider this a *bottom up* resolution.
            - Next, collect the stuck goals (a goal is stuck if it was sent off
              to an `InterpretState` for solving, but no solutions have
              returned yet), and check whether this claims matches with any of
              the stuck goals. This would mean the stuck goal is now no longer
              stuck.
            - Finally, the claim might also be a solution to a goal that is not
              stuck (note that subgoals get resolved via the pending rules they
              are necessarily part of, this is not the case for the top goal
              queries).

        :parameters:
            - `claim`: an internal representation of a claim, i.e., a list with
              one internal representation of a literal which is in turn again a
              list of integers.

        :returntype:
            `None`

        """
        self.log.debug('inference.resolve_claim: claim {0!s}'
                       .format(self.term_factory.close_literal(prule.clause[0])))
        self.notify()
        candidate_clauses = []
        #parents = self.logical_state.goal_dependencies.get_parents(prule)
        annotation_claim = self.logical_state.db_get_annotation(prule)
        self.log.debug('inference.resolve_claim: annotation_claim({0}) = {1}'
                       .format(prule, annotation_claim))
        if annotation_claim:
            claim_goal = annotation_claim.goal
            self.log.debug('inference.resolve_claim: claim_goal: {0}'.format(claim_goal))
            fgoal = model.freeze(claim_goal)
            self.logical_state.db_add_claim_to_goal(claim_goal, prule, explanation)
            candidate_clauses = self.logical_state.goal_dependencies.get_parents(fgoal)
            self.log.debug('inference.resolve_claim: candidate_clauses = {0}'.format(candidate_clauses))
        # else:
        #     candidate_clauses = index.get_candidate_generalizations(self.logical_state.db_get_pending_rules_index(), claim[0])
        self.log.debug('inference.resolve_claim: candidate_clauses = {0}'
                       .format(candidate_clauses))
        for candidate in candidate_clauses:
            self.propagate_claims(claim_goal, candidate)
Code example #7
    def step(self, batch, phase: str):
        if self.current_epoch in self.milestone_epochs:
            print(f'freezing layer {self.current_layer}')
            freeze(self.plane_model[self.current_layer])
            self.current_layer += 1
            self.milestone_epochs = self.milestone_epochs[1:]

        images, labels = batch

        if self.training:
            # Distillation: compare student features against frozen teacher features.
            feat_s, predictions = self(images, with_feature=True)
            with torch.no_grad():
                feat_t, out_t = self.teacher_model(images, with_feature=True)
            assert len(feat_s) == len(feat_t)
            dist_loss = self.dist_method(
                feat_s, feat_t, self.current_epoch / self.params['num_epochs'])

            # Run the student up to the current layer, bridge its features into
            # the teacher, and let the teacher finish the forward pass.
            mid_feature = self.forward(images, until=self.current_layer + 1)
            transfer_feature = self.bridges[self.current_layer](mid_feature)
            predictions = self.teacher_model(
                transfer_feature, start_forward_from=self.current_layer + 1)
            task_loss = self.criterion(predictions, labels)
            loss = task_loss + dist_loss * self.params['distill_coe']

            self.log('train/dist_loss', dist_loss)
            self.log('train/task_loss', task_loss)
        else:
            mid_feature = self.forward(images, until=self.current_layer + 1)
            transfer_feature = self.bridges[self.current_layer](mid_feature)
            predictions = self.teacher_model(
                transfer_feature, start_forward_from=self.current_layer + 1)
            loss = self.criterion(predictions, labels)

        metric = self.metric(predictions, labels)
        self.log(phase + '/' + self.params['metric'], metric)
        return loss
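
The milestone logic at the top of `step` freezes one student block per milestone epoch and advances the training frontier. Stripped of the model specifics, the schedule behaves like this (made-up epoch numbers):

    milestone_epochs = [3, 6]
    current_layer = 0
    for epoch in range(8):
        if epoch in milestone_epochs:
            # Freeze the finished block and move on to the next one.
            print('freezing layer', current_layer)
            current_layer += 1
            milestone_epochs = milestone_epochs[1:]
    # freezes layer 0 at epoch 3 and layer 1 at epoch 6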
Code example #8
    def updategT(self, subgoal, clause):
        """
        Add `clause` to the `gT` table of the goal associated with `clause`,
        under the key `subgoal`.

        .. seealso::
            Closing algorithm specification

        """
        annotation_clause = self.logical_state.db_get_annotation(clause)
        if annotation_clause:
            goal = annotation_clause.goal
            if goal:
                annotation = self.logical_state.db_get_annotation(goal)
                if annotation:
                    key = model.freeze(subgoal)
                    if key not in annotation.gT:
                        annotation.gT[key] = []
                    annotation.gT[key].append(clause)
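
The `gT` update is the usual dict-of-lists idiom; `dict.setdefault` expresses it in one line with identical behavior:

    gT = {}
    key = ('p', 1)                        # a frozen subgoal
    gT.setdefault(key, []).append('clause-1')
    gT.setdefault(key, []).append('clause-2')
    assert gT[key] == ['clause-1', 'clause-2']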
Code example #9
    def is_completed(self, goal):
        """
        Verify whether the `goal` is completed; this assumes at least one run
        of both

            - :func:`etb.datalog.graph.DependencyGraph.close`, and
            - :func:`etb.datalog.graph.DependencyGraph.complete`

        :parameters:
            - `goal`: an internal goal representation

        :returntype:
            `True` or `False`

        """
        frozen = model.freeze(goal)
        annotation = self.get_annotation(frozen)
        if annotation:
            return annotation.status == Annotation.COMPLETED
        else:
            return False
Code example #10
    def add_goal(self, goal):
        """
        Add a goal to the dependency graph and create a matching annotation
        that describes that goal.

        :parameters:
            - `goal`: an internal representation of a goal (i.e., a list of
              integers)

        :returntype:
            `None`
        """
        frozen_goal = model.freeze(goal)
        if not self.__node_is_present(frozen_goal):
            # add node to graph
            self.add_node(frozen_goal)
            annotation_goal = Annotation(frozen_goal, Annotation.GOAL,
                                         self.state)
            # self.log.info('graph.add_goal: Adding {0}: {1}'
            #               .format(self.state.engine.term_factory.close_literal(frozen_goal),
            #                       annotation_goal.print_status()))
            self.nodes_to_annotations[frozen_goal] = annotation_goal
Code example #11
    def add_pending_rule(self, rule, explanation, parent_goal):
        """
        Add a pending rule for reasoning. We appropriately update the goal
        dependency graph depending on whether the explanation is a top down
        explanation or a bottom up resolution. In all cases, we push the first
        body literal of the `rule` to the Inference object using
        :func:`etb.datalog.inference.Inference.add_goal`.

        :parameters:
            - `rule`: an internal rule (a list of lists of integers)
            - `explanation`: see
              :func:`etb.datalog.inference.Inference.add_claim`.
            - `parent_goal`: the goal this pending rule was created for

        :returntype:
            `None` or `graph.PendingRule`
        """

        #if self.engine.SLOW_MODE:
        #    self.log.debug('Engine 3 Adding Pending Clause %s:', self.term_factory.close_literals(rule))
        #    time.sleep(self.engine.SLOW_MODE)

        # Add it to all clauses (for its explanation)
        self.log.debug('inference.add_pending_rule: rule {0}'
                       .format([str(c) for c in self.term_factory.close_literals(rule)]))
        self.log.debug('inference.add_pending_rule: explanation {0}'.format(explanation))
        parent_goal_claims = self.get_claims_matching_goal(parent_goal)
        self.log.debug('inference.add_pending_rule: parent_goal_claims {0}'.format(parent_goal_claims))

        if model.is_ground(rule[0]) and rule[0] in parent_goal_claims:
            # do nothing if the pending rule has a ground head that is already a claim
            self.log.debug('inference.add_pending_rule: pending rule subsumed by existing claim')
            return None
        # Add it to the db of pending rules (important, for example, for
        # `add_claim`, which uses that db to resolve pending rules against claims)
        prule = self.logical_state.db_add_pending_rule(rule)
        assert isinstance(prule, graph.PendingRule)
        assert self.logical_state.db_get_annotation(prule)
        self.logical_state.db_add_clause(prule, explanation)
        # the subgoal to be added (rule[1] or a renaming if it already exists)
        new_subgoal = False
        if not model.is_fact(rule):
            subgoal = self.logical_state.is_renaming_present_of_goal(rule[1])
            if not subgoal:
                # it's new:
                subgoal = rule[1]
                new_subgoal = True

        self.log.debug('inference.add_pending_rule: explanation = {0}'
                       .format(explanation))
        # Add the goal dependency if the pending rule originates from a goal
        if explanation and model.is_top_down_explanation(explanation):
            original_goal = model.get_goal_from_explanation(explanation)
            assert isinstance(prule, graph.PendingRule)
            self.logical_state.db_add_goal_to_pending_rule(original_goal, prule)
            # update goal of rule to be the original_goal
            self.log.debug('inference.add_pending_rule: calling update_goal top_down: {0}'.format(original_goal))
            self.update_goal(prule, original_goal)
        if explanation and model.is_bottom_up_explanation(explanation):
            originating_rule = model.get_rule_from_explanation(explanation)
            self.logical_state.db_add_pending_rule_to_pending_rule(originating_rule, prule)
            # goal these pending clauses all originate from
            originating_rule_annotation = self.logical_state.db_get_annotation(originating_rule)
            if originating_rule_annotation:
                goal_of_originating_rule = originating_rule_annotation.goal
                self.log.debug('inference.add_pending_rule: calling update_goal bottom_up: {0}'.format(goal_of_originating_rule))
                self.update_goal(prule, goal_of_originating_rule)
            # this means we found a solution to the subgoal (first body
            # literal) of model.get_rule_from_explanation(explanation); so we
            # update subgoalindex of that rule with 1 (one solution propagated)
            # This is already being done in propagate_claim_to_pending_clause
            # self.increase_subgoalindex(originating_rule)
            if not model.is_fact(rule):
                self.updategT(subgoal, rule)
        if self.engine.SLOW_MODE:
            self.log.debug('Slowing down before updating goal dependencies by adding pending rule to subgoal edge.')
            time.sleep(self.engine.SLOW_MODE)


        # Further add the first literal of the pending rule to the goals (in
        # the Inference engine, so this potentially triggers further
        # deduction)
        if model.is_fact(rule):
            self.add_claim(prule, explanation)
        else:  # then subgoal must be set
            self.log.debug('inference:subgoal: {0} from pending rule: {1}'.format(subgoal, rule))
            self.logical_state.db_add_pending_rule_to_goal(prule, subgoal)
            self.updategT(subgoal, prule)

            # The goal dependencies graph has been updated at this point:
            # unlock and continue by adding this goal.
            if new_subgoal:
                self.add_goal(subgoal)
            else:
                self.log.debug('inference.add_pending_rule with known subgoal: {0}'
                               .format(self.term_factory.close_literal(subgoal)))
                self.propagate_claims(subgoal, prule)

            # # Try to resolve it with existing claims (this is missing in the
            # # original engine3 description and breaks for example
            # # test_simple_program in test_engine.py)
            # self.resolve_pending_rule(rule)

        if self.engine.CLOSE_DURING_INFERENCING:
            self.engine.close()
        return prule
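
`add_pending_rule` branches on whether the explanation is top-down (the rule came from expanding a goal) or bottom-up (it came from resolving another pending rule with a claim). A self-contained sketch of how such tagged explanations might look; the real `model` module encodes them differently:

    def is_top_down_explanation(expl):
        return expl[0] == 'goal'

    def is_bottom_up_explanation(expl):
        return expl[0] == 'rule'

    expl = ('goal', ('p', 'X'))           # produced while expanding a goal
    assert is_top_down_explanation(expl)
    assert not is_bottom_up_explanation(expl)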
Code example #12
    def __init__(self, clause):
        # Store a frozen (hashable) copy of the clause.
        self.clause = model.freeze(clause)
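
Because `PendingRule` defines no `__eq__` or `__hash__`, instances compare by object identity: two rules with syntactically equal clauses remain distinct nodes in the dependency graph. A self-contained illustration (with a plain `tuple` standing in for `model.freeze`):

    class PendingRule:
        def __init__(self, clause):
            self.clause = tuple(clause)  # stand-in for model.freeze

    a = PendingRule([1, 2])
    b = PendingRule([1, 2])
    assert a.clause == b.clause          # same frozen clause
    assert a is not b and a != b         # but distinct graph nodes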
Code example #13
def fine_tune(device, model, model_pre_train_pth, model_fine_tune_pth):

    print()
    print("***** Start FINE-TUNING *****")
    print()

    # ------------
    #  Load Dunhuang Grottoes data
    # ------------

    print("---> preparing dataloader...")

    # Training dataloader. Length = dataset size / batch size
    train_dataset = data.DATA(mode="train", train_status="finetune")
    dataloader_train = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=argparser.n_cpu
    )
    print("---> length of training dataset: ", len(train_dataset))

    # Load test images
    test_dataset = data.DATA(mode="test", train_status="test")
    dataloader_test = torch.utils.data.DataLoader(
        dataset=test_dataset,
        batch_size=args.batch_size_test,
        shuffle=False,
        num_workers=argparser.n_cpu
    )
    print("---> length of test dataset: ", len(test_dataset))

    # -------
    # Test reconstruct
    # -------
    # for idx, (imgs_masked, masks, gts, info) in enumerate(dataloader_test):
    #     print("masked images shape: ", gts.shape)
    #     print("masked images shape: ", imgs_masked.shape)
    #     print("masked images shape: ", masks.shape)
    #     name = str.split(info['name'][0], '_')
    #     reconstruct = reconstruct_img.reconstruct(imgs_masked.squeeze(), int(info['Heigth']), int(info['Width']), name[0], args)
    #     reconstruct.save('test.jpg')
    #     te = np.asarray(reconstruct)
    #     print(te.shape)
    #     print(name[0])
    #
    #     gts = gts.squeeze()
    #     gts = gts.permute(1, 2, 0).numpy()
    #     gts = (gts * 255).astype('uint8')

    # -------
    # Model
    # -------

    # load model from fine-tune checkpoint if available
    if os.path.exists(model_fine_tune_pth):
        print("---> found previously saved {}, loading checkpoint and CONTINUE fine-tuning"
              .format(args.saved_fine_tune_name))
        load_model(model, model_fine_tune_pth)
    # load best pre-train model and start fine-tuning
    elif os.path.exists(model_pre_train_pth) and args.train_mode == "w_pretrain":
        print("---> found previously saved {}, loading checkpoint and START fine-tuning"
              .format(args.saved_pre_train_name))
        load_model(model, model_pre_train_pth)

    # freeze batch-norm params in fine-tuning
    if args.train_mode == "w_pretrain" and args.pretrain_epochs > 10:
        model.freeze()

    # ----------------
    #  Optimizer
    # ----------------

    # Optimizer
    print("---> preparing optimizer...")
    optimizer = optim.Adam(model.parameters(), lr=argparser.LR_FT)
    criterion = nn.MSELoss()
    # Move model to device
    model.to(device)

    # ----------
    #  Training
    # ----------

    print("---> start training cycle ...")
    with open(os.path.join(args.output_dir, "finetune_losses.csv"), "w", newline="") as csv_losses:
        with open(os.path.join(args.output_dir, "finetune_scores.csv"), "w", newline="") as csv_scores:
            writer_losses = csv.writer(csv_losses)
            writer_losses.writerow(["Epoch", "Iteration", "Loss"])

            writer_scores = csv.writer(csv_scores)
            writer_scores.writerow(["Epoch", "Total Loss", "MSE", "SSIM", "Final Score"])

            iteration = 0
            highest_final_score = 0.0   # the higher the better, combines mse and ssim

            for epoch in range(args.finetune_epochs):

                model.train()

                loss_sum = 0    # store accumulated loss for one epoch

                for idx, (imgs_masked, masks, gts) in enumerate(dataloader_train):

                    # Move to device
                    imgs_masked = imgs_masked.to(device)    # (N, 3, H, W)
                    masks = masks.to(device)                # (N, 1, H, W)
                    gts = gts.to(device)                    # (N, 3, H, W)

                    #print("masked images shape: ",imgs_masked.shape)
                    #print("masks shape: ",masks.shape)
                    #print("target images shape: ",gts.shape)

                    # Model forward path => predicted images
                    preds = model(imgs_masked, masks)

                    # Keep the known pixels from the input; take only the hole
                    # pixels from the network output.
                    original_pixels = torch.mul(masks, imgs_masked)
                    ones = torch.ones_like(masks)  # same device/dtype as masks
                    reversed_masks = torch.sub(ones, masks)
                    predicted_pixels = torch.mul(reversed_masks, preds)
                    preds = torch.add(original_pixels, predicted_pixels)

                    # Calculate total loss
                    #train_loss = loss.total_loss(preds, gts)
                    train_loss = criterion(preds, gts)
                    # Execute Back-Propagation
                    optimizer.zero_grad()
                    train_loss.backward()
                    optimizer.step()

                    print("\r[Epoch %d/%d] [Batch %d/%d] [Loss: %f]" %
                          (epoch + 1, args.finetune_epochs, (idx + 1), len(dataloader_train), train_loss), end="")

                    loss_sum += train_loss.item()
                    writer_losses.writerow([epoch+1, iteration+1, train_loss.item()])
                    iteration += 1

                # ------------------
                #  Evaluate & Save Model
                # ------------------

                if (epoch+1) % args.val_epoch == 0:
                    mse, ssim = test.test(args, model, device, dataloader_test, mode="validate")
                    final_score = 1 - mse / 100 + ssim
                    print("\nMetrics on test set @ epoch {}:".format(epoch+1))
                    print("-> Average MSE:  {:.5f}".format(mse))
                    print("-> Average SSIM: {:.5f}".format(ssim))
                    print("-> Final Score:  {:.5f}".format(final_score))

                    if final_score > highest_final_score:
                        save_model(model, model_fine_tune_pth)
                        highest_final_score = final_score

                    writer_scores.writerow([epoch+1, loss_sum, mse, ssim, final_score])

                save_model(model, os.path.join(args.model_dir_fine_tune, "Net_finetune_epoch{}.pth.tar".format(epoch+1)))
                if epoch > 0:
                    remove_prev_model(os.path.join(args.model_dir_fine_tune, "Net_finetune_epoch{}.pth.tar".format(epoch)))

    print("\n***** Fine-tuning FINISHED *****")