def test(self, model, test_set, l_loss, m_loss):
    """Evaluate *model* on *test_set* and log classification + segmentation metrics.

    Parameters
    ----------
    model : network to evaluate; switched to eval mode for the duration and
        restored to train mode before returning.
    test_set : iterable yielding ``(images, segments, labels)`` batches.
    l_loss : classification loss callable.
    m_loss : segmentation (mask) loss callable, applied per attention branch
        when ``self.use_mloss`` is set.

    Returns
    -------
    tuple : (mean classification loss, mean classification accuracy) over batches.
    """
    model.train(mode=False)  # eval mode while testing
    loss_classification_sum = 0
    loss_segmentation_sum = 0
    accuracy_classification_sum = 0
    batch_count = 0
    for images, segments, labels in test_set:
        labels, segments = model_utils.reduce_to_class_number(self.left_class_number,
                                                              self.right_class_number,
                                                              labels,
                                                              segments)
        images, labels, segments = self.convert_data_and_label(images, labels, segments)
        # one pulled (resized) segment tensor per attention branch
        segments_list = [puller(segments) for puller in self.puller]
        model_classification, model_segmentation = model_utils.wait_while_can_execute(model, images)

        classification_loss = l_loss(model_classification, labels)
        if self.use_mloss:
            sum_segm_loss = None
            for ms, sl in zip(model_segmentation, segments_list):
                # BUGFIX: use the m_loss passed as an argument instead of
                # silently ignoring it in favour of self.m_loss (all visible
                # callers pass self.m_loss, so behaviour is unchanged there).
                segmentation_loss = m_loss(ms, sl)
                # out-of-place add: never mutate the per-branch loss tensors
                if sum_segm_loss is None:
                    sum_segm_loss = segmentation_loss
                else:
                    sum_segm_loss = sum_segm_loss + segmentation_loss

        output_probability, output_cl, cl_acc = self.calculate_accuracy(labels, model_classification,
                                                                        labels.size(0))

        self.save_test_data(labels, output_cl, output_probability)

        # accumulate information
        accuracy_classification_sum += model_utils.scalar(cl_acc.sum())
        loss_classification_sum += model_utils.scalar(classification_loss.sum())
        if self.use_mloss:
            loss_segmentation_sum += model_utils.scalar(sum_segm_loss.sum())
        batch_count += 1

    f_1_score_text, recall_score_text, precision_score_text = metrics_processor.calculate_metric(
        self.classes, self.test_trust_answers, self.test_model_answers)

    # p.EPS guards against division by zero on an empty test_set
    loss_classification_sum /= batch_count + p.EPS
    accuracy_classification_sum /= batch_count + p.EPS
    loss_segmentation_sum /= batch_count + p.EPS
    text = 'TEST={} Loss_CL={:.5f} Loss_M={:.5f} Accuracy_CL={:.5f} {} {} {} '.format(
        self.current_epoch,
        loss_classification_sum,
        loss_segmentation_sum,
        accuracy_classification_sum,
        f_1_score_text,
        recall_score_text,
        precision_score_text)
    p.write_to_log(text)
    model.train(mode=True)  # restore train mode for the caller
    return loss_classification_sum, accuracy_classification_sum
# Example #2
# 0
    def test(self, model, test_set, l_loss, m_loss=None):
        """Evaluate *model* on *test_set* (classification only) and log metrics.

        Parameters
        ----------
        model : network to evaluate; produces classification output only.
        test_set : iterable yielding ``(images, segments, labels)`` batches.
        l_loss : classification loss callable, applied to sigmoid outputs.
        m_loss : unused; kept for signature compatibility with the
            segmentation-aware ``test`` variant.

        Returns
        -------
        tuple : (mean classification loss, mean classification accuracy) over batches.
        """
        loss_classification_sum = 0
        accuracy_classification_sum = 0
        batch_count = 0
        # BUGFIX: switch the *passed* model to eval mode instead of self.model;
        # the parameter is what is actually executed below (identical object at
        # the existing call sites, which pass self.model).
        model.train(mode=False)
        sigmoid = nn.Sigmoid()  # hoisted out of the loop; used for accuracy
        for images, segments, labels in test_set:
            labels, segments = model_utils.reduce_to_class_number(self.left_class_number,
                                                                  self.right_class_number,
                                                                  labels,
                                                                  segments)
            images, labels, segments = self.convert_data_and_label(images, labels, segments)
            model_classification = model_utils.wait_while_can_execute_single(model, images)

            model_classification = sigmoid(model_classification)
            classification_loss = l_loss(model_classification, labels)

            output_probability, output_cl, cl_acc = self.calculate_accuracy(labels, model_classification,
                                                                            labels.size(0))

            self.save_test_data(labels, output_cl, output_probability)

            # accumulate information
            accuracy_classification_sum += model_utils.scalar(cl_acc.sum())
            loss_classification_sum += model_utils.scalar(classification_loss.sum())
            batch_count += 1

        f_1_score_text, recall_score_text, precision_score_text = metrics_processor.calculate_metric(
            self.classes, self.test_trust_answers, self.test_model_answers)

        # p.EPS guards against division by zero on an empty test_set
        loss_classification_sum /= batch_count + p.EPS
        accuracy_classification_sum /= batch_count + p.EPS
        text = 'TEST={} Loss_CL={:.5f} Accuracy_CL={:.5f} {} {} {} '.format(self.current_epoch,
                                                                            loss_classification_sum,
                                                                            accuracy_classification_sum,
                                                                            f_1_score_text,
                                                                            recall_score_text,
                                                                            precision_score_text)
        p.write_to_log(text)
        # restore train mode, mirroring the segmentation-aware test() variant
        model.train(mode=True)
        return loss_classification_sum, accuracy_classification_sum
# Example #3
# 0
    def train(self):
        """Two-phase training loop for the attention model.

        Epochs up to ``self.pre_train_epochs`` optimize only the attention
        module via ``train_segments`` (logged as PRETRAIN); later epochs
        optimize only the classifier via ``train_classifier`` (logged as
        TRAIN). Tracks the best-train-loss and best-test-loss weights and
        saves both snapshots at the end.
        """
        # gr/rgr presumably return the named parameter group of the model for
        # VGG vs. non-VGG architectures — TODO confirm against their definitions.
        if self.is_vgg_model:
            classifier_optimizer = torch.optim.Adam(gr.register_weights("classifier", self.am_model),
                                                    lr=self.classifier_learning_rate)
            attention_module_optimizer = torch.optim.Adam(gr.register_weights("attention", self.am_model),
                                                          lr=self.attention_module_learning_rate)
        else:
            classifier_optimizer = torch.optim.Adam(rgr.register_weights("classifier", self.am_model),
                                                    lr=self.classifier_learning_rate)
            attention_module_optimizer = torch.optim.Adam(rgr.register_weights("attention", self.am_model),
                                                          lr=self.attention_module_learning_rate)

        self.best_weights = copy.deepcopy(self.am_model.state_dict())
        best_loss = None
        best_test_loss = None

        while self.current_epoch <= self.train_epochs:

            accuracy_classification_sum_classifier = 0
            accuracy_classification_sum_segments = 0
            loss_l1_sum = 0
            # classifier_optimizer = self.apply_adaptive_learning(classifier_optimizer, learning_rate,
            #                                                       self.current_epoch)

            # Phase 1: pretrain the attention module against segment supervision.
            if self.current_epoch <= self.pre_train_epochs:
                accuracy_classification_sum_segments, loss_m_sum, loss_l1_sum, loss_classification_sum_classifier = \
                    self.train_segments(self.am_model, self.l_loss, self.m_loss, attention_module_optimizer,
                                        self.train_segments_set)
                attention_module_optimizer.zero_grad()
            # Phase 2: train the classifier head.
            else:
                loss_classification_sum_classifier, accuracy_classification_sum_classifier, loss_m_sum = \
                    self.train_classifier(self.am_model, self.l_loss, self.m_loss, classifier_optimizer,
                                          self.train_segments_set)
                classifier_optimizer.zero_grad()
            # Exactly one of the two accuracy terms is non-zero in any epoch.
            accuracy_total = accuracy_classification_sum_segments + accuracy_classification_sum_classifier
            loss_total = loss_classification_sum_classifier + loss_m_sum

            prefix = "PRETRAIN" if self.current_epoch <= self.pre_train_epochs else "TRAIN"
            f_1_score_text, recall_score_text, precision_score_text = metrics_processor.calculate_metric(self.classes,
                                                                                                         self.train_trust_answers,
                                                                                                         self.train_model_answers)

            text = "{}={} Loss_CL={:.5f} Loss_M={:.5f} Loss_L1={:.5f} Loss_Total={:.5f} Accuracy_CL={:.5f} " \
                   "{} {} {} ".format(prefix, self.current_epoch, loss_classification_sum_classifier,
                                      loss_m_sum,
                                      loss_l1_sum,
                                      loss_total,
                                      accuracy_total,
                                      f_1_score_text,
                                      recall_score_text,
                                      precision_score_text)

            p.write_to_log(text)

            # Periodic evaluation; keep the weights with the lowest test loss.
            if self.current_epoch % self.test_each_epoch == 0:
                test_loss, _ = self.test(self.am_model, self.test_set, self.l_loss, self.m_loss)
                if best_test_loss is None or test_loss < best_test_loss:
                    best_test_loss = test_loss
                    self.best_test_weights = copy.deepcopy(self.am_model.state_dict())
            # Periodic visual snapshots of train/test predictions.
            if self.current_epoch % 200 == 0:
                self.take_snapshot(self.train_segments_set, self.am_model, "TRAIN_{}".format(self.current_epoch))
                self.take_snapshot(self.test_set, self.am_model, "TEST_{}".format(self.current_epoch))

            # Keep the weights with the lowest training loss.
            if best_loss is None or loss_total < best_loss:
                best_loss = loss_total
                self.best_weights = copy.deepcopy(self.am_model.state_dict())

            self.clear_temp_metrics()
            self.current_epoch += 1

        self.save_model(self.best_test_weights)
        self.save_model(self.best_weights)
    def train(self):
        """Train ``self.am_model`` with a joint classification + (optional)
        segmentation loss over ``self.train_segments_set``, logging per-epoch
        metrics and running ``test()`` every ``self.test_each_epoch`` epochs.
        """
        optimizer = torch.optim.Adam(self.am_model.parameters(), self.classifier_learning_rate)

        while self.current_epoch <= self.train_epochs:

            # NOTE(review): loss_m_sum / loss_l1_sum are never updated in this
            # variant; they are logged as 0 to keep the log format uniform.
            loss_m_sum = 0
            loss_l1_sum = 0

            loss_classification_sum = 0
            loss_segmentation_sum = 0
            accuracy_sum = 0
            batch_count = 0
            self.am_model.train(mode=True)
            for images, segments, labels in self.train_segments_set:
                labels, segments = model_utils.reduce_to_class_number(self.left_class_number,
                                                                      self.right_class_number,
                                                                      labels,
                                                                      segments)
                images, labels, segments = self.convert_data_and_label(images, labels, segments)
                # one pulled (resized) segment tensor per attention branch
                segments_list = [puller(segments) for puller in self.puller]

                # calculate and optimize model
                optimizer.zero_grad()

                model_classification, model_segmentation = model_utils.wait_while_can_execute(self.am_model, images)
                classification_loss = self.l_loss(model_classification, labels)
                total_loss = classification_loss

                if self.use_mloss:
                    sum_segm_loss = None
                    for ms, sl in zip(model_segmentation, segments_list):
                        segmentation_loss = self.m_loss(ms, sl)
                        # BUGFIX: build new tensors instead of `+=`. The old
                        # in-place add went through total_loss, which aliased
                        # classification_loss, mutating it and corrupting both
                        # the logged classification loss and the autograd graph.
                        total_loss = total_loss + segmentation_loss
                        if sum_segm_loss is None:
                            sum_segm_loss = segmentation_loss
                        else:
                            sum_segm_loss = sum_segm_loss + segmentation_loss
                total_loss.backward()
                optimizer.step()

                output_probability, output_cl, cl_acc = self.calculate_accuracy(labels, model_classification,
                                                                                labels.size(0))

                optimizer.zero_grad()

                self.save_train_data(labels, output_cl, output_probability)

                # accumulate information
                accuracy_sum += model_utils.scalar(cl_acc.sum())
                loss_classification_sum += model_utils.scalar(classification_loss.sum())
                if self.use_mloss:
                    loss_segmentation_sum += model_utils.scalar(sum_segm_loss.sum())
                batch_count += 1

            # p.EPS guards against division by zero on an empty training set
            loss_classification_sum = loss_classification_sum / (batch_count + p.EPS)
            accuracy_sum = accuracy_sum / (batch_count + p.EPS)
            loss_segmentation_sum = loss_segmentation_sum / (batch_count + p.EPS)
            loss_total = loss_classification_sum + loss_m_sum + loss_segmentation_sum
            prefix = "TRAIN"
            f_1_score_text, recall_score_text, precision_score_text = metrics_processor.calculate_metric(
                self.classes, self.train_trust_answers, self.train_model_answers)

            text = "{}={} Loss_CL={:.5f} Loss_M={:.5f} Loss_L1={:.5f} Loss_Total={:.5f} Accuracy_CL={:.5f} " \
                   "{} {} {} ".format(prefix, self.current_epoch, loss_classification_sum,
                                      loss_m_sum,
                                      loss_l1_sum,
                                      loss_total,
                                      accuracy_sum,
                                      f_1_score_text,
                                      recall_score_text,
                                      precision_score_text)

            # BUGFIX: was `P.write_to_log` (capital P); the logging module is
            # bound as `p` everywhere else in this file (see p.EPS above).
            p.write_to_log(text)

            if self.current_epoch % self.test_each_epoch == 0:
                test_loss, _ = self.test(self.am_model, self.test_set, self.l_loss, self.m_loss)

            self.clear_temp_metrics()
            self.current_epoch += 1
# Example #5
# 0
    def train(self):
        """Train ``self.model`` (classification only) over
        ``self.train_segments_set``.

        Tracks the best weights by raw summed train loss and by test loss,
        and saves both snapshots when training finishes.
        """
        params = self.model.parameters()
        optimizer = torch.optim.Adam(params, lr=self.classifier_learning_rate, weight_decay=self.weight_decay)
        best_loss = None
        best_test_loss = None

        while self.current_epoch <= self.train_epochs:

            loss_classification_sum = 0
            accuracy_classification_sum = 0
            batch_count = 0

            self.model.train(mode=True)
            for images, segments, labels in self.train_segments_set:
                labels, segments = model_utils.reduce_to_class_number(self.left_class_number,
                                                                      self.right_class_number,
                                                                      labels,
                                                                      segments)
                images, labels, segments = self.convert_data_and_label(images, labels, segments)
                segments = self.puller(segments)

                # calculate and optimize model
                optimizer.zero_grad()

                model_classification = model_utils.wait_while_can_execute_single(self.model, images)
                sigmoid = nn.Sigmoid()  # used for calculate accuracy
                model_classification = sigmoid(model_classification)

                # original author note: "it may all be because of Inception"
                classification_loss = self.l_loss(model_classification, labels)
                classification_loss.backward()
                optimizer.step()

                output_probability, output_cl, cl_acc = self.calculate_accuracy(labels, model_classification,
                                                                                labels.size(0))

                self.save_train_data(labels, output_cl, output_probability)

                # accumulate information
                accuracy_classification_sum += model_utils.scalar(cl_acc.sum())
                loss_classification_sum += model_utils.scalar(classification_loss.sum())
                batch_count += 1

            # Keep the weights with the lowest raw (unnormalized) train loss.
            # BUGFIX: the original repeated this exact check a second time
            # after the test step; the duplicate was dead code (best_loss had
            # just been updated and the comparison is strict) and is removed.
            if best_loss is None or loss_classification_sum < best_loss:
                best_loss = loss_classification_sum
                self.best_weights = copy.deepcopy(self.model.state_dict())

            f_1_score_text, recall_score_text, precision_score_text = metrics_processor.calculate_metric(
                self.classes, self.train_trust_answers, self.train_model_answers)
            text = "TRAIN={} Loss_CL={:.10f} Accuracy_CL={:.5f} {} {} {} ".format(self.current_epoch,
                                                                                  loss_classification_sum / batch_count,
                                                                                  accuracy_classification_sum / batch_count,
                                                                                  f_1_score_text,
                                                                                  recall_score_text,
                                                                                  precision_score_text)
            p.write_to_log(text)
            # Periodic evaluation; keep the weights with the lowest test loss.
            if self.current_epoch % self.test_each_epoch == 0:
                test_loss, _ = self.test(self.model, self.test_set, self.l_loss)
                if best_test_loss is None or test_loss < best_test_loss:
                    best_test_loss = test_loss
                    self.best_test_weights = copy.deepcopy(self.model.state_dict())

            self.clear_temp_metrics()

            self.current_epoch += 1

        self.save_model(self.best_test_weights)
        self.save_model(self.best_weights)
# Example #6
# 0
    def train(self):
        """Train ``self.am_model`` with two optimizers — one for the classifier
        parameters and one for the attention module — each stepping on its own
        loss per batch. Logs per-epoch metrics, runs ``test()`` every
        ``self.test_each_epoch`` epochs, and takes snapshots every 200 epochs.
        """
        # gr/rgr presumably return the named parameter group of the model for
        # VGG vs. non-VGG architectures — TODO confirm against their definitions.
        if self.is_vgg_model:
            classifier_optimizer = torch.optim.Adam(gr.register_weights("classifier", self.am_model),
                                                    self.classifier_learning_rate)
            attention_module_optimizer = torch.optim.Adam(gr.register_weights("attention", self.am_model),
                                                          lr=self.attention_module_learning_rate)
        else:
            classifier_optimizer = torch.optim.Adam(rgr.register_weights("classifier", self.am_model),
                                                    self.classifier_learning_rate)
            attention_module_optimizer = torch.optim.Adam(rgr.register_weights("attention", self.am_model),
                                                          lr=self.attention_module_learning_rate)

        while self.current_epoch <= self.train_epochs:

            # NOTE(review): loss_m_sum / loss_l1_sum are never updated in this
            # variant; they are logged as 0 to keep the log format uniform.
            loss_m_sum = 0
            loss_l1_sum = 0

            loss_classification_sum = 0
            loss_segmentation_sum = 0
            accuracy_sum = 0
            batch_count = 0
            self.am_model.train(mode=True)
            for images, segments, labels in self.train_segments_set:
                labels, segments = model_utils.reduce_to_class_number(self.left_class_number,
                                                                      self.right_class_number,
                                                                      labels,
                                                                      segments)
                images, labels, segments = self.convert_data_and_label(images, labels, segments)
                segments = self.puller(segments)

                # calculate and optimize model
                classifier_optimizer.zero_grad()
                attention_module_optimizer.zero_grad()

                model_classification, model_segmentation = model_utils.wait_while_can_execute(self.am_model, images)
                segmentation_loss = self.m_loss(model_segmentation, segments)
                classification_loss = self.l_loss(model_classification, labels)
                # retain_graph=True: both losses share the forward graph, so it
                # must survive the first backward for the second one.
                classification_loss.backward(retain_graph=True)
                segmentation_loss.backward()

                classifier_optimizer.step()
                attention_module_optimizer.step()

                output_probability, output_cl, cl_acc = self.calculate_accuracy(labels, model_classification,
                                                                                labels.size(0))

                classifier_optimizer.zero_grad()
                attention_module_optimizer.zero_grad()

                self.save_train_data(labels, output_cl, output_probability)

                # accumulate information
                accuracy_sum += model_utils.scalar(cl_acc.sum())
                loss_classification_sum += model_utils.scalar(classification_loss.sum())
                loss_segmentation_sum += model_utils.scalar(segmentation_loss.sum())
                batch_count += 1

            # p.EPS guards against division by zero on an empty training set
            loss_classification_sum = loss_classification_sum / (batch_count + p.EPS)
            accuracy_sum = accuracy_sum / (batch_count + p.EPS)
            loss_segmentation_sum = loss_segmentation_sum / (batch_count + p.EPS)
            loss_total = loss_classification_sum + loss_m_sum + loss_segmentation_sum
            prefix = "PRETRAIN" if self.current_epoch <= self.pre_train_epochs else "TRAIN"
            f_1_score_text, recall_score_text, precision_score_text = metrics_processor.calculate_metric(
                self.classes, self.train_trust_answers, self.train_model_answers)

            text = "{}={} Loss_CL={:.5f} Loss_M={:.5f} Loss_L1={:.5f} Loss_Total={:.5f} Accuracy_CL={:.5f} " \
                   "{} {} {} ".format(prefix, self.current_epoch, loss_classification_sum,
                                      loss_m_sum,
                                      loss_l1_sum,
                                      loss_total,
                                      accuracy_sum,
                                      f_1_score_text,
                                      recall_score_text,
                                      precision_score_text)

            # BUGFIX: was `P.write_to_log` (capital P); the logging module is
            # bound as `p` everywhere else in this file (see p.EPS above).
            p.write_to_log(text)
            self.am_model.train(mode=False)
            if self.current_epoch % self.test_each_epoch == 0:
                test_loss, _ = self.test(self.am_model, self.test_set, self.l_loss, self.m_loss)
            if self.current_epoch % 200 == 0:
                self.take_snapshot(self.train_segments_set, self.am_model, "TRAIN_{}".format(self.current_epoch))
                self.take_snapshot(self.test_set, self.am_model, "TEST_{}".format(self.current_epoch))

            self.clear_temp_metrics()
            self.current_epoch += 1