Example #1
    def predict(self):
        train_pred = self.model.predict(self.X_train,
                                        batch_size=self.batch_size)
        dev_pred = self.model.predict(self.X_dev, batch_size=self.batch_size)
        test_pred = self.model.predict(self.X_test, batch_size=self.batch_size)

        # threshold probabilistic outputs to hard 0/1 labels
        train_pred = np.round(train_pred)
        dev_pred = np.round(dev_pred)
        test_pred = np.round(test_pred)

        self.dev_acc = accuracy_score(self.Y_dev, dev_pred)
        self.dev_precision = precision(self.Y_dev, dev_pred)
        self.dev_recall = recall(self.Y_dev, dev_pred)
        self.dev_f1 = f1_score(self.Y_dev, dev_pred)
        self.dev_false_pos_rate = false_positive_rate(self.Y_dev, dev_pred)

        self.test_acc = accuracy_score(self.Y_test, test_pred)
        self.test_precision = precision(self.Y_test, test_pred)
        self.test_recall = recall(self.Y_test, test_pred)
        self.test_f1 = f1_score(self.Y_test, test_pred)
        self.test_false_pos_rate = false_positive_rate(self.Y_test, test_pred)

        # track metrics from the epoch with the best dev accuracy so far
        if self.dev_acc > self.best_dev_acc:
            self.best_dev_acc = self.dev_acc
            self.best_dev_precision = self.dev_precision
            self.best_dev_recall = self.dev_recall
            self.best_dev_f1 = self.dev_f1
            self.best_dev_false_pos_rate = self.dev_false_pos_rate

            self.best_test_acc = self.test_acc
            self.best_test_precision = self.test_precision
            self.best_test_recall = self.test_recall
            self.best_test_f1 = self.test_f1
            self.best_test_false_pos_rate = self.test_false_pos_rate
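This example assumes project-local metric helpers alongside sklearn's accuracy_score. A minimal sketch of what they might look like for binary 0/1 labels, with signatures inferred from the calls above (the project's actual definitions may differ):

import numpy as np

def precision(y_true, y_pred):
    tp = np.sum((y_true == 1) & (y_pred == 1))
    fp = np.sum((y_true == 0) & (y_pred == 1))
    return tp / (tp + fp) if (tp + fp) else 0.0

def recall(y_true, y_pred):
    tp = np.sum((y_true == 1) & (y_pred == 1))
    fn = np.sum((y_true == 1) & (y_pred == 0))
    return tp / (tp + fn) if (tp + fn) else 0.0

def f1_score(y_true, y_pred):
    p, r = precision(y_true, y_pred), recall(y_true, y_pred)
    return 2 * p * r / (p + r) if (p + r) else 0.0

def false_positive_rate(y_true, y_pred):
    fp = np.sum((y_true == 0) & (y_pred == 1))
    tn = np.sum((y_true == 0) & (y_pred == 0))
    return fp / (fp + tn) if (fp + tn) else 0.0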
Example #2
def evaluate(dataset, model, target_size, return_pred=False, loss_function=None):
    results = []

    eval_loader = data_reader.data_loader(dataset, config.BATCH_SIZE, shuffle=False)
    n_batches = int(np.ceil(len(dataset)/config.BATCH_SIZE))
    pbar = tqdm(range(n_batches))
    model.eval()
    total_loss = 0.
    eval_iter = iter(eval_loader)  # build the iterator once; iter() inside the loop would re-yield the first batch every time
    for i in pbar:
        x, x_rc, md, y, x_len = next(eval_iter)
        score = model(x, x_rc, md)
        loss = 0.
        if loss_function is not None:
            loss = loss_function(score, y)
            total_loss += loss.mean().item()
        probs = torch.softmax(score, dim=1)
        results.append(probs.detach().cpu().numpy())
    model.train()

    results = np.vstack(results)

    if loss_function is not None:
        print('Val Loss: {:.5f}'.format(total_loss/n_batches))

    if return_pred:
        return results

    y_pred = np.argmax(results, axis=1)
    acc = accuracy(dataset['y'], y_pred)
    top10 = top10_accuracy_scorer(np.array(dataset['y']), results, target_size=target_size)
    f1 = f1_score(dataset['y'], y_pred)
    # print(acc, top10, f1)
    return acc, top10, f1
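top10_accuracy_scorer is project-specific; one plausible implementation, assuming target_size merely caps the number of ranked classes:

import numpy as np

def top10_accuracy_scorer(y_true, probs, target_size):
    # fraction of samples whose true label is among the k highest-scoring classes
    k = min(10, target_size)
    topk = np.argsort(probs, axis=1)[:, -k:]
    return float(np.mean([y in row for y, row in zip(y_true, topk)]))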
Example #3
def logistic_regression(train_data, train_labels, test_data, test_labels):

    print(f'{LogisticRegression.__name__}:')

    # Create and train model
    lr_model = LogisticRegression(train_data.shape[1], eta=0.001, epochs=50)
    model = OneVersusRest(lr_model)

    model.train(train_data, train_labels)

    # Predict 2000 validation set samples and calculate accuracy
    test_data_2k = test_data[:len(test_labels)]
    test_pred = model.predict(test_data_2k)

    # Print metrics
    print('\nTest Accuracy: {:.02f}%\n'.format(
        100 * accuracy(test_pred, test_labels)))
    mat, classes = confusion_matrix(test_pred, test_labels)
    print('Precision:\n{}\n'.format(
        np.round(precision(test_pred, test_labels), 2)))
    print('Recall:\n{}\n'.format(np.round(recall(test_pred, test_labels), 2)))
    print('F1:\n{}\n'.format(np.round(f1_score(test_pred, test_labels), 2)))
    print('Confusion Matrix:')
    print(mat)

    # Predict 10000 test set samples and save predictions
    print('Predicting 10k samples...')
    test_pred = model.predict(test_data)
    save_predictions(logistic_regression.__name__, test_pred)
    print('Saved 10k predictions.\n')
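Both this example and linear_svm below wrap a binary learner in OneVersusRest. A minimal sketch of such a wrapper, assuming the base learner exposes the same train/predict interface, that predict returns a per-sample confidence score, and that copy.deepcopy is an acceptable way to clone it:

import copy
import numpy as np

class OneVersusRest:
    def __init__(self, base_model):
        self.base_model = base_model
        self.models = {}

    def train(self, data, labels):
        # one binary model per class, trained on class-vs-rest targets
        for cls in np.unique(labels):
            model = copy.deepcopy(self.base_model)
            model.train(data, (labels == cls).astype(int))
            self.models[cls] = model

    def predict(self, data):
        # pick the class whose binary model scores each sample highest
        classes = sorted(self.models)
        scores = np.column_stack([self.models[c].predict(data) for c in classes])
        return np.array(classes)[np.argmax(scores, axis=1)]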
Example #4
def linear_svm(train_data, train_labels, test_data, test_labels):
    print(f'{LinearSVM.__name__}:')

    # Create and train model
    lsvm_model = LinearSVM(alpha=0.01, features=180)
    model = OneVersusRest(lsvm_model)

    model.train(train_data, train_labels)

    # Predict 2000 validation set samples and calculate accuracy
    test_data_2k = test_data[:len(test_labels)]
    test_pred = model.predict(test_data_2k)

    # Print metrics
    print('\nTest Accuracy: {:.02f}%\n'.format(
        100 * accuracy(test_pred, test_labels)))
    mat, classes = confusion_matrix(test_pred, test_labels)
    print('Precision:\n{}\n'.format(
        np.round(precision(test_pred, test_labels), 2)))
    print('Recall:\n{}\n'.format(np.round(recall(test_pred, test_labels), 2)))
    print('F1:\n{}\n'.format(np.round(f1_score(test_pred, test_labels), 2)))
    print('Confusion Matrix:')
    print(mat)

    # Predict 10000 test set samples and save predictions
    print('Predicting 10k samples...')
    test_pred = model.predict(test_data)
    save_predictions(linear_svm.__name__, test_pred)
    print('Saved 10k predictions.\n')
Example #5
def nearest_neighbour(train_data, train_labels, test_data, test_labels):

    print(f'{NearestNeighbour.__name__}:')

    # Create and train model
    model = NearestNeighbour(5, dist=manhattan)
    model.train(train_data, train_labels)

    # Predict 2000 validation set samples and calculate accuracy
    test_data_2k = test_data[:len(test_labels)]
    test_pred = model.predict(test_data_2k)

    # Print metrics
    print('\nTest Accuracy: {:.02f}%\n'.format(
        100 * accuracy(test_pred, test_labels)))
    mat, classes = confusion_matrix(test_pred, test_labels)
    print('Precision:\n{}\n'.format(
        np.round(precision(test_pred, test_labels), 2)))
    print('Recall:\n{}\n'.format(np.round(recall(test_pred, test_labels), 2)))
    print('F1:\n{}\n'.format(np.round(f1_score(test_pred, test_labels), 2)))
    print('Confusion Matrix:')
    print(mat)

    # Predict 10000 test set samples and save predictions
    print('Predicting 10k samples...')
    test_pred = model.predict(test_data)
    save_predictions(nearest_neighbour.__name__, test_pred)
    print('Saved 10k predictions.\n')
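The manhattan distance passed to NearestNeighbour is presumably the usual L1 metric; a one-line sketch:

import numpy as np

def manhattan(a, b):
    # L1 distance between flattened feature vectors
    return np.sum(np.abs(a - b), axis=-1)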
Example #6
    def process_sample(self, image, target):
        # ground-truth box coordinates, assumed already scaled to the 0-1 range
        ROI = target[:, :4]

        heart_presence = target[:, 4]
        heart_presence = heart_presence.to(torch.int64)

        gt_for_anc = target[:, 5:]

        ROI_pred, ROI_conf_pred, score_pred = self.model(image)
        with torch.no_grad():
            # when computing metrics, only take the most confident prediction into consideration
            # index of the most confident anchor per sample
            most_conf = ROI_conf_pred.argmax(dim=1)

            # batch x 4
            confident_ROI_pred = ROI_pred[torch.arange(len(most_conf)),
                                          most_conf]

            # batch x 4, get the corresponding anchor
            anchors = self.model.anchors_xyxy[most_conf]
            iou, iou_harsh = metrics.harsh_IOU(confident_ROI_pred, ROI,
                                               heart_presence, anchors)
            f1 = metrics.f1_score(score_pred, heart_presence)

        return ROI, heart_presence, gt_for_anc, ROI_pred, score_pred, ROI_conf_pred, [
            iou, iou_harsh, f1
        ]
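The fancy indexing ROI_pred[torch.arange(len(most_conf)), most_conf] selects one box per sample; a small self-check showing it is equivalent to torch.gather (shapes here are illustrative):

import torch

ROI_pred = torch.randn(8, 5, 4)        # batch x anchors x 4
most_conf = torch.randint(0, 5, (8,))  # best anchor index per sample
sel = ROI_pred[torch.arange(8), most_conf]
idx = most_conf.view(-1, 1, 1).expand(-1, 1, 4)
assert torch.equal(sel, torch.gather(ROI_pred, 1, idx).squeeze(1))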
Example #7
def calc_metric(pred, true):
    tmp_jac = jaccard(pred, true)
    tmp_ham = hamming(pred, true)
    tmp_prec = precision(pred, true)
    tmp_rec = recall(pred, true)
    tmp_f1 = f1_score(pred, true)
    tmp_acc = accuracy(pred, true)
    return tmp_jac, tmp_ham, tmp_prec, tmp_rec, tmp_f1, tmp_acc
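In Example #8 below the same helpers are applied to sets of predicted and true types, so plausible set-based definitions look like this (the project's own versions, in particular hamming and its universe of types, may differ):

def jaccard(pred, true):
    pred, true = set(pred), set(true)
    return len(pred & true) / len(pred | true) if (pred | true) else 1.0

def precision(pred, true):
    pred, true = set(pred), set(true)
    return len(pred & true) / len(pred) if pred else 0.0

def recall(pred, true):
    pred, true = set(pred), set(true)
    return len(pred & true) / len(true) if true else 0.0

def f1_score(pred, true):
    p, r = precision(pred, true), recall(pred, true)
    return 2 * p * r / (p + r) if (p + r) else 0.0

def accuracy(pred, true):
    # exact-match accuracy over the two sets
    return float(set(pred) == set(true))

def hamming(pred, true, universe_size=10):
    # normalized symmetric difference over an assumed fixed universe of types
    return len(set(pred) ^ set(true)) / universe_size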
Example #8
def card(corpus):
    global vectorizer
    jac_list = []
    ham_list = []
    prec_list = []
    rec_list = []
    f1_list = []
    acc_list = []
    id2term = {}
    for k, v in vectorizer.vocabulary_.items():
        id2term[v] = k
    for idx, item in enumerate(corpus):
        docstring = vectorizer.transform([item[-1]])[0]
        true_types = item[3]
        terms = [id2term[i] for i in docstring.indices]
        pred_types = []
        # try each basic type's hint terms to see whether any of them match;
        # a term matching the pattern `\w+(\_)?name|\w+(\_)?method`
        # is more likely to be a `str` type
        for basic_type in basic_types:
            h_terms = hint_terms[basic_type]
            if set(terms) & set(h_terms):
                pred_types.append('List' if basic_type ==
                                  'Tuple' else basic_type)
            elif basic_type == 'str':
                for term in terms:
                    if type_to_regexp[basic_type].match(term):
                        pred_types.append(basic_type)
                        break
        # if both `Dict` and `str` are in pred_types, drop `str`:
        # the two types hardly ever occur together
        if 'Dict' in pred_types and 'str' in pred_types:
            pred_types.remove('str')
        # the type `Type` usually occurs on its own
        if 'Type' in pred_types and len(pred_types) > 1:
            pred_types.remove('Type')
        # if pred_types is empty, check whether any term's part of speech (POS)
        # is NNS or NNPS; a plural noun suggests the variable is a `List`
        if not pred_types:
            pos_tags = [tag for w, tag in nltk.pos_tag(terms)]
            if set(['NNS', 'NNPS']) & set(pos_tags):
                pred_types = ['List']

        pred_types = set(pred_types)
        included_types = set(true_types) - set(['NoneType'])
        if 'Tuple' in included_types:
            included_types -= set(['Tuple'])
            included_types.add('List')

        # accumulate the per-sample metrics
        jac_list.append(jaccard(pred_types, included_types))
        ham_list.append(hamming(pred_types, included_types))
        prec_list.append(precision(pred_types, included_types))
        rec_list.append(recall(pred_types, included_types))
        f1_list.append(f1_score(pred_types, included_types))
        acc_list.append(accuracy(pred_types, included_types))
    return jac_list, ham_list, prec_list, rec_list, f1_list, acc_list
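card() depends on several module-level globals. A sketch of their assumed shapes (values here are illustrative, except the `str` pattern, which is quoted in the comment above):

import re
from sklearn.feature_extraction.text import CountVectorizer

basic_types = ['str', 'int', 'float', 'bool', 'List', 'Tuple', 'Dict', 'Set', 'Type']
# hint terms per basic type (illustrative entries only)
hint_terms = {t: [] for t in basic_types}
hint_terms['str'] = ['name', 'path', 'string']
hint_terms['bool'] = ['flag', 'whether']
# the pattern quoted in the comment above for `str`-like terms
type_to_regexp = {'str': re.compile(r'\w+(\_)?name|\w+(\_)?method')}
vectorizer = CountVectorizer()  # must be fitted on the docstring corpus before card() runs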
Example #9
    def predict(self):
        train_pred = self.model.predict(self.X_train,
                                        batch_size=self.batch_size)
        dev_pred = self.model.predict(self.X_dev, batch_size=self.batch_size)
        test_pred = self.model.predict(self.X_test, batch_size=self.batch_size)

        # threshold probabilistic outputs to hard 0/1 labels
        train_pred = np.round(train_pred)
        dev_pred = np.round(dev_pred)
        test_pred = np.round(test_pred)

        self.dev_acc = accuracy_score(self.Y_dev, dev_pred)
        self.dev_precision = precision(self.Y_dev, dev_pred)
        self.dev_recall = recall(self.Y_dev, dev_pred)
        self.dev_f1 = f1_score(self.Y_dev, dev_pred)
        self.dev_false_pos_rate = false_positive_rate(self.Y_dev, dev_pred)

        self.test_acc = accuracy_score(self.Y_test, test_pred)
        self.test_precision = precision(self.Y_test, test_pred)
        self.test_recall = recall(self.Y_test, test_pred)
        self.test_f1 = f1_score(self.Y_test, test_pred)
        self.test_false_pos_rate = false_positive_rate(self.Y_test, test_pred)

        # track metrics (and save the model) from the epoch with the best dev accuracy so far
        if self.dev_acc > self.best_dev_acc:
            self.best_dev_acc = self.dev_acc
            self.best_dev_precision = self.dev_precision
            self.best_dev_recall = self.dev_recall
            self.best_dev_f1 = self.dev_f1
            self.best_dev_false_pos_rate = self.dev_false_pos_rate

            self.best_test_acc = self.test_acc
            self.best_test_precision = self.test_precision
            self.best_test_recall = self.test_recall
            self.best_test_f1 = self.test_f1
            self.best_test_false_pos_rate = self.test_false_pos_rate

            # save model architecture to JSON
            model_json = self.model.to_json()
            with open(self.save_path + '.json', 'w') as json_file:
                json_file.write(model_json)
            # SAVE WEIGHTS
            self.model.save_weights(self.save_path + '.h5')
            logger.info("Model saved")
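The saved files can be restored later with Keras' model_from_json; a sketch of the loading counterpart, following the save_path convention above:

from keras.models import model_from_json

with open(save_path + '.json') as json_file:
    model = model_from_json(json_file.read())
model.load_weights(save_path + '.h5')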
Example #10
def neural_net(train_data, train_labels, test_data, test_labels):

    print(f'{NeuralNetwork.__name__}:')

    # Create and train model
    model = NeuralNetwork([
        FlatDenseLayer((784, ), activation=tanh),
        FlatDenseLayer((100, ), activation=tanh),
        FlatDenseLayer((20, ), activation=tanh),
        FlatDenseLayer((10, ), activation=sigmoid),
    ],
                          eta=0.01,
                          batch_size=64,
                          epochs=250)

    model.train(train_data, train_labels)

    # Predict 2000 validation set samples and calculate accuracy
    test_data_2k = test_data[:len(test_labels)]
    test_activations, test_pred = model.predict(test_data_2k)

    # Print metrics
    print('\nTest Accuracy: {:.02f}%\n'.format(
        100 * accuracy(test_pred, test_labels)))
    mat, classes = confusion_matrix(test_pred, test_labels)
    print('Precision:\n{}\n'.format(
        np.round(precision(test_pred, test_labels), 2)))
    print('Recall:\n{}\n'.format(np.round(recall(test_pred, test_labels), 2)))
    print('F1:\n{}\n'.format(np.round(f1_score(test_pred, test_labels), 2)))
    print('Confusion Matrix:')
    print(mat)

    # Predict 10000 test set samples and save predictions
    print('Predicting 10k samples...')
    test_activations, test_pred = model.predict(test_data)
    print(len(test_pred))
    save_predictions(neural_net.__name__, test_pred)
    print('Saved 10k predictions.\n')
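The tanh and sigmoid activations passed to FlatDenseLayer are presumably plain NumPy functions (the project likely also carries their derivatives for backprop); minimal versions:

import numpy as np

def tanh(x):
    return np.tanh(x)

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))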
Example #11
def feedback():
    """ opens alpha dataset, splits 75-25 percent for train-feedback """
    print("FEEDBACK")
    global END_TRAINING

    diag = Dialogflow(evaluation=True)
    all_data = dataset.read('extraction', 'both')['intents']

    all_intents = []
    for case in all_data:
        intent = []
        for part in case['parts']:
            intent.append(part)
        all_intents.append(intent)

    num_repeats = 1

    with open(config.EXTRACTION_RESULTS_PATH.format('feedback', 'single'),
              'w') as csvfile:
        csv_writer = csv.writer(csvfile, delimiter=',')
        csv_writer.writerow([
            "repeat", "feedback_round", "text", "recognized_entities",
            "expected_entities", "tp", "fp", "fn", "recall", "precision",
            "f1_score"
        ])

        for repeat in range(num_repeats):
            n_samples = int(floor(len(all_intents) * 0.25))
            training = sample(all_intents, n_samples)
            # keep feedback disjoint from training; sampling twice from all_intents could leak cases
            feedback = [case for case in all_intents if case not in training]

            print("DATASET CASES TRAIN #", len(training))
            print("DATASET CASES FEEDBACK #", len(feedback))

            diag.update_intent(INTENT_ID, training, False)
            training_begin = diag.train_agent(training_callback)

            time_elapsed = None
            while True:
                if END_TRAINING:
                    time_elapsed = (END_TRAINING - training_begin)
                    print("Training time: ", time_elapsed)
                    break
                time.sleep(60)

            print("Testing...")

            results = []
            shuffle(feedback)
            for idx, feedback_case in enumerate(feedback):
                print("intent", idx)
                result = diag.detect_intent_texts([feedback_case])[0]
                rec = metrics.recall(result['tp'], result['fn'])
                prec = metrics.precision(result['tp'], result['fp'])
                f1_sc = metrics.f1_score(prec, rec)
                print(result['text'])
                print('recall: ', rec)
                print('precision: ', prec)
                print('f1_score: ', f1_sc)

                csv_writer.writerow([
                    repeat, idx, result['text'], result['recognized_entities'],
                    result['expected_entities'], result['tp'], result['fp'],
                    result['fn'], rec, prec, f1_sc
                ])

                if result['fp'] != 0 or result['fn'] != 0:
                    training.append(feedback_case)
                    print("DATASET CASES TRAIN #", len(training))

                    diag.update_intent(INTENT_ID, training, False)
                    END_TRAINING = None
                    training_begin = diag.train_agent(training_callback)

                    time_elapsed = None
                    while True:
                        if END_TRAINING:
                            time_elapsed = (END_TRAINING - training_begin)
                            print("Training time: ", time_elapsed)
                            break
                        time.sleep(60)

            csv_writer.writerow(["DATASET CASES TRAIN #", len(training)])
Example #12
def run(dtype):
    """ opens specific dataset, splits 75-25 percent for train-test and runs extraction """
    print("DATASET ", dtype)
    global END_TRAINING
    data = dataset.read('extraction', dtype)['intents']
    intents = []
    for case in data:
        intent = []
        for part in case['parts']:
            intent.append(part)
        intents.append(intent)

    print("DATASET CASES #", len(intents))

    highest_precision = 0
    highest_recall = 0
    highest_f1 = 0
    highest_try = 0
    num_tries = 0

    while num_tries < 30:
        num_tries += 1
        END_TRAINING = None

        n_samples = int(ceil(len(intents) * 0.75))
        training = sample(intents, n_samples)
        # keep validation disjoint from training; sampling twice from intents could leak cases
        validation = [case for case in intents if case not in training]

        diag = Dialogflow(evaluation=True)
        diag.update_intent(INTENT_ID, training, False)
        training_begin = diag.train_agent(training_callback)

        time_elapsed = None
        while True:
            if END_TRAINING:
                time_elapsed = (END_TRAINING - training_begin)
                print("Training time: ", time_elapsed)
                break
            time.sleep(50)  # poll periodically instead of busy-waiting

        print("Testing...")
        results = diag.detect_intent_texts(validation)
        with open(config.EXTRACTION_RESULTS_PATH.format(dtype, num_tries),
                  'w') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter=',')
            csv_writer.writerow([
                "text", "recognized_entities", "expected_entities",
                "training_time", "recall", "precision", "f1_score"
            ])

            mean_precision = 0
            mean_recall = 0
            num_entries = len(results)

            for result in results:
                rec = metrics.recall(result['tp'], result['fn'])
                prec = metrics.precision(result['tp'], result['fp'])
                f1_sc = metrics.f1_score(prec, rec)

                mean_precision += prec
                mean_recall += rec
                print(result['text'])
                print('recall: ', rec)
                print('precision: ', prec)
                print('f1_score: ', f1_sc)
                csv_writer.writerow([
                    result['text'], result['recognized_entities'],
                    result['expected_entities'], time_elapsed, rec, prec, f1_sc
                ])

            mean_precision /= num_entries
            mean_recall /= num_entries
            mean_f1 = metrics.f1_score(mean_precision, mean_recall)
            csv_writer.writerow(["Mean Precision", mean_precision])
            csv_writer.writerow(["Mean Recall", mean_recall])
            csv_writer.writerow(["Mean F1", mean_f1])

            print("Mean Precision", mean_precision)
            print("Mean Recall", mean_recall)
            print("Mean F1", mean_f1)

            if mean_f1 > highest_f1:
                highest_f1 = mean_f1
                highest_precision = mean_precision
                highest_recall = mean_recall
                highest_try = num_tries

    print("Highest Precision", highest_precision)
    print("Highest Recall", highest_recall)
    print("Highest F1", highest_f1)
    print("Highest Try", highest_try)
Example #13
rff_dim = 256
feature_sizes = [4, 8, 12, 16]

validation_split = .05

# Load training data
X, X_coord, y = load_tiselac(training_set=True, shuffle=True, random_state=0)

# Load model
fname_model = "output/models_rff/4-8-12-16.256.01094-0.3299.weights.hdf5"
# d (per-timestep dimensionality), sz (series length) and n_classes are defined upstream of this excerpt
dict_dims = {(d * f_sz + 1): sz - f_sz + 1 for f_sz in feature_sizes}
model = model_mk_rff(input_dimensions=dict_dims, embedding_dim=rff_dim, n_classes=n_classes, side_info_dim=2)
model.compile(loss="categorical_crossentropy", optimizer="rmsprop", metrics=["accuracy"])
model.load_weights(fname_model)

for n_valid in [X.shape[0], int(validation_split * X.shape[0])]:
    X_valid = X[-n_valid:]
    X_coord_valid = X_coord[-n_valid:]
    y_valid = y[-n_valid:]

    feats_8_12_16 = ecml17_tiselac_data_preparation(X_valid, d=d, feature_sizes=tuple(feature_sizes), use_time=True)

    y_pred = model.predict(feats_8_12_16 + [X_coord_valid], verbose=False)
    eval_model = model.evaluate(feats_8_12_16 + [X_coord_valid], y_valid, verbose=False)
    if n_valid == X.shape[0]:
        print("Full training set")
    else:
        print("Validation set")
    print("Correct classification rate:", eval_model[1])
    print("F1-score:", f1_score(y_true=y_valid, y_pred=y_pred))
Example #14
    def evaluating(self, model, dataset, split):
        """
          input:
            model: (object) pytorch model
            dataset: (object) dataset
            split: (str) split of dataset in ['train', 'val', 'test']
          return [overall_accuracy, precision, recall, f1-score, jaccard, kappa]
        """
        args = self.args
        oa, precision, recall, f1, jac, kappa = 0, 0, 0, 0, 0, 0
        model.eval()
        data_loader = DataLoader(dataset,
                                 args.batch_size,
                                 num_workers=4,
                                 shuffle=False)
        batch_iterator = iter(data_loader)
        steps = len(dataset) // args.batch_size

        start = time.time()
        for step in range(steps):
            x, y = next(batch_iterator)
            if args.cuda:
                x = x.cuda()
                y = y.cuda()
            # inference only: torch.no_grad() replaces the deprecated Variable(..., volatile=True)
            with torch.no_grad():
                gen_y = model(x)
            if self.is_multi:
                gen_y = gen_y[0]
            oa += metrics.overall_accuracy(gen_y.data, y.data)
            precision += metrics.precision(gen_y.data, y.data)
            recall += metrics.recall(gen_y.data, y.data)
            f1 += metrics.f1_score(gen_y.data, y.data)
            jac += metrics.jaccard(gen_y.data, y.data)
            kappa += metrics.kappa(gen_y.data, y.data)

        _time = time.time() - start

        if not os.path.exists(os.path.join(Logs_DIR, 'statistic')):
            os.makedirs(os.path.join(Logs_DIR, 'statistic'))

        # recording performance of the model
        nb_samples = steps * args.batch_size
        basic_info = [
            self.date, self.method, self.epoch, self.iter, nb_samples, _time
        ]
        basic_info_names = [
            'date', 'method', 'epochs', 'iters', 'nb_samples', 'time(sec)'
        ]

        perform = [
            round(idx / steps, 3)
            for idx in [oa, precision, recall, f1, jac, kappa]
        ]
        perform_names = [
            "overall_accuracy", "precision", "recall", "f1-score", "jaccard",
            "kappa"
        ]
        cur_log = pd.DataFrame([basic_info + perform],
                               columns=basic_info_names + perform_names)
        # save performance
        if os.path.exists(
                os.path.join(Logs_DIR, 'statistic', "{}.csv".format(split))):
            logs = pd.read_csv(
                os.path.join(Logs_DIR, 'statistic', "{}.csv".format(split)))
        else:
            logs = pd.DataFrame([])
        logs = pd.concat([logs, cur_log], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
        logs.to_csv(os.path.join(Logs_DIR, 'statistic',
                                 "{}.csv".format(split)),
                    index=False,
                    float_format='%.3f')
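A plausible definition of the pixel-wise overall_accuracy accumulated above, assuming gen_y carries per-class scores and y one-hot targets (the project's metrics module may define it differently):

def overall_accuracy(gen_y, y):
    # fraction of pixels whose predicted class matches the target class
    pred = gen_y.argmax(dim=1)
    target = y.argmax(dim=1)
    return (pred == target).float().mean().item()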
Example #15
def train(model,
          epoch_num,
          start_epoch,
          optimizer,
          criterion,
          exp_lr_scheduler,
          data_set,
          data_loader,
          save_dir,
          print_inter=200,
          val_inter=3500,
          ):
    writer = SummaryWriter(save_dir)
    best_model_wts = model.state_dict()
    best_f1 = 0
    val_loss = 0
    train_loss = 0
    step = -1
    loss_fn = weighted_mse_loss  # define once: both the train and val phases below use it
    for epoch in range(start_epoch, epoch_num):
        # train phase
        # exp_lr_scheduler.step(epoch)
        model.train(True)  # Set model to training mode


        for batch_cnt, data in enumerate(data_loader['train']):
            step += 1
            if step % val_inter == 0:
                # val phase
                model.eval()
                t0 = time.time()

                test_precisions, test_recalls, test_f1_scores, val_loss = predict(loss_fn, model, data_set['val'], data_loader['val'], counting=False)

                t1 = time.time()
                since = t1 - t0

                logging.info('--' * 30)
                # logging.info('current lr:%s' % exp_lr_scheduler.get_lr())
                logging.info('%s epoch[%d] | val_loss: %.4f | precisions: %.4f | recalls: %.4f | f1_scores: %.4f | time: %d'
                             % (dt(), epoch, val_loss, test_precisions, test_recalls, test_f1_scores,  since))

                if test_f1_scores > best_f1:
                    best_f1 = test_f1_scores
                    best_model_wts = deepcopy(model.state_dict())

                # save model
                save_path1 = os.path.join(save_dir,
                                          'weights-%d-%d-[%.3f].pth' % (epoch, batch_cnt, test_f1_scores))
                torch.save(model.state_dict(), save_path1)
                save_path2 = os.path.join(save_dir,
                                          'optimizer-state.pth')
                torch.save(optimizer.state_dict(), save_path2)

                logging.info('saved model to %s' % (save_path1))
                logging.info('--' * 30)


            model.train(True)
            imgs, masks, _ = data

            imgs = Variable(imgs.cuda())
            masks = Variable(masks.cuda(),requires_grad=False)

            # zero the parameter gradients
            optimizer.zero_grad()

            outputs = model(imgs)

            # accumulate the mask loss over every side output of the network
            mask_loss = torch.zeros(1).cuda()
            for o in outputs:
                o = o.view(-1, o.size()[2], o.size()[3])
                mask_loss = mask_loss + loss_fn(o, masks)

            train_loss = mask_loss
            train_loss.backward()
            optimizer.step()
            # compute batch-level metrics from the final side output
            output = outputs[-1]
            output = output.view(-1, output.size()[2], output.size()[3])
            true_positives, predicted_positives, possible_positives, union_areas = metrics_pred(output.data.cpu().numpy(),\
                             imgs.cpu().data.numpy(), masks.cpu().data.numpy())

            train_precisions = precision(true_positives, predicted_positives)
            train_recalls = recall(true_positives, possible_positives)
            train_f1_scores = f1_score(train_recalls, train_precisions)

            if step % print_inter == 0:
                logging.info('%s [%d-%d] | train_loss: %.4f | precisions: %.4f | recalls: %.4f | f1_scores: %.4f'
                             % (dt(), epoch, batch_cnt, train_loss, train_precisions, train_recalls, train_f1_scores))

            # plot image
            if step % (print_inter) == 0:
                smp_img = imgs[0]  # (3, H, W)
                true_hm = masks[0]  #(H,W)
                pred_hm = output[0]

                imgs_to_plot = getPlotImg(smp_img, pred_hm, true_hm)

                # for TensorBoard
                imgs_to_plot = torch.from_numpy(imgs_to_plot.transpose((0,3,1,2))/255.0)
                grid_image = make_grid(imgs_to_plot, 2)
                writer.add_image('plotting',grid_image, step)
                writer.add_scalar('train_loss', train_loss , step)
                writer.add_scalar('val_loss', val_loss, step)


    # save best model
    save_path = os.path.join(save_dir,
                             'bestweights-[%.3f].pth' % (best_f1))
    torch.save(best_model_wts, save_path)
    logging.info('saved model to %s' % (save_path))

    return best_f1, best_model_wts
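weighted_mse_loss is project-specific; a plausible sketch that up-weights positive (mask) pixels, with pos_weight as an assumed knob:

import torch

def weighted_mse_loss(pred, target, pos_weight=10.0):
    # emphasize positive (mask) pixels in the squared error
    weights = torch.ones_like(target)
    weights[target > 0] = pos_weight
    return (weights * (pred - target) ** 2).mean()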
Example #16
def print_eval(model, X, y):
    y_pred = model.predict(X, verbose=False)
    eval_model = model.evaluate(X, y, verbose=False)
    print("Correct classification rate:", eval_model[1])
    print("F1-score:", f1_score(y_true=y, y_pred=y_pred))