Example #1
        if ACC:
            SCORE_test = ACC_test
            valid_msg = 'Epoch {0} \tVALID Acc : {3}{2:0.4}{4}\tTEST Acc : {6}{5:0.4}{7}'.format(
                epoch, np.mean(losses), ACC_valid, U.BColors.BGREEN,
                U.BColors.ENDC, ACC_test, U.BColors.BGREEN, U.BColors.ENDC)

        else:
            QWK_test = U.nkappa(TEST_Y, TEST_PRED)
            QWK_test_int = K.ikappa(TEST_Y, TEST_PRED, yw)
            SCORE_test = QWK_test
            valid_msg = 'Epoch {0} \tVALID Kappa : {3}{2:0.4}{4} [{8:0.4}]\tTEST Kappa : {6}{5:0.4}{7} [{9:0.4}]'.format(
                epoch, np.mean(losses), QWK_valid, U.BColors.BGREEN,
                U.BColors.ENDC, QWK_test, U.BColors.BGREEN, U.BColors.ENDC,
                QWK_valid_int, QWK_test_int)

        if SCORE_valid > best_valid_score:
            best_valid_score = SCORE_valid
            best_valid_msg = valid_msg
            if ROC: U.save_roc(FLAGS.roc_test_file, TEST_Y, TEST_PRED, I)

        if FLAGS.save_model:
            if best_ckpt_saver.handle(SCORE_valid, sess, global_step):
                if ROC: U.save_roc(FLAGS.roc_test_file, TEST_Y, TEST_PRED, I)
        '''
        ## HowTo: load from checkpoint
        saver.restore(sess, checkmate.get_best_checkpoint(FLAGS.load_chkpt_dir))
        '''

        print('[CURRENT]\t' + valid_msg)
        print('[BEST]\t\t' + best_valid_msg)

        if ACC:
            test_msg = 'Epoch {0} \tTEST Acc : {3}{2:0.4}{4}'.format(
                epoch, np.mean(losses), ACC_test, U.BColors.BGREEN,
                U.BColors.ENDC)
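The checkpoint handling above appears to use the vonclites checkmate helper (BestCheckpointSaver.handle plus checkmate.get_best_checkpoint). A minimal sketch of how such a saver could be wired into a TF1 session; the directory, num_to_keep and the session setup here are illustrative assumptions, not taken from this snippet:

import tensorflow as tf
from checkmate import BestCheckpointSaver, get_best_checkpoint

# Keep the three best checkpoints, ranked by validation score (higher is better).
best_ckpt_saver = BestCheckpointSaver(save_dir='./best_ckpt',
                                      num_to_keep=3,
                                      maximize=True)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # inside the training loop, after computing SCORE_valid:
    #     best_ckpt_saver.handle(SCORE_valid, sess, global_step)

    # restoring the best checkpoint found so far:
    saver = tf.train.Saver()
    saver.restore(sess, get_best_checkpoint('./best_ckpt',
                                            select_maximum_value=True))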
Example #2
def evalute(epoch,
            epochs,
            model,
            model_name,
            loader,
            dataset,
            criterion,
            threshold=0.5,
            write_log='',
            _type='Val',
            draw_roc=False,
            title=None,
            name=None,
            save_pred=False,
            save_scores=False):
    begin = time.time()
    if write_log != '':
        logs = open(write_log, 'a')
    model.eval()
    with torch.no_grad():
        running_corrects = 0
        running_loss = 0.0
        all_preds = []
        all_targets = []
        all_scores = []
        for i, (images, labels) in enumerate(loader):
            images = images.cuda()
            labels = labels.cuda()

            if model_name == 'ws_dan_resnet50':
                outputs, _, attention_map = model(images)
                batch_loss = criterion[0](outputs, labels)

                # mask crop
                attention_map = torch.mean(attention_map, dim=1).unsqueeze(1)
                attention_map = F.interpolate(attention_map,
                                              size=(images.size(2),
                                                    images.size(3)))
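                # mask2bbox (helper not shown; a sketch follows Example #4)
                # presumably converts each attention map into a 2x3 affine
                # theta that crops its high-attention bounding box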
                thetas = mask2bbox(attention_map)
                thetas = torch.from_numpy(thetas).cuda()
                grid = F.affine_grid(thetas, images.size(), align_corners=True)
                crop_images = F.grid_sample(images, grid, align_corners=True)
                outputs1, _, _ = model(crop_images)
                mask_loss = criterion[0](outputs1, labels)

                loss = (batch_loss + mask_loss) / 2
                outputs = outputs + outputs1
            else:
                outputs = model(images)
                loss = criterion[0](outputs, labels)

            outputs = torch.softmax(outputs, dim=1)
            # _, preds = torch.max(outputs, 1)
            preds = torch.tensor(
                [1 if item[1] >= threshold else 0 for item in outputs]).cuda()
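            # equivalent vectorized form, avoiding the Python-level loop:
            #     preds = (outputs[:, 1] >= threshold).long()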
            running_loss += loss.item() * images.size(0)
            running_corrects += torch.sum(preds == labels.data)
            all_preds.append(preds.cpu().numpy().copy())
            all_targets.append(labels.cpu().numpy().copy())
            all_scores.append(outputs.cpu().numpy().copy())
        all_preds = np.concatenate(all_preds)
        all_targets = np.concatenate(all_targets)
        all_scores = np.concatenate(all_scores)

        auc = roc_auc_score(all_targets, all_scores[:, 1])
        cm, sens, spec, f1 = avg_f1(all_targets, all_preds)
        epoch_loss = running_loss / len(loader.dataset)
        epoch_acc = running_corrects.double() / len(loader.dataset)

        auc_confidence, sens_confidence, spec_confidence = confidence_interval(
            cm, auc, sens, spec)

        log = '{} Epoch: {}/{}, Loss: {:.4f}, ' \
              'Acc: {:.0f}/{}, {:.4f}, Auc: {:.4f}, ' \
              'sens: {:.4f}, spec: {:.4f}, f1: {:.4f}, ' \
              'Time: {:.0f}s'.format(_type, epoch+1, epochs, epoch_loss,
                                     running_corrects, len(loader.dataset),
                                     epoch_acc, auc, sens, spec, f1, time.time()-begin)

        log_confidence = 'auc_confidence: {}, sens_confidence: {}, spec_confidence: {}'.format(
            np.round(auc_confidence, 4), np.round(sens_confidence, 4),
            np.round(spec_confidence, 4))

        print(log)
        print(log_confidence)
        # print(cm)
        if write_log != '':
            logs.write(log + '\n')
            logs.close()

        if draw_roc:
            save_roc(all_targets, all_scores[:, 1], title, name)

        if save_scores:
            true_positives, true_negatives, missing_report, mistake_report = save_result(
                dataset, all_targets, all_scores, all_preds, name)

            if save_pred:
                return epoch_acc.item(), auc.item(), sens.item(), spec.item(), f1.item(), cm, all_preds, \
                       auc_confidence, sens_confidence, spec_confidence, \
                       true_positives, true_negatives, missing_report, mistake_report
            else:
                return epoch_acc.item(), auc.item(), sens.item(), spec.item(), f1.item(), cm
        else:
            if save_pred:
                return epoch_acc.item(), auc.item(), sens.item(), spec.item(), f1.item(), cm, all_preds, \
                       auc_confidence, sens_confidence, spec_confidence

            else:
                return epoch_acc.item(), auc.item(), sens.item(), spec.item(), f1.item(), cm
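Every example here calls some variant of a save_roc helper that is not shown, and the signature differs per repo (Example #1 passes a file path with labels and predictions; this one passes targets, class-1 scores, a plot title and an output name). A minimal sketch matching this example's call, built on sklearn and matplotlib; the styling and the output filename scheme are assumptions:

import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc


def save_roc(targets, scores, title, name):
    # targets: binary ground-truth labels; scores: P(class 1) per sample
    fpr, tpr, _ = roc_curve(targets, scores)
    roc_auc = auc(fpr, tpr)

    plt.figure()
    plt.plot(fpr, tpr, label='AUC = {:.4f}'.format(roc_auc))
    plt.plot([0, 1], [0, 1], linestyle='--')  # chance diagonal
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.title(title)
    plt.legend(loc='lower right')
    plt.savefig('{}_roc.png'.format(name))  # assumed naming scheme
    plt.close()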
Example #3
def write_summary(train_loss, valid_loss, anomal_loss, start_thr, end_thr,
                  false_positives, true_positives, training_time,
                  best_features, io_dim):
    '''
    Saves loss curves, the ROC curve and model parameters.
    '''
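    # NOTE: model_name, dataset_path, seq_length, layers_stacked_count,
    # batch_size, epoch, learning_rate and step_thr are not parameters of
    # this function; the original script reads them from module globals.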
    # Init directory
    summary_dir = './Summaries/'
    model_dir = summary_dir + model_name + '/'
    while os.path.exists(model_dir):
        print('[ ! ] Model summary already exists.')
        tmp_name = input('[ ? ] Choose new name for model directory: ')
        model_dir = summary_dir + tmp_name + '/'
    os.makedirs(model_dir)

    # Save loss curves
    loss_name = model_dir + 'losses.png'
    utils.save_losses(train_loss, valid_loss, anomal_loss, loss_name)

    # Save ROC curve
    roc_name = model_dir + 'roc.png'
    utils.save_roc(false_positives, true_positives, roc_name)

    # Save parameters file
    parameters_file = model_dir + 'parameters.txt'
    parameters = open(parameters_file, 'w')

    parameters.write('Document creation : {}\n'.format(time.strftime("%c")))
    parameters.write('Model name : {}\n'.format(model_name))
    parameters.write('Model type : seq2seq\n')
    parameters.write('Dataset : {}\n'.format(dataset_path))

    parameters.write('\nSequence length : {}\n'.format(seq_length))
    parameters.write('Sequence dimension : {}\n'.format(io_dim))
    parameters.write(
        'Stacked layers count : {}\n'.format(layers_stacked_count))
    parameters.write('Batch size : {}\n'.format(batch_size))
    parameters.write('Epochs : {}\n'.format(epoch))

    parameters.write('\nOptimizer : RMSprop\n')
    parameters.write('Learning rate : {}\n'.format(learning_rate))

    parameters.write('\nTraining time : {}\n'.format(
        time.strftime('%H:%M:%S', time.localtime(training_time))))

    parameters.write('\nROC settings :\n')
    parameters.write('\tThreshold start : {}\n'.format(start_thr))
    parameters.write('\tThreshold end : {}\n'.format(end_thr))
    parameters.write('\tStep : {}\n'.format(step_thr))

    if best_features is not None:
        parameters.write('\nFeature selection : \n')
        # Change std to print directly in file
        old_std = sys.stdout
        sys.stdout = parameters
        # Draw table
        table = BeautifulTable()
        table.column_headers = ['Selected features']
        for feature in best_features:
            table.append_row([feature])
        print(table)
        # restore original stdout
        sys.stdout = old_std

    parameters.close()
    print('[ + ] Summary saved !')
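write_summary delegates its plotting to utils helpers that are not shown. A minimal sketch of save_losses, assuming each loss argument is a per-epoch sequence (the figure styling is illustrative):

import matplotlib.pyplot as plt


def save_losses(train_loss, valid_loss, anomal_loss, loss_name):
    # each argument: one loss value per epoch
    plt.figure()
    plt.plot(train_loss, label='train')
    plt.plot(valid_loss, label='valid')
    plt.plot(anomal_loss, label='anomaly')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.savefig(loss_name)
    plt.close()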
Example #4
def mutil_attention_evalute(epoch,
                            epochs,
                            model,
                            model_name,
                            loader,
                            dataset,
                            criterion,
                            threshold=0.5,
                            write_log='',
                            _type='Val',
                            draw_roc=False,
                            title=None,
                            name=None,
                            save_pred=False,
                            save_scores=False):
    begin = time.time()
    if write_log != '':
        logs = open(write_log, 'a')
    model.eval()
    with torch.no_grad():
        running_corrects = 0
        running_loss = 0.0
        all_preds = []
        all_targets = []
        all_scores = []
        for i, (images, masks, labels) in enumerate(loader):
            images = images.cuda()
            masks = masks.cuda()
            labels = labels.cuda()

            outputs, _ = model(images, masks, remove_bg_att=True)
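            # remove_bg_att presumably uses the provided segmentation masks to
            # suppress attention on background regions (model-specific flag)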
            loss = criterion[0](outputs, labels)

            outputs = torch.softmax(outputs, dim=1)
            # _, preds = torch.max(outputs, 1)
            preds = torch.tensor(
                [1 if item[1] >= threshold else 0 for item in outputs]).cuda()
            running_loss += loss.item() * images.size(0)
            running_corrects += torch.sum(preds == labels.data)
            all_preds.append(preds.cpu().numpy().copy())
            all_targets.append(labels.cpu().numpy().copy())
            all_scores.append(outputs.cpu().numpy().copy())
        all_preds = np.concatenate(all_preds)
        all_targets = np.concatenate(all_targets)
        all_scores = np.concatenate(all_scores)

        auc = roc_auc_score(all_targets, all_scores[:, 1])
        cm, sens, spec, f1 = avg_f1(all_targets, all_preds)
        epoch_loss = running_loss / len(loader.dataset)
        epoch_acc = running_corrects.double() / len(loader.dataset)
        log = '{} Epoch: {}/{}, Loss: {:.4f}, ' \
              'Acc: {:.0f}/{}, {:.4f}, Auc: {:.4f}, ' \
              'sens: {:.4f}, spec: {:.4f}, f1: {:.4f}, ' \
              'Time: {:.0f}s'.format(_type, epoch+1, epochs, epoch_loss,
                                     running_corrects, len(loader.dataset),
                                     epoch_acc, auc, sens, spec, f1, time.time()-begin)

        print(log)
        # print(cm)
        if write_log != '':
            logs.write(log + '\n')
            logs.close()

        if draw_roc:
            save_roc(all_targets, all_scores[:, 1], title, name)

        if save_scores:
            true_positives, true_negatives, missing_report, mistake_report = save_result(
                dataset, all_targets, all_scores, all_preds, name)

            # save_pred needs the save_result artifacts above, so it only
            # applies when save_scores is set (otherwise these names would
            # be unbound and the return would raise a NameError)
            if save_pred:
                return epoch_acc.item(), auc.item(), sens.item(), spec.item(), f1.item(), cm, all_preds, \
                       true_positives, true_negatives, missing_report, mistake_report

        return epoch_acc.item(), auc.item(), sens.item(), spec.item(), f1.item(), cm
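Example #2's attention-crop branch depends on a mask2bbox helper that is not shown. A plausible sketch, assuming it thresholds each attention map, finds the tight bounding box of the attended region, and encodes it as the 2x3 affine theta that F.affine_grid expects; the threshold rule and the full-image fallback are assumptions:

import numpy as np


def mask2bbox(attention_map, threshold=0.5):
    # attention_map: (N, 1, H, W) tensor; returns (N, 2, 3) float32 thetas
    # that make F.affine_grid / F.grid_sample zoom into the attended bbox
    maps = attention_map.squeeze(1).detach().cpu().numpy()
    n, h, w = maps.shape
    thetas = np.zeros((n, 2, 3), dtype=np.float32)
    for i in range(n):
        m = maps[i]
        ys, xs = np.nonzero(m >= threshold * m.max())
        if len(xs) == 0:  # fallback: keep the full image
            x1, x2, y1, y2 = -1.0, 1.0, -1.0, 1.0
        else:
            # pixel bbox -> normalized [-1, 1] coordinates
            x1 = 2.0 * xs.min() / (w - 1) - 1.0
            x2 = 2.0 * xs.max() / (w - 1) - 1.0
            y1 = 2.0 * ys.min() / (h - 1) - 1.0
            y2 = 2.0 * ys.max() / (h - 1) - 1.0
        # affine theta mapping output grid coordinates into the bbox
        thetas[i] = [[(x2 - x1) / 2, 0, (x1 + x2) / 2],
                     [0, (y2 - y1) / 2, (y1 + y2) / 2]]
    return thetas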