Example #1
def evaluate(args, model, criterion, val_loader):
    model.eval()
    losses = AverageMeter()
    jaccars = AverageMeter()
    dices = AverageMeter()
    eva = Evaluation()

    for i, (images, labels) in enumerate(val_loader):
        if args.cuda:
            images = images.cuda()
            labels = labels.cuda()
            criterion = criterion.cuda()

        # Variable() is the legacy (pre-0.4) PyTorch autograd wrapper;
        # recent PyTorch versions accept plain tensors here.
        images = Variable(images)
        labels = Variable(labels)
        outputs = model(images)
        loss = criterion(outputs, labels)
        losses.update(loss.data.cpu().numpy())

        jacc_index = eva.jaccard_similarity_coefficient(
            outputs.cpu().data.numpy().squeeze(),
            labels.cpu().data.numpy())
        dice_index = eva.dice_coefficient(outputs.cpu().data.numpy().squeeze(),
                                          labels.cpu().data.numpy())
        jaccars.update(jacc_index)
        dices.update(dice_index)

    return losses.avg, jaccars.avg, dices.avg
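The evaluate() function above assumes an `AverageMeter` helper that the snippet does not define. A minimal sketch of the usual running-average utility (modeled on the canonical PyTorch ImageNet example; an assumption, not this project's actual code):

class AverageMeter:
    """Tracks the latest value, sum, count, and running average of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        # val: newest measurement; n: number of samples it covers
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count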
Example #2
def main():
    # Directory Setting
    train_dir = "./data/multi_train.csv"
    test_dir = "./data/multi_test.csv"
    model_dir = "./model_save"

    # HyperParameter
    epoch = 1
    batch = 128
    max_len = 50
    hidden_units = 64
    target_names = ['0', '1', '2', '3']

    # Flow
    print("0. Setting Environment")
    set_env()

    print("1. load data")
    train_x, train_y, test_x, test_y, val_x, val_y = load_data(
        train_dir, test_dir, len(target_names))

    print("2. pre processing")
    train_x, val_x, test_x = train_x.tolist(), val_x.tolist(), test_x.tolist()

    train_x = [' '.join(t.split()[0:max_len]) for t in train_x]
    train_x = np.array(train_x, dtype=object)[:, np.newaxis]

    val_x = [' '.join(t.split()[0:max_len]) for t in val_x]
    val_x = np.array(val_x, dtype=object)[:, np.newaxis]

    test_x = [' '.join(t.split()[0:max_len]) for t in test_x]
    test_x = np.array(test_x, dtype=object)[:, np.newaxis]

    print("3. build model")
    model = ELMo(hidden_units=hidden_units,
                 data_type="multi",
                 category_size=len(target_names))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    callbacks = create_callbacks(model_dir)
    model.fit(x=train_x,
              y=train_y,
              epochs=epoch,
              batch_size=batch,
              validation_data=(val_x, val_y),
              callbacks=callbacks)

    print("4. evaluation")
    evaluation = Evaluation(model, test_x, test_y)
    accuracy, cf_matrix, report = evaluation.eval_classification(
        data_type="multi")
    print("## Target Names : ", target_names)
    print("## Classification Report \n", report)
    print("## Confusion Matrix \n", cf_matrix)
    print("## Accuracy \n", accuracy)
Example #3
def main():
    # Directory Setting
    train_dir = "./data/multi_train.csv"
    test_dir = "./data/multi_test.csv"
    model_dir = "./model_save"
    embedding_dir = "./glove.6B.50d.txt"

    # HyperParameter
    epoch = 1
    batch = 256
    embedding_dim = 50
    target_names = ['0', '1', '2', '3']

    # Flow
    print("0. Setting Environment")
    set_env()

    print("1. load data")
    train_x, train_y, test_x, test_y, val_x, val_y = load_data(
        train_dir, test_dir, len(target_names))

    print("2. pre processing")
    train_x, test_x, val_x, tokenizer = pre_processing(train_x, test_x, val_x)

    print("3. text to vector")
    embedding_matrix = text_to_vector(tokenizer.word_index,
                                      embedding_dir,
                                      word_dimension=embedding_dim)

    print("4. build model")
    model = TextCNN(sequence_len=train_x.shape[1],
                    embedding_matrix=embedding_matrix,
                    embedding_dim=embedding_dim,
                    filter_sizes=[3, 4, 5],
                    flag="pre_training",
                    data_type="multi",
                    category_num=len(target_names))
    model.compile(optimizer="adam",
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    callbacks = create_callbacks(model_dir)

    model.fit(x=train_x,
              y=train_y,
              epochs=epoch,
              batch_size=batch,
              validation_data=(val_x, val_y),
              callbacks=callbacks)

    print("5. evaluation")
    evaluation = Evaluation(model, test_x, test_y)
    accuracy, cf_matrix, report = evaluation.eval_classification(
        data_type="multi")
    print("## Target Names : ", target_names)
    print("## Classification Report \n", report)
    print("## Confusion Matrix \n", cf_matrix)
    print("## Accuracy \n", accuracy)
Example #4
def get_ys(self, tag):
    if tag is not None:
        y_name = Evaluation.shorten_bioconcept(tag)
        ys = [1 if y_col == y_name else 0 for y_col in self.COLUMNS_BY_SOURCE['y']]
    else:
        ys = np.zeros(len(self.COLUMNS_BY_SOURCE['y']))
    return list(ys)
Example #5
def train(self, df):
    metrics_by_y = dict.fromkeys(self.COLUMNS_BY_SOURCE['y'])
    for i, this_y in enumerate(self.COLUMNS_BY_SOURCE['y']):
        print(f'\n{this_y}\n')
        other_ys = [y for y in self.COLUMNS_BY_SOURCE['y'] if y != this_y]
        selected_columns = [
            c for c in df.columns
            if c not in other_ys and c not in self.COLUMNS_TO_EXCLUDE and c not in self.COLUMNS_TO_NOT_ANALYSE]
        kingdom = 'plant' if i <= 2 else 'animal'
        kingdom_df = df.loc[df.kingdom == kingdom]
        if kingdom == 'animal':
            self.investigate_shit_features(kingdom_df)
        X_train, X_test, y_train, y_test = self.preprocess_dataset_and_split(kingdom_df, selected_columns)
        X_train, y_train = self.up_or_down_sample(X_train, y_train)
        logreg = LogisticRegression(solver='liblinear')
        filtered_X_train = self.auto_or_manual_feature_selection(logreg, X_train, y_train)
        logit_model = statsmodels.api.Logit(y_train, filtered_X_train)
        result = self.fit_and_check_singular_matrix(logit_model, filtered_X_train, y_train)
        print(result.summary2())
        print('-------------------------------------------------------------')
        logreg.fit(filtered_X_train, y_train)
        y_pred = logreg.predict(X_test.loc[:, filtered_X_train.columns])
        metrics = Evaluation.calculate_metrics(y_test, y_pred)
        metrics_by_y[this_y] = metrics
    metrics_df = pandas.DataFrame(metrics_by_y)
    average_f1 = np.mean(metrics_df.T.f1) * 100
    print(metrics_df.T)
    print(f'average f1 score: {average_f1:.0f}%')
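`Evaluation.calculate_metrics` is not shown. Since train() collects one dict per target column and later reads an `f1` row from the transposed DataFrame, it plausibly returns a dict of standard binary-classification scores; a sketch using scikit-learn (an assumption):

from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score

def calculate_metrics(y_true, y_pred):
    # One entry per metric; pandas.DataFrame(metrics_by_y).T then exposes
    # each metric as a column, e.g. metrics_df.T.f1.
    return {
        "accuracy": accuracy_score(y_true, y_pred),
        "precision": precision_score(y_true, y_pred),
        "recall": recall_score(y_true, y_pred),
        "f1": f1_score(y_true, y_pred),
    }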
Example #6
def main():
    # Directory Setting
    train_dir = "../data/binary_train.csv"
    test_dir = "../data/binary_test.csv"
    model_dir = "./model_save"

    # HyperParameter
    epoch = 2
    batch = 256

    # Flow
    print("0. Setting Environment")
    set_env()

    print("1. load data")
    train_x, train_y, test_x, test_y, val_x, val_y = load_data(
        train_dir, test_dir)

    print("2. pre processing")
    train_x, test_x, val_x, tokenizer = pre_processing(train_x, test_x, val_x)

    print("3. build model")
    model = TextCNN(sequence_len=train_x.shape[1],
                    embedding_matrix=len(tokenizer.word_index) + 1,
                    embedding_dim=300,
                    filter_sizes=[3, 4, 5],
                    flag="self_training",
                    data_type="binary")
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    callbacks = create_callbacks(model_dir)
    model.fit(x=train_x,
              y=train_y,
              epochs=epoch,
              batch_size=batch,
              validation_data=(val_x, val_y),
              callbacks=callbacks)

    print("4. evaluation")
    evaluation = Evaluation(model, test_x, test_y)
    accuracy, cf_matrix, report = evaluation.eval_classification(
        data_type="binary")
    print("## Classification Report \n", report)
    print("## Confusion Matrix \n", cf_matrix)
    print("## Accuracy \n", accuracy)
Example #7
def evaluate(args, model, criterion, val_loader):
    model.eval()
    losses = AverageMeter()
    jaccars = AverageMeter()
    dices = AverageMeter()
    eva = Evaluation()

    for i, (images, labels) in enumerate(val_loader):
        if args.cuda:
            images = images.cuda()
            labels = labels.cuda()
            criterion = criterion.cuda()

        images = Variable(images)
        labels = Variable(labels)
        outputs = model(images)

        # Flatten predictions and labels so the metrics and the loss
        # see matching 1-D shapes.
        outputs = outputs.view(1, -1)
        labels = labels.view(1, -1)

        jacc_index = eva.jaccard_similarity_coefficient(
            outputs.cpu().data.numpy().squeeze(),
            labels.cpu().data.numpy())
        dice_index = eva.dice_coefficient(outputs.cpu().data.numpy().squeeze(),
                                          labels.cpu().data.numpy())
        loss = criterion(outputs, labels)  # an earlier experiment subtracted jacc_index here

        losses.update(loss.data.cpu().numpy())
        jaccars.update(jacc_index)
        dices.update(dice_index)

    return losses.avg, jaccars.avg, dices.avg
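Both evaluate() variants (Examples #1 and #7) lean on `jaccard_similarity_coefficient` and `dice_coefficient` from the Evaluation class. For binary masks these are overlap ratios; a minimal numpy sketch (the 0.5 binarisation threshold and the epsilon smoothing are assumptions):

import numpy as np

def jaccard_similarity_coefficient(pred, target, eps=1e-7):
    # |A ∩ B| / |A ∪ B| over binarised, flattened masks.
    pred = (np.asarray(pred) > 0.5).astype(np.float64).ravel()
    target = (np.asarray(target) > 0.5).astype(np.float64).ravel()
    intersection = (pred * target).sum()
    union = pred.sum() + target.sum() - intersection
    return (intersection + eps) / (union + eps)

def dice_coefficient(pred, target, eps=1e-7):
    # 2|A ∩ B| / (|A| + |B|); equivalent to 2J / (1 + J) for the same masks.
    pred = (np.asarray(pred) > 0.5).astype(np.float64).ravel()
    target = (np.asarray(target) > 0.5).astype(np.float64).ravel()
    intersection = (pred * target).sum()
    return (2.0 * intersection + eps) / (pred.sum() + target.sum() + eps)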
Example #8
def main():
    # Hyper parameter
    batch_size = 64
    lr = 0.001
    epochs = 3
    n_classes = 2
    embedding_dim = 300
    hidden_dim = 32

    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    # Directory
    train_dir = "../data/binary_train_data.csv"
    test_dir = "../data/binary_test_data.csv"
    model_dir = "snapshot/text_classification.pt"

    print("1.Load data")
    train_data, valid_data, test_data, text, label = load_data(
        train_dir, test_dir)

    print("2.Pre processing")
    train_iter, val_iter, test_iter, text, label = pre_processing(
        train_data, valid_data, test_data, text, label, device, batch_size)

    print("3.Build model")
    model = BaseModel(hidden_dim=hidden_dim,
                      vocab_num=len(text.vocab),
                      embedding_dim=embedding_dim,
                      class_num=len(label.vocab)).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    print("4.Train")
    best_val_loss = None
    for e in range(1, epochs + 1):
        model = training(model, optimizer, train_iter, device)
        val_loss, val_accuracy = Evaluation(model, val_iter, device)
        print("[Epoch: %d] val loss : %5.2f | val accuracy : %5.2f" %
              (e, val_loss, val_accuracy))
        save_model(best_val_loss, val_loss, model, model_dir)

    model.load_state_dict(torch.load(model_dir))
    test_loss, test_acc = Evaluation(model, test_iter, device)
    print('Test loss: %5.2f | Test accuracy: %5.2f' % (test_loss, test_acc))
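`save_model(best_val_loss, val_loss, model, model_dir)` is called without capturing a return value, yet `best_val_loss` can only be tracked across epochs if the helper returns it (or stores it elsewhere). A hedged sketch of the likely save-on-improvement logic:

import torch

def save_model(best_val_loss, val_loss, model, model_dir):
    # Persist weights whenever validation loss improves; the caller
    # should assign the return value back to best_val_loss.
    if best_val_loss is None or val_loss < best_val_loss:
        torch.save(model.state_dict(), model_dir)
        best_val_loss = val_loss
    return best_val_loss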
Example #9
    def __init__(self, cfg, data, mode):
        self.cfg = cfg
        self.mode = mode
        self.data = data
        self.eval_obj = Evaluation()

        if mode != 'export':
            self.build_model()
            self.saver = tf.train.Saver(var_list=tf.global_variables(),
                                        max_to_keep=1000)
Example #10
def train(trainset, model, optimizer, loss_function, testset):
    index = 0
    losses = []
    acc = []
    for epoch in range(6):
        total_loss = torch.Tensor([0])
        for instance in trainset:
            print(index)
            index += 1
            for question in instance['questions']:
                text = stemming(instance['text'])
                ques = stemming(question['question'])
                for answer in question['answers']:
                    model.zero_grad()
                    ans = stemming(answer['answer'])
                    output = model(text, ques, ans)
                    if answer['correct'] == 'True':
                        y = autograd.Variable(torch.FloatTensor([1]))
                    else:
                        y = autograd.Variable(torch.FloatTensor([0]))
                    print("output", output.data[0][0])
                    # avoid 0 gradient
                    if output.data[0][0] == 0:
                        output = output + autograd.Variable(
                            torch.FloatTensor([0.0001]))
                    if output.data[0][0] == 1:
                        output = output - autograd.Variable(
                            torch.FloatTensor([0.0001]))
                    loss = loss_function(output, y)
                    # print('output', output.data[0])
                    # print('loss', loss.data[0])
                    loss.backward()
                    optimizer.step()
                    # for param in model.parameters():
                    #     print('param', param.grad.data.sum())
                    total_loss += loss.data[0]
        losses.append(total_loss)
        y, predicty = test(testset, model)
        evaluator = Evaluation()  # renamed from `eval` to avoid shadowing the built-in
        # NOTE: `data` is not a parameter of train(); it is assumed to be a
        # module-level dataset object (see the usage in Example #17).
        acc.append(evaluator.accuracy(y, predicty, data))
    return losses, acc
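`Evaluation().accuracy(y, predicty, data)` compares gold labels against the model's scores and, judging by the `wrong` list used in Example #17, records misclassified cases. One plausible shape, thresholding at 0.5 (the threshold and the unused `data` argument are assumptions):

class Evaluation:
    def __init__(self):
        self.wrong = []  # indices of misclassified examples

    def accuracy(self, y, pred_y, data=None, threshold=0.5):
        self.wrong = []
        correct = 0
        for i, (gold, score) in enumerate(zip(y, pred_y)):
            predicted = 1 if score >= threshold else 0
            if predicted == gold:
                correct += 1
            else:
                self.wrong.append(i)
        return correct / len(y)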
Example #11
def run_experiments(**exp_params) -> None:
    logger = init_logger(exp_params)

    (trajectory_generator, training_task_descriptors, test_task_confs, controls,
     dataset, session, model, meta_learner, lhs_tasks,
     test_observations) = init_experiments(**exp_params)

    meta_learner.train_model()

    evaluation = None
    if exp_params["evaluation"]:
        evaluation = Evaluation(test_task_grid=test_task_confs,
                                meta_learner=meta_learner,
                                kwargs=exp_params,
                                test_observations=test_observations)

        evaluation.evaluation_on_test_tasks(dataset=dataset,
                                            test_tasks_params=test_task_confs,
                                            iteration=0,
                                            controls=controls)

    for iteration in range(exp_params["task_budget"]):
        latent_task_variables_mean, latent_task_variables_var = meta_learner.get_H_space_subset(
            end_task_id=exp_params["n_initial_training_envs"] + iteration)

        candidates = discretise_region(
            latent_task_variables_mean=latent_task_variables_mean,
            slack_min_values=exp_params["slack_min_intervals"],
            slack_max_values=exp_params["slack_max_intervals"],
            grid_resolution=exp_params["candidate_grid_size"])
        candidates = filter_candidates(
            latent_task_variables_mean=latent_task_variables_mean,
            task_configurations=training_task_descriptors,
            candidates=candidates,
            config_space=exp_params["observed_configuration_space_interval"],
            verbose=exp_params["verbose"],
            GPModel=model,
            session=session)

        logger.info(f"Number of candidates: {candidates.shape}")
        selected_task_descriptor = acquire_task(
            iteration=iteration,
            latent_task_variables_mean=latent_task_variables_mean,
            latent_task_variables_var=latent_task_variables_var,
            discretised_latent_space_region=candidates,
            task_descriptors=training_task_descriptors,
            meta_learner=meta_learner,
            lhs_tasks=lhs_tasks,
            model=model,
            **exp_params)
        logger.info(f"Acquired task configuration: {selected_task_descriptor}")
        dataset.add_configuration(new_configuration=selected_task_descriptor)
        acquired_task_observations = trajectory_generator.observe_trajectories(
            task_configurations=selected_task_descriptor,
            controls=controls,
            dim_states=exp_params["dim_states"])[0]
        meta_learner, training_task_descriptors = add_new_task(
            iteration=iteration,
            meta_learner=meta_learner,
            acquired_task_observations=acquired_task_observations,
            controls=controls,
            training_task_descriptors=training_task_descriptors,
            selected_task_descriptor=selected_task_descriptor,
            **exp_params)
        meta_learner.train_model()

        if exp_params["evaluation"]:
            evaluation.evaluation_on_test_tasks(
                dataset=dataset,
                test_tasks_params=test_task_confs,
                iteration=(iteration + 1),
                controls=controls)
Example #12
## Configure data loader
data_loader_train = DataLoader(FLAGS,
                               which_set='train',
                               image_size=FLAGS.image_size,
                               shuffle=False)
data_loader_val = DataLoader(FLAGS,
                             which_set='test',
                             image_size=FLAGS.image_size,
                             shuffle=False)
p_data_loader_train = ParallDataWraper(data_loader_train,
                                       batch_size=FLAGS.batch_size,
                                       thread=3)
p_data_loader_val = ParallDataWraper(data_loader_val,
                                     batch_size=FLAGS.batch_size,
                                     thread=3)

## Configure evaluator
evaluator = Evaluation(FLAGS, data_loader_train)

## Configure the CNN model
img_shape = [FLAGS.image_size, FLAGS.image_size]
feed_img_batch = tf.placeholder(tf.float32, shape=[None] + img_shape + [3])
with tf.variable_scope('PRETRAINED_CNN'):
    print('--> init the image model ...')
    CNN, CNN_all_vars = classifier.get_main_network(
        'inception',
        input_tensor=feed_img_batch,
        num_classes=3,
        use_weights=False)
    CNN_last_conv = CNN.layers[-3].output

CNN_all_vars = [
    var for var in tf.trainable_variables() if 'PRETRAINED_CNN' in var.name
]
Example #13
def run(self):
    items, _ = self.fit_to_validation()
    evaluation = Evaluation(items, verbose=False)
    evaluation.run()
Example #14
def main():
    # Directory Setting
    train_dir = "../data/multi_train.csv"
    test_dir = "../data/multi_test.csv"
    model_dir = "./model_save"

    # HyperParameter
    epoch = 2
    batch = 256
    max_len = 50
    hidden_units = 64
    target_names = ['0', '1', '2', '3']

    # Flow
    print("0. Setting Environment")
    set_env()

    print("1. load data")
    train_x, train_y, test_x, test_y, val_x, val_y = load_data(train_dir, test_dir, len(target_names))

    print("2. pre processing")
    train_x, val_x, test_x = train_x.tolist(), val_x.tolist(), test_x.tolist()

    train_x = [' '.join(t.split()[0:max_len]) for t in train_x]
    train_x = np.array(train_x, dtype=object)[:, np.newaxis]

    val_x = [' '.join(t.split()[0:max_len]) for t in val_x]
    val_x = np.array(val_x, dtype=object)[:, np.newaxis]

    test_x = [' '.join(t.split()[0:max_len]) for t in test_x]
    test_x = np.array(test_x, dtype=object)[:, np.newaxis]

    tokenizer = create_tokenizer_from_hub_module()

    train_examples = convert_text_to_examples(train_x, train_y)
    val_examples = convert_text_to_examples(val_x, val_y)
    test_examples = convert_text_to_examples(test_x, test_y)

    train_input_ids, train_input_masks, train_segment_ids, train_labels = convert_examples_to_features(tokenizer, train_examples, max_len)
    val_input_ids, val_input_masks, val_segment_ids, val_labels = convert_examples_to_features(tokenizer, val_examples, max_len)
    test_input_ids, test_input_masks, test_segment_ids, test_labels = convert_examples_to_features(tokenizer, test_examples, max_len)

    print("3. build model")
    model = BERT(max_len, data_type="multi", category_size=len(target_names))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    initialize_vars(sess)  # `sess` is assumed to be a module-level TF session (cf. Example #20)

    cp_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=model_dir + "/model-weights.{epoch:02d}-{val_acc:.6f}.hdf5",
        monitor='val_acc',
        save_best_only=True,
        verbose=1)
    model.fit(
        [train_input_ids, train_input_masks, train_segment_ids], train_labels,
        validation_data=([val_input_ids, val_input_masks, val_segment_ids], val_labels),
        epochs=epoch,
        batch_size=batch,
        callbacks=[cp_callback]
    )

    print("4. evaluation")
    evaluation = Evaluation(model, [test_input_ids, test_input_masks, test_segment_ids], test_y)
    accuracy, cf_matrix, report = evaluation.eval_classification_bert(data_type="multi")
    print("## Classification Report \n", report)
    print("## Confusion Matrix \n", cf_matrix)
    print("## Accuracy \n", accuracy)
Example #15
def run(self):
    model_dir_by_bioconcept = self.train_models_or_get_dirs()
    predictions = self.predict_validation_data(model_dir_by_bioconcept)
    evaluation = Evaluation(predictions, verbose=False)
    evaluation.run()
Example #16
def run_one_epoch(parameters, config, device, epoch, mode):
    model = parameters["model"]

    if mode == "train":
        model.train()
        optimizer = parameters["optimizer"]
    elif mode == "valid" or mode == "test":
        model.eval()
    else:
        raise NotImplementedError

    dataset = copy.deepcopy(parameters["dataset_{}".format(mode)])
    pred = {}
    total_loss = 0
    evaluation = Evaluation(config)
    for step, data in enumerate(dataset):
        for key in data:
            if isinstance(data[key], torch.Tensor):
                data[key] = data[key].to(device)

        if mode == "train":
            optimizer.zero_grad()

        if config.get("model", "model_name") == "Crf":
            if mode != "test":
                results = model(data=data, mode=mode, crf_mode="train")
                loss = results["loss"]
                total_loss += loss.item()
                results = model(data=data, mode=mode, crf_mode="test")
                evaluation.expand(results["prediction"], results["labels"])
            else:
                results = model(data=data, mode=mode, crf_mode="test")
                prediction = results["prediction"]
                if not isinstance(prediction, list):
                    prediction = prediction.cpu().numpy().tolist()
                docids = data["docids"]
                canids = data["canids"]
                for doc, can, pre in zip(docids, canids, prediction):
                    if doc not in pred:
                        pred[doc] = []
                    assert (len(can) == len(pre))
                    for c, p in zip(can, pre):
                        if p != "O":
                            p = p[2:]
                        assert p in Global.type2id.keys()
                        pred[doc].append({
                            "id": c,
                            "type_id": Global.type2id[p]
                        })
        else:
            results = model(data=data, mode=mode)
            if mode != "test":
                loss = results["loss"]
                total_loss += loss.item()
                evaluation.expand(results["prediction"], results["labels"])
            else:
                prediction = results["prediction"].cpu().numpy().tolist()
                docids = data["docids"]
                canids = data["canids"]
                for did, cid, pre in zip(docids, canids, prediction):
                    if did not in pred:
                        pred[did] = []
                    pred[did].append({"id": cid, "type_id": pre})
        if mode != "test":
            print("\r{}: Epoch {} Step {:0>4d}/{} | Loss = {:.4f}".format(
                mode, epoch, step + 1, len(dataset),
                round(total_loss / (step + 1), 4)),
                  end="")
        else:
            print("\r{}: Epoch {} Step {:0>4d}/{}".format(
                mode, epoch, step + 1, len(dataset)),
                  end="")

        if mode == "train":
            loss.backward()
            optimizer.step()

    if mode != "test":
        metric = evaluation.get_metric("all")
        sys.stdout.write("\r")
        print("\r{}: Epoch {} | Metric: {}".format(mode, epoch, metric))
        return metric
    else:
        return pred
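`evaluation.expand(...)` per batch followed by `evaluation.get_metric("all")` at epoch end suggests an accumulate-then-score pattern. A hedged sketch of such an Evaluation class (micro-averaging is an assumption; the real metric depends on `config`):

from sklearn.metrics import precision_recall_fscore_support

class Evaluation:
    def __init__(self, config=None):
        self.config = config
        self.predictions = []
        self.labels = []

    def expand(self, prediction, labels):
        # Accumulate one batch of predictions and gold labels.
        self.predictions.extend(prediction)
        self.labels.extend(labels)

    def get_metric(self, mode="all"):
        # Micro-averaged scores over everything seen this epoch.
        p, r, f1, _ = precision_recall_fscore_support(
            self.labels, self.predictions, average="micro")
        return {"precision": p, "recall": r, "f1": f1}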
Example #17
# train
rnn = RNN(100, 128, len(vocab))
optimizer = optim.SGD(rnn.parameters(), lr=0.1)
loss_function = nn.BCELoss()
losses, acc = rnn_train(data.trainset, rnn, optimizer, loss_function,
                        data.testset)
plt.xlabel("Train epoch")
plt.ylabel("accuracy")
plt.plot([1, 2, 3, 4, 5, 6], acc)
plt.show()
# test
y, predicty = rnn_test(data.testset, rnn)
evaluator = Evaluation()  # renamed from `eval` to avoid shadowing the built-in
evaluator.accuracy(y, predicty, data)
with open('result_rnn.txt', 'w') as f:
    for index, maxd in enumerate(evaluator.wrong):
        f.write("Case #{}: {} ".format(index + 1, maxd) + '\n')
# predicty=[0.1, 0.2, 0.55, 0.51, 0.53, 0.7]
# use common sense
predicty = use_csk(data.testset, predicty, rnn)
# Evaluation
evaluator = Evaluation()
evaluator.accuracy(y, predicty, data)

final = time.time()
print("time:", final - begin)
Example #19
class Experiments():
    def __init__(self):

        self.eval = Evaluation()
        self.board = Dashboad(8098)

    def error_hist(self, gtdir, resdir, imgprefix, plot=False):
        listGTFiles = [
            k.split('/')[-1].split('.')[0]
            for k in glob.glob(os.path.join(gtdir, '*.bmp'))
        ]

        filename_jacc = dict()
        filename_dice = dict()
        filename_sens = dict()
        filename_spec = dict()
        for currfile in tqdm(listGTFiles):
            if currfile.count('_') == 2:
                continue
            gt = np.array(Image.open(os.path.join(gtdir,
                                                  currfile + '.bmp'))) / 255
            res = np.array(
                Image.open(
                    os.path.join(resdir, currfile + '_' + imgprefix + '.bmp')))
            res[res > 10] = 255
            res = res / 255  # res is an integer array; in-place true division would raise a TypeError

            jacc_index = self.eval.jaccard_similarity_coefficient(
                gt.squeeze(), res.squeeze())
            dice = self.eval.dice_coefficient(gt.squeeze(), res.squeeze())
            spec, sens, _ = self.eval.specificity_sensitivity(
                gt.squeeze(), res.squeeze())
            filename_jacc[currfile] = jacc_index
            filename_dice[currfile] = dice
            filename_sens[currfile] = sens
            filename_spec[currfile] = spec
        if plot:
            self.board.metric_bar(filename_jacc.values(),
                                  'Jaccard_' + imgprefix,
                                  nbins=20)
            self.board.metric_bar(filename_dice.values(),
                                  'Dice_' + imgprefix,
                                  nbins=20)
            self.board.metric_bar(filename_sens.values(),
                                  'Sens_' + imgprefix,
                                  nbins=20)
            self.board.metric_bar(filename_spec.values(),
                                  'Spec_' + imgprefix,
                                  nbins=20)

        return filename_jacc, filename_dice, filename_sens, filename_spec

    def get_failure_cases(self, args, threshold=0.5):
        list_dics = []

        for m in args.methods:
            result, _, _, _ = self.error_hist(args.gtdir,
                                              args.resdir,
                                              m,
                                              plot=False)
            list_dics.append(result)

        # Keep only failures: drop images whose Jaccard exceeds the threshold.
        # Iterate over a snapshot, since deleting while iterating over
        # d.items() raises a RuntimeError in Python 3.
        for d in list_dics:
            for k, v in list(d.items()):
                if v > threshold:
                    del d[k]

        # Find the failure cases common between all methods
        common_failures = set.intersection(*tuple(
            set(d.keys()) for d in list_dics))
        return common_failures

    # TODO Remove val argument in function
    def make_grid(self, args, val=True, selected_filenames=None):
        bordersize = 2
        batch = np.empty((0, 3, 244, 324))
        num2sample = 60
        if selected_filenames is not None:
            filenames = list(selected_filenames)
            num2sample = len(filenames)
        else:
            if val:
                filenames = [
                    k.split('.')[-2].split('/')[-1]
                    for k in glob.glob(os.path.join(args.imgdir, "val_*"))
                ]
            elif args.test:  # otherwise the val_* selection above is always overwritten
                filenames = [
                    k.split('.')[-2].split('/')[-1]
                    for k in glob.glob(os.path.join(args.imgdir, "test_*.bmp"))
                ]
            else:
                filenames = [
                    k.split('.')[-2].split('/')[-1]
                    for k in glob.glob(os.path.join(args.imgdir, "*.bmp"))
                ]

        train_ind = np.random.choice(np.arange(0, len(filenames)),
                                     num2sample,
                                     replace=False)
        for i in range(train_ind.shape[0]):
            currfile = filenames[train_ind[i]]
            im = np.array(
                Image.open(os.path.join(args.imgdir,
                                        currfile + ".bmp")).convert('RGB'))
            # Applies a border on the top of the image
            im = cv2.copyMakeBorder(im,
                                    top=bordersize,
                                    bottom=bordersize,
                                    left=bordersize,
                                    right=bordersize,
                                    borderType=cv2.BORDER_CONSTANT,
                                    value=[255, 0, 0])
            im = cv2.putText(im,
                             currfile, (10, 20),
                             fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                             fontScale=0.75,
                             color=(255, 255, 255),
                             thickness=2)
            im = im.transpose((2, 0, 1))
            batch = np.append(batch, im[np.newaxis, :, :, :], axis=0)

            if val:
                im = np.array(
                    Image.open(os.path.join(args.gtdir,
                                            currfile + ".bmp")).convert('L'))
                im = np.repeat(im[:, :, np.newaxis], 3, axis=2)
                im = cv2.copyMakeBorder(im,
                                        top=bordersize,
                                        bottom=bordersize,
                                        left=bordersize,
                                        right=bordersize,
                                        borderType=cv2.BORDER_CONSTANT,
                                        value=[0, 255, 0])
                im = im.transpose((2, 0, 1))
                batch = np.append(batch, im[np.newaxis, :, :, :], axis=0)

            for m in args.methods:
                res = np.array(
                    Image.open(
                        os.path.join(args.resdir, currfile + "_" + m +
                                     ".bmp")).convert('L'))
                res = np.repeat(res[:, :, np.newaxis], 3, axis=2)
                res = cv2.copyMakeBorder(res,
                                         top=bordersize,
                                         bottom=bordersize,
                                         left=bordersize,
                                         right=bordersize,
                                         borderType=cv2.BORDER_CONSTANT,
                                         value=[0, 255, 200])

                # Writes the name of the models.
                res = cv2.putText(res,
                                  m, (10, 20),
                                  fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                  fontScale=0.75,
                                  color=(255, 255, 255),
                                  thickness=2)
                res = res.transpose((2, 0, 1))
                batch = np.append(batch, res[np.newaxis, :, :, :], axis=0)

        return batch
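error_hist() also unpacks `spec, sens, _` from `specificity_sensitivity`. A minimal confusion-count sketch (the discarded third return value is assumed here to be overall accuracy):

import numpy as np

def specificity_sensitivity(gt, pred):
    # Binarise, flatten, and count the 2x2 confusion cells.
    gt = (np.asarray(gt) > 0.5).ravel()
    pred = (np.asarray(pred) > 0.5).ravel()
    tp = np.sum(gt & pred)
    tn = np.sum(~gt & ~pred)
    fp = np.sum(~gt & pred)
    fn = np.sum(gt & ~pred)
    specificity = tn / (tn + fp) if (tn + fp) else 0.0
    sensitivity = tp / (tp + fn) if (tp + fn) else 0.0
    accuracy = (tp + tn) / gt.size  # third value; the caller discards it
    return specificity, sensitivity, accuracy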
Example #20
from opts import *
from TopicMDNet import TopicMDNet as MDNet

# configuration session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
K.set_session(sess)  # set sess before initialize variable...

## Configure data loader
data_loader_val = DataLoader(FLAGS, which_set='train', image_size=FLAGS.image_size, shuffle=False, use_augmentation=False)
p_data_loader_val = ParallDataWraper(data_loader_val, batch_size=FLAGS.batch_size, thread=3)

## Configure evaluator
evaluator = Evaluation(FLAGS, data_loader_val)

## Configure the CNN model
feed_img_batch = tf.placeholder(tf.float32, shape=[None, FLAGS.image_size, FLAGS.image_size, 3])
with tf.variable_scope('PRETRAINED_CNN'):
    print('--> init the image model ...')
    CNN, CNN_all_vars = classifier.get_main_network('inception', input_tensor=feed_img_batch, num_classes=3, use_weights=False)
    CNN_last_conv = CNN.layers[-3].output

CNN_all_vars = [var for var in tf.trainable_variables() if 'PRETRAINED_CNN' in var.name]
FLAGS.conv_feat_dim = int(CNN_last_conv.shape[3])
## Configure the language model model
LSTM = MDNet(FLAGS, data_loader_val,
            CNN_last_conv,
            CNN.output, # classifier logit
            CNN.layers[-1].weights[0].op.name,   # the classifier weight op name for get_collection in MDNet
Example #21
 def run(self):
     results = self.fit_to_validation()
     evaluation = Evaluation(results, verbose=False)
     evaluation.run()
Example #22
def train(args, model):
    #     board = Dashboad(args.visdom_port) #visdom
    tr_losses = AverageMeter()
    tLoader, vLoader = load_data(args)

    criterion = nn.BCELoss()
    #     criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          weight_decay=args.weight_decay,
                          momentum=0.99)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)

    for epoch in range(1, args.num_epochs + 1):
        scheduler.step()  # pre-1.1 PyTorch convention; newer versions step after optimizer.step()
        if epoch == 1:
            tr_loss, _, _ = evaluate(args, model, criterion, tLoader)
            vl_loss, vl_jacc, vl_dice = evaluate(args, model, criterion,
                                                 vLoader)

            # Draw the loss curves
            win = None
            #             win = board.loss_curves([tr_loss, vl_loss], epoch, win=win) #visdom
            print('[Initial TrainLoss: {0:.4f}]'
                  '\t[Initial ValidationLoss: {1:.4f}]'
                  '\t[Initial ValidationJaccard: {2:.4f}]'
                  '\t[Initial ValidationDice: {3:.4f}]'.format(
                      tr_loss, vl_loss, vl_jacc, vl_dice))
            print(
                '----------------------------------------------------------------------------------------------------'
                '--------------')

        for step, (images, labels) in enumerate(tLoader):
            model.train(True)
            if args.cuda:
                images = images.cuda()
                labels = labels.cuda()
                criterion = criterion.cuda()

            inputs = Variable(images)
            targets = Variable(labels)

            optimizer.zero_grad()
            outputs = model(inputs)

            eva = Evaluation()
            #            jacc = eva.jaccard_similarity_coefficient(outputs.cpu().data.numpy().squeeze(),
            #                                                        labels.cpu().data.numpy())
            output_np = outputs.cpu().data.numpy().squeeze()

            targets_np = targets.cpu().data.numpy()

            dice = eva.dice_coefficient(output_np, targets_np)

            crit = criterion(outputs, targets)
            loss = crit - dice

            print('bceloss : ', crit.data)
            print('bceloss - dice : ', loss.data)

            loss.backward()
            optimizer.step()

            tr_losses.update(loss.data.cpu().numpy())

        if epoch % args.log_step == 0:
            vl_loss, vl_jacc, vl_dice = evaluate(args, model, criterion,
                                                 vLoader)
            print('[Epoch: {0:02}/{1:02}]'
                  '\t[TrainLoss: {2:.4f}]'
                  '\t[ValidationLoss: {3:.4f}]'
                  '\t[ValidationJaccard: {4:.4f}]'
                  '\t[ValidationDice: {5:.4f}]'.format(epoch, args.num_epochs,
                                                       tr_losses.avg, vl_loss,
                                                       vl_jacc, vl_dice))

            filename = "weights/{0}-{1:02}.pth".format(args.model, epoch)
            torch.save(model.state_dict(), filename)
            print('  [Snapshot]')
        else:
            vl_loss, vl_jacc, vl_dice = evaluate(args, model, criterion,
                                                 vLoader)
            print('[Epoch: {0:02}/{1:02}]'
                  '\t[TrainLoss: {2:.4f}]'
                  '\t[ValidationLoss: {3:.4f}]'
                  '\t[ValidationJaccard: {4:.4f}]'
                  '\t[ValidationDice: {5:.4f}]'.format(epoch, args.num_epochs,
                                                       tr_losses.avg, vl_loss,
                                                       vl_jacc, vl_dice))
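A caveat on the training step above: `dice` is a plain numpy float, so `loss = crit - dice` shifts the reported loss value but contributes no gradient; only the BCE term is optimised. To actually optimise Dice, a differentiable soft-Dice term computed on tensors would be needed; a common sketch (not this repository's code):

import torch

def soft_dice(outputs, targets, eps=1e-7):
    # Computed on probabilities without thresholding, so gradients
    # flow through `outputs`.
    outputs = outputs.reshape(-1)
    targets = targets.reshape(-1)
    intersection = (outputs * targets).sum()
    return (2.0 * intersection + eps) / (outputs.sum() + targets.sum() + eps)

# usage inside the step: loss = criterion(outputs, targets) - soft_dice(outputs, targets)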