# 示例#1 (Example #1 — fragment boundary marker)
# 0
from keras.optimizers import SGD
from keras.layers import Input, Conv3D, MaxPooling3D, AveragePooling3D, Flatten, Dropout, BatchNormalization
from keras.models import Model
from keras.metrics import binary_accuracy, binary_crossentropy, mean_absolute_error
from keras.constraints import maxnorm

from dnn_model import DNN_model
import helpers
import random

# Seed Python's RNG so runs are reproducible.
random.seed(2)

# Module-level logger used when callers do not supply their own.
default_logger = helpers.getlogger("3DCNN")

import tensorflow as tf
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)
from keras.backend.tensorflow_backend import set_session
# Cap this process at 50% of the GPU's memory so the device can be shared.
# NOTE(review): tf.ConfigProto / tf.Session are TF 1.x-only APIs.
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config))


class ThreeDCNN(DNN_model):
    """3-D CNN model wrapper built on the project's DNN_model base class."""

    def __init__(self, logger=default_logger):
        # Delegate base-class initialization, then attach the logger this
        # instance will use (defaults to the module-level "3DCNN" logger).
        DNN_model.__init__(self)
        self.logger = logger

    # NOTE(review): this definition is truncated in the visible fragment —
    # the parameter list and body continue beyond this source chunk.
    def generate_model(self,
import ResNet50

# limit memory usage..
import tensorflow as tf
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)

from keras.backend.tensorflow_backend import set_session
# Reserve at most 80% of GPU memory for this process (TF 1.x-style session setup).
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.8
set_session(tf.Session(config=config))

#logging.config.fileConfig("logging.conf")
# Name the logger after this file (basename without extension).
# NOTE(review): `os` and `helpers` are not imported in this visible fragment — confirm
# they are imported earlier in the full file.
logger = helpers.getlogger(os.path.splitext(os.path.basename(__file__))[0])
# without augmentation, 10:1 -> 99 train, 97 test, 0.27 cross entropy, before commit 573
# 3 pools instead of 4 (gives a bigger end layer) gives much worse validation accuracy + logloss .. strange ?
# 32 x 32 x 32 seems to do better than 48 x 48 x 48..

# NOTE(review): `K` (keras backend) and `settings` are not imported in this fragment — verify.
K.set_image_dim_ordering("tf")
CUBE_SIZE = 32  # edge length (voxels) of the cubic input patches
MEAN_PIXEL_VALUE = settings.MEAN_PIXEL_VALUE_NODULE
POS_WEIGHT = 2  # weight for positive samples -- TODO confirm where it is applied
NEGS_PER_POS = 1   # NEGS_PER_POS = 20
P_TH = 0.6  # probability threshold -- presumably for classifying a detection; verify
# POS_IMG_DIR = "luna16_train_cubes_pos"
LEARN_RATE = 0.1

# USE_DROPOUT = False
TENSORBOARD_LOG_DIR = "tfb_log/"
# 示例#3 (Example #3 — fragment boundary marker)
# 0
import os
import pandas as pd
import settings
import helpers
import dicom

# Module-level logger for this data-processing script.
mylogger = helpers.getlogger('process_data.log')

# Edge length (voxels) of the cubic patches used by this module.
CUBE_SIZE = 32


def merge_nodule_detector_results(patient_dir, result_dir, logger=mylogger):
    """Collect per-model nodule-detection CSV rows for each patient.

    For every patient id listed under ``patient_dir``, reads each model's
    ``<patient_id>.csv`` from the model sub-directories of ``result_dir``
    and accumulates the rows, rewriting each row's "anno_index" to
    "<model_name>_<row_index>" so merged rows stay traceable to their
    source model. A ``merge/`` sub-directory is created under ``result_dir``.

    NOTE(review): the function body is truncated in this fragment — what is
    done with ``merge_lines`` (presumably written into ``dst_dir``) is not
    visible here.

    :param patient_dir: directory whose entries are patient ids
    :param result_dir: directory containing one sub-directory of CSVs per model
    :param logger: logger used to report missing per-patient CSV files
    """
    # Nothing to do if either input location is missing.
    if not os.path.exists(patient_dir) or not os.path.exists(result_dir):
        return
    dst_dir = result_dir + "merge/"
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)

    for patient_id in os.listdir(patient_dir):
        merge_lines = []
        #merge_index = 0
        for model_name in os.listdir(result_dir):
            # Skip the output directory itself when scanning model results.
            if model_name != "merge":
                csv_path = result_dir + model_name + "/" + patient_id + ".csv"
                if not os.path.exists(csv_path):
                    logger.info("{0} does not exist.".format(csv_path))
                else:
                    result = pd.read_csv(csv_path)
                    for ind, row in result.iterrows():
                        # Tag each row with its source model and row index.
                        row["anno_index"] = model_name + "_" + str(ind)
                        merge_lines.append(row)