Example #1
def apply_transformations(X=None, y=None, save=False):

    config = get_config()

    if X is None:
        X = np.load("data/processed/X_2c.npy")
        y = np.load("data/processed/y_2c.npy")

    aug = create_augmenter()

    samples_to_augment = config["preprocessing"]["augmented_ds_size"]

    X_augmented = np.empty((samples_to_augment, 64, 64, 3))
    y_ = []

    for index in range(samples_to_augment):
        # pick a random source sample to duplicate before augmentation
        rnd = np.random.choice(X.shape[0])

        X_augmented[index, :, :, :] = X[rnd]
        y_.append(y[rnd])

    X_augmented = np.concatenate([X_augmented, X], axis=0)
    y_ = np.concatenate([y_, y])

    X_aug = aug.augment_images(X_augmented)

    if save:
        # save the augmented images that are also returned below
        np.save("data/processed/X_a.npy", X_aug)
        np.save("data/processed/y_a.npy", y_)
    return X_aug, y_
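The create_augmenter helper is not among these examples. A minimal sketch of a plausible imgaug-based augmenter (the iaa.Scale call in Example #21 suggests imgaug is the augmentation library used here); the chosen transforms are illustrative only:

import imgaug.augmenters as iaa

def create_augmenter():
    # hypothetical transforms; the project's real augmenter is not shown
    return iaa.Sequential([
        iaa.Fliplr(0.5),               # horizontal flips
        iaa.Affine(rotate=(-10, 10)),  # small random rotations
    ])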
Example #2
def load_callbacks(weights_path):

    config = get_config()
    cbacks_config = config["train"]["callbacks"]

    metrics = Metrics()
    model_checkpoint = ModelCheckpoint(weights_path, monitor='val_loss',
                                       save_best_only=True)

    # Metrics() is instantiated but currently left out of the callback list
    cbacks = [model_checkpoint]

    lr_reduce = cbacks_config["lr_reduce"]
    if lr_reduce['apply']:
        params = ['monitor', 'factor', 'patience', 'verbose', 'mode', 'min_delta', 'cooldown', 'min_lr']
        lr_reduce_cb = _load_config_params(params, ReduceLROnPlateau, lr_reduce)

        cbacks.append(lr_reduce_cb)


    early_stopping = cbacks_config["early_stopping"]
    if early_stopping["apply"]:
        params = ['monitor', 'min_delta', 'patience', 'mode', 'verbose']

        early_stopping_cb = _load_config_params(params, EarlyStopping, early_stopping)

        cbacks.append(early_stopping_cb)
    
    return cbacks
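Both this example and Example #4 call a _load_config_params helper that is not shown. A minimal sketch consistent with those call sites: pick the listed keys out of a config section and instantiate the given class with them.

def _load_config_params(params, cls, section):
    # keep only the parameters actually present in the config section
    kwargs = {p: section[p] for p in params if p in section}
    return cls(**kwargs)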
Example #3
def apply_rescale(X):
    """Normalizes a given dataset.

    Parameters
    ----------
    X: A numpy array of shape (n, w, h, c)

    Returns
    -------
    X_: X after applying normalization

    """
    config = get_config()
    if config["preprocessing"]["rescale"]:

        if config["preprocessing"]["rescale_method"] == "normalize":
            X_ = normalize_meanstd(X)

        elif config["preprocessing"]["rescale_method"] == "standartize":
            X_ = normalize_01(X)

        else:
            print("{} is not a valid rescaling method".format(
                config["preprocessing"]["rescale_method"]))
            X_ = X  # fall back to the unscaled data instead of failing at return

    else:
        X_ = X

    return X_
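Neither normalize_meanstd nor normalize_01 appears in these examples; minimal sketches inferred from the names alone:

import numpy as np

def normalize_meanstd(X):
    # zero-mean, unit-variance scaling (assumed from the name)
    return (X - X.mean()) / X.std()

def normalize_01(X):
    # min-max scaling to [0, 1] (assumed from the name)
    return (X - X.min()) / (X.max() - X.min())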
Example #4
def load_optimizer():

    config = get_config()

    opt_config = config["train"]["optimizers"]

    opt_name = opt_config["use"]

    if opt_name == "adam":
        params_to_parse = [
            "lr", "beta_1", "beta_2", "epsilon", "decay", "amsgrad"
        ]

        opt = _load_config_params(params_to_parse, Adam, opt_config)

    elif opt_name == "sgd":
        # SGD takes momentum/nesterov rather than rho/epsilon
        params_to_parse = ["lr", "momentum", "decay", "nesterov"]
        opt = _load_config_params(params_to_parse, SGD, opt_config)

    elif opt_name == "rmsprop":
        params_to_parse = ["lr", "rho", "epsilon", "decay"]
        opt = _load_config_params(params_to_parse, RMSprop, opt_config)

    elif opt_name == "nadam":
        params_to_parse = ["lr", "beta_1", "beta_2", "epsilon"]

        opt = _load_config_params(params_to_parse, Nadam, opt_config)

    else:
        raise ValueError("unknown optimizer: {}".format(opt_name))

    return opt
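load_optimizer implies a flat optimizer section in the config. A hypothetical config["train"]["optimizers"] entry that would drive the adam branch:

opt_config = {
    "use": "adam",       # selects which branch of load_optimizer runs
    "lr": 1e-3,
    "beta_1": 0.9,
    "beta_2": 0.999,
    "epsilon": 1e-7,
    "decay": 0.0,
    "amsgrad": False,
}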
Example #5
    def architecture(self):

        config = get_config()
        config = config["train"]["optimizers"]

        # Input
        img_input = Input(shape=(64, 64, 1))

        output = CrfRnnLayer(
            image_dims=(64, 64),
            num_classes=1,
            theta_alpha=config["crf_theta_alpha"],
            theta_beta=config["crf_theta_beta"],
            theta_gamma=config["crf_theta_gamma"],
            num_iterations=config["crf_num_iterations"],
            name='crfrnn')([img_input, img_input])

        k = Flatten()(output)
        predictions = Dense(1, activation='sigmoid')(k)

        model = Model(img_input, predictions, name='crfrnn_net')

        return model
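Note: CrfRnnLayer appears to follow the CRF-as-RNN Keras implementation's interface, which expects a two-element list of [unaries, raw_image]; in this minimal model the raw input is passed as both.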
Example #6
def create_augmented_dataset(X=None, y=None, save=False, return_data=False):
    X_, y_ = apply_transformations(X, y)

    if save:
        np.save("data/processed/X_train.npy", X_)
        np.save("data/processed/y_train.npy", y_)

    if return_data:
        return X_, y_
Example #7
def log_model(model, c_backs, metrics_dic, config=None):
    """ Description
    :type model: Keras model
    :param model:

    :type c_backs: Keras call_back file
    :param c_backs:

    :type config: A dict containing the current parameters
    :param config:

    :raises:

    :rtype:
        """

    import datetime

    if config is None:
        from src.helper import get_config

        config = get_config()

    now = datetime.datetime.now()

    log = dict()
    log["datetime"] = {
        'date': now.strftime("%Y-%m-%d %H:%M"),
        'unix': now.timestamp()
    }
    log["metric"] = metrics_dic

    hist = model.history.history
    hist["epoch"] = model.history.epoch

    for cb in c_backs:
        if cb.__class__.__name__ == 'Metrics':
            hist["val_auroc"] = cb.val_auroc
            hist["val_precision"] = cb.val_precisions
            hist["val_recall"] = cb.val_recalls

    log["parameters"] = config
    log["history"] = hist

    import os
    file_path = os.path.join('logs', now.strftime("log_%Y_%m_%d_%H_%M.json"))

    with open(file_path, 'a+') as fp:
        json.dump(log, fp, cls=NumpyEncoder)
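log_model serializes with a NumpyEncoder that is not defined in these examples. A common pattern for such an encoder, offered as an assumption about what the project's version does:

import json
import numpy as np

class NumpyEncoder(json.JSONEncoder):
    """Make numpy scalars and arrays JSON-serializable."""

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)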
Example #8
    def architecture(self):
        config = get_config()
        config = config["train"]["optimizers"]

        base_model = ResNet50(include_top=False,
                              input_shape=self.input_shape,
                              classes=1,
                              weights=None)

        x = base_model.output

        score2 = Conv2DTranspose(1, (12, 12), strides=2, name='score2')(x)

        # Final up-sampling and cropping
        upsample = Conv2DTranspose(1, (12, 12),
                                   strides=4,
                                   name='upsample',
                                   use_bias=False)(score2)
        img_input = base_model.input

        output = CrfRnnLayer(
            image_dims=(64, 64),
            num_classes=1,
            theta_alpha=config["crf_theta_alpha"],
            theta_beta=config["crf_theta_beta"],
            theta_gamma=config["crf_theta_gamma"],
            num_iterations=config["crf_num_iterations"],
            name='crfrnn')([upsample, img_input])

        x = Flatten()(output)
        x = Dense(1024, activation='relu')(x)
        x = Dropout(.5)(x)
        x = Dense(1024, activation='relu')(x)
        predictions = Dense(1, activation='sigmoid')(x)

        model = Model(inputs=base_model.input, outputs=predictions)

        return model
Example #9
def get_image_patch(img_array, coords, padding=None):
    """ Description
    :type img_array: numpy array
    :param img_array: a image comprised of a numpy array

    :type coords: list(i, j, k)
    :param coords: the coordinates where the crop will be centered; i is the column, j the row, and k the slice index

    :type padding: int or list(x, y)
    :param padding: the padding around the center coordinates. An int gives a square patch; with a list, x is the horizontal padding and y the vertical

    :raises:

    :rtype:
     """

    if padding is None:
        from src.helper import get_config

        config = get_config()

        padding = config["general"]["padding"]

    i = coords[0]
    j = coords[1]
    k = coords[2]

    if isinstance(padding, list):
        h_padding = padding[0]
        v_padding = padding[1]
    else:
        h_padding = padding
        v_padding = padding

    try:
        X_ = img_array[j - v_padding:j + v_padding,
                       i - h_padding:i + h_padding, k]
    except IndexError:
        return None
    else:
        if X_.shape == (2 * v_padding, 2 * h_padding):
            return np.asarray(X_).reshape((1, 2 * v_padding, 2 * h_padding, 1))
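A quick usage sketch for get_image_patch, with made-up data:

import numpy as np

volume = np.random.rand(128, 128, 20)  # (rows, cols, slices)
patch = get_image_patch(volume, (64, 64, 10), padding=16)
# patch has shape (1, 32, 32, 1) when the crop fits, otherwise None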
Example #10
class BaseArchitecture:
    weights_directory = "data/interim/weights/"

    name = "Base Architecture"
    flaten_layer = None

    config = get_config()
    input_shape = (2 * config["general"]["padding"],
                   2 * config["general"]["padding"],
                   config["general"]["channels"])

    def __init__(self, load_weights=False):
        self.id = self.name.lower().strip().replace(" ", "_")
        self.file = "{}.h5".format(self.id)
        self.weights_path = os.path.join(self.weights_directory, self.file)
        self.load_weights = load_weights

    def architecture(self):
        ''' This method returns the keras architecture of the model. It should be implemented by each sub class '''
        raise NotImplementedError
Example #11
class DatabaseConnect:
    """ handle db """

    CONFIG = get_config()

    def __init__(self):
        self.conn, self.cur = self.db_connect()

    def db_connect(self):
        """ returns connection and curser """
        # Connect to database
        conn = psycopg2.connect(
            host=self.CONFIG['postgres']['db_host'],
            database=self.CONFIG['postgres']['db_database'],
            user=self.CONFIG['postgres']['db_user'],
            password=self.CONFIG['postgres']['db_password'])
        # Open a cursor to perform database operations
        cur = conn.cursor()
        return conn, cur

    def db_execute(self, query):
        """ run a query """
        if isinstance(query, str):
            self.cur.execute(query)
            rows = self.cur.fetchall()
        elif isinstance(query, tuple):
            self.cur.execute(query[0], query[1])
            rows = False
        else:
            raise TypeError('query must be a str or a (sql, params) tuple')

        return rows

    def db_close(self):
        """ clean close the conn and curser """
        self.conn.commit()
        self.cur.close()
        self.conn.close()
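A usage sketch for DatabaseConnect, assuming an aqi table like the one queried in Examples #17 and #18: a plain SQL string reads rows, while a (sql, params) tuple runs a parameterized statement.

db = DatabaseConnect()

# read: a plain string returns the fetched rows
rows = db.db_execute('SELECT epoch_time, aqi_value FROM aqi LIMIT 5;')

# write: a (sql, params) tuple is executed with psycopg2 placeholders
db.db_execute(('INSERT INTO aqi (epoch_time, aqi_value) VALUES (%s, %s);',
               (1700000000, 42)))

db.db_close()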
Example #12
    def architecture(self):
        config = get_config()
        config = config["train"]["optimizers"]

        # Input
        img_input = Input(shape=self.input_shape)

        # Add plenty of zero padding
        x = ZeroPadding2D(padding=(218, 218))(img_input)

        # VGG-16 convolution block 1
        x = Conv2D(64, (3, 3), activation='relu', padding='valid', name='conv1_1')(x)
        x = Conv2D(64, (3, 3), activation='relu', padding='same', name='conv1_2')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(x)

        # VGG-16 convolution block 2
        x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_1')(x)
        x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_2')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='pool2', padding='same')(x)

        # VGG-16 convolution block 3
        x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_1')(x)
        x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_2')(x)
        x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_3')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='pool3', padding='same')(x)
        pool3 = x

        # VGG-16 convolution block 4
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_1')(x)
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_2')(x)
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_3')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='pool4', padding='same')(x)
        pool4 = x

        # VGG-16 convolution block 5
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_1')(x)
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_2')(x)
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_3')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='pool5', padding='same')(x)

        # Fully-connected layers converted to convolution layers
        x = Conv2D(128, (7, 7), activation='relu', padding='valid', name='fc6')(x)
        x = Dropout(0.5)(x)
        x = Conv2D(128, (1, 1), activation='relu', padding='valid', name='fc7')(x)
        x = Dropout(0.5)(x)
        x = Conv2D(21, (1, 1), padding='valid', name='score-fr')(x)

        # Deconvolution
        score2 = Conv2DTranspose(1, (4, 4), strides=2, name='score2')(x)

        # Skip connections from pool4
        score_pool4 = Conv2D(1, (1, 1), name='score-pool4')(pool4)
        score_pool4c = Cropping2D((5, 5))(score_pool4)
        score_fused = Add()([score2, score_pool4c])
        score4 = Conv2DTranspose(1, (4, 4), strides=2, name='score4', use_bias=False)(score_fused)

        # Skip connections from pool3
        score_pool3 = Conv2D(1, (1, 1), name='score-pool3')(pool3)
        score_pool3c = Cropping2D((9, 9))(score_pool3)
        score_pool3c = ZeroPadding2D(padding=((1,0), (1,0)))(score_pool3c)


        # Fuse things together
        score_final = Add()([score4, score_pool3c])

        # Final up-sampling and cropping
        upsample = Conv2DTranspose(1, (4, 4), strides=4, name='upsample', use_bias=False)(score_final)
        upscore = Cropping2D(((56, 56), (56, 56)))(upsample)
        upscore = Cropping2D(((4, 4), (4, 4)))(upscore)

        output = CrfRnnLayer(image_dims=(64, 64),
                             num_classes=1,
                             theta_alpha=config["crf_theta_alpha"],
                             theta_beta=config["crf_theta_beta"],
                             theta_gamma=config["crf_theta_gamma"],
                             num_iterations=config["crf_num_iterations"],
                             name='crfrnn')([upscore, img_input])


        classi = Add()([upscore, output])
        k = Flatten()(classi)

        k = Dense(128, activation='relu')(k)
        k = Dropout(.5)(k)
        k = Dense(256, activation='relu')(k)
        predictions = Dense(1, activation='sigmoid')(k)

        # Build the model
        model = Model(img_input, predictions, name='CRFVGG')

        return model
Example #13
from flask import Flask
from flask_httpauth import HTTPBasicAuth
from apscheduler.schedulers.background import BackgroundScheduler

from src.helper import Table, get_config
from src.db import get_current, insert_data
from src.graph_current import main as current_graph
from src.graph_nightly import main as nightly_graph
from src.graph_monthly import main as monthly_graph

import matplotlib
matplotlib.use('Agg')

# start up
app = Flask(__name__)

CONFIG = get_config()
auth = HTTPBasicAuth()
aqi_user = CONFIG['aqi_monitor']
USER_DATA = {aqi_user['authUsername']: aqi_user['authPassword']}

# initial export
print('initial export')
current_graph()
nightly_graph()
monthly_graph()

# start scheduler
scheduler = BackgroundScheduler()
scheduler.add_job(current_graph,
                  trigger="cron",
                  minute='*/5',
Example #14
from keras.models import Model
from src.helper import get_config
import numpy as np
import os
import yaml

config = get_config()


def get_intermediate_output(model, layer_name, X, y=None):
    intermediate_layer_model = Model(
        inputs=model.input, outputs=model.get_layer(layer_name).output)
    intermediate_output = intermediate_layer_model.predict(X)

    return intermediate_output


def save_results(model, X, y, label, d_set, layer_name):
    out_folder = config["meta"]["layers_path"]
    weights_path = os.path.join(out_folder, "{}.h5".format(label))
    np_path = os.path.join(out_folder, "{}_{}".format(label, d_set))

    x_ = get_intermediate_output(model, layer_name, X, y)
    output = np.column_stack((x_, y))

    #model.save(weights_path)
    np.save(np_path, output)


def save_results_wrapper(model, layer_name, label, X_train, y_train, X_test,
                         y_test, X_val, y_val):
Example #15
    def architecture(self):

        config = get_config()
        config = config["train"]["optimizers"]

        # Input
        img_input = Input(shape=self.input_shape)

        # block 1

        x = Conv2D(strides=1, filters=32, kernel_size=3,
                   padding="same")(img_input)
        x = BatchNormalization()(x)
        x = Activation(activation="relu")(x)
        x = Conv2D(strides=1, filters=32, kernel_size=3, padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation(activation="relu")(x)

        block1 = x

        x = MaxPooling2D((2, 2), 2, padding="same")(x)

        # block 2

        x = Conv2D(strides=1, filters=16, kernel_size=3, padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation(activation="relu")(x)

        x = Conv2D(strides=1, filters=16, kernel_size=3, padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation(activation="relu")(x)

        block2 = x

        x = MaxPooling2D((2, 2), 2, padding="same")(x)

        block3 = x

        # Deconvolution

        score1 = Conv2D(1, (1, 1))(block1)
        score2 = Conv2D(1, (1, 1))(block2)
        score2 = Conv2DTranspose(1, (2, 2),
                                 strides=2,
                                 name='score2',
                                 use_bias=False)(score2)

        score3 = Conv2D(1, (1, 1))(block3)
        score3 = Conv2DTranspose(1, (2, 2), strides=4)(score3)
        score3 = BatchNormalization()(score3)

        # Fuse the block scores together
        score = Add()([score1, score2])
        score = BatchNormalization()(score)
        score_pool = Add()([score, score3])
        score_pool = BatchNormalization()(score_pool)

        output = CrfRnnLayer(
            image_dims=(64, 64),
            num_classes=1,
            theta_alpha=config["crf_theta_alpha"],
            theta_beta=config["crf_theta_beta"],
            theta_gamma=config["crf_theta_gamma"],
            num_iterations=config["crf_num_iterations"],
            name='crfrnn')([score_pool, img_input])

        output = BatchNormalization()(output)

        classi = Add()([score_pool, output])
        k = Flatten()(classi)

        k = Dense(128, activation='relu')(k)
        k = Dropout(.5)(k)
        k = Dense(256, activation='relu')(k)
        predictions = Dense(1, activation='sigmoid')(k)

        # Build the model
        model = Model(img_input, predictions, name='crfrnn_net')

        return model
Example #16
def get_exam(lesion_info,
             exam='ADC',
             padding=None,
             base_path="data/interim/train/"):
    """ Description
    :type lesion_info: row of metadata_labels
    :param lesion_info:

    :type exam: string
    :param exam: the exam string to split in the description

    :type padding: int
    :param padding: Padding around the lesion to be retrieved

    :type base_path: string
    :param base_path: path to where the image files are in the project directory

    :raises:

    :rtype:
    """

    if padding is None:
        from src.helper import get_config

        config = get_config()

        padding = config["general"]["padding"]

    exam_row = lesion_info

    exam_row = exam_row.loc[exam_row.DCMSerDescr.str.contains(exam)]

    if exam_row.empty:
        print("No lesion found")
        return None

    else:
        tmp_row = exam_row.iloc[0]

        exam_folder = os.path.join(os.getcwd(), base_path, tmp_row.ProxID,
                                   tmp_row.DCMSerDescr)

        if (exam != 'KTrans'):
            image = load_dicom_series(input_dir=exam_folder)

        else:
            image = load_ktrans(exam_folder)

        if image is None:
            return None

        if (tmp_row.k < image.shape[2]):
            slice_array = image[:, :, tmp_row.k]
            patch = get_image_patch(slice_array, (tmp_row.i, tmp_row.j),
                                    padding=padding)
            # print(patch.shape)

        else:
            print("Had to cut image")
            return None

        if patch is None:
            return None

        if patch.shape == (2 * padding, 2 * padding):
            return np.asarray(patch).reshape((1, 2 * padding, 2 * padding, 1))
Example #17
class CurrentPlot:
    """ recreate the last 3h plot """

    CONFIG = get_config()
    FILENAME = 'static/dyn/current.png'

    def __init__(self):
        self.now = datetime.now()
        self.rows = self.get_data()
        self.axis = None

    def get_data(self):
        """ export from postgres """
        now_epoch = int(self.now.strftime('%s'))
        last_3h = now_epoch - 3 * 60 * 60

        query = ('SELECT epoch_time, aqi_value FROM aqi '
                 f'WHERE epoch_time > {last_3h} ORDER BY epoch_time DESC;')

        db_handler = DatabaseConnect()
        rows = db_handler.db_execute(query)
        db_handler.db_close()

        return rows

    def build_title(self):
        """ build title from timestamps """

        time_from = datetime.fromtimestamp(self.rows[-1][0]).strftime('%H:%M')
        time_until = datetime.fromtimestamp(self.rows[0][0]).strftime('%H:%M')
        plt_title = f'AQI values last 3h: {time_from} - {time_until}'

        return plt_title

    def build_axis(self):
        """ build plot axis """
        rows = self.rows
        x_timeline = [datetime.fromtimestamp(i[0]) for i in rows]
        y_aqi_values = [int(i[1]) for i in rows]
        data = {'timestamp': x_timeline, 'aqi': y_aqi_values}
        df = pd.DataFrame(data)

        indexed = df.set_index('timestamp')
        indexed.sort_values(by=['timestamp'], inplace=True)
        mean = indexed.resample('3min').mean()
        mean.interpolate(method='linear',
                         limit=1,
                         inplace=True,
                         limit_area='inside')
        mean.reset_index(level=0, inplace=True)
        mean['timestamp'] = mean['timestamp'].dt.strftime('%H:%M')
        mean['aqi'] = mean['aqi'].round()
        plt_title = self.build_title()
        # xticks
        x_ticks = []
        for num, i in enumerate(mean['timestamp']):
            minute = int(i.split(':')[1])
            if minute % 15 == 0:
                x_ticks.append(num)

        axis = {
            'x': mean['timestamp'],
            'y': mean['aqi'],
            'x_ticks': x_ticks,
            'plt_title': plt_title
        }
        self.axis = axis

    def write_plt(self):
        """ save plot to file """
        x = self.axis['x']
        y = self.axis['y']
        x_ticks = self.axis['x_ticks']
        # calc ticks
        y_max = np.ceil(y.max() / 50) * 50 + 50
        # setup plot
        plt.style.use('seaborn')
        plt.plot(
            x,
            y,
            color='#313131',
        )
        # fill colors
        plt_fill(plt, x, y)
        # handle passing ticks and labels separately
        if len(x_ticks) == 2:
            plt.xticks(x_ticks[0], x_ticks[1])
        else:
            plt.xticks(x_ticks)
        plt.yticks(np.arange(0, y_max, step=50))
        plt.title(self.axis['plt_title'], fontsize=20)
        plt.tight_layout()
        plt.savefig(self.FILENAME, dpi=300)
        plt.figure()
        plt.close('all')
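The plt_fill helper used in write_plt is not part of these examples. A minimal stand-in, assuming it shades the area under the AQI curve:

def plt_fill(plt, x, y):
    # hypothetical helper: shade the area under the plotted line
    plt.fill_between(x, y, color='#313131', alpha=0.2)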
Example #18
class NightlyPlots:
    """ get nightly data """

    CONFIG = get_config()

    def __init__(self):
        self.now = datetime.now()
        print('get data from db')
        self.rows, self.y_rows = self.get_data()

    @staticmethod
    def color_colums(y):
        """ helper function to color bar columns """

        breakpoints = [
            ('#85a762', 0, 50),  # good
            ('#d4b93c', 50, 100),  # moderate
            ('#e96843', 100, 150),  # unhealthy for sensitive groups
            ('#d03f3b', 150, 200),  # unhealthy
            ('#be4173', 200, 300),  # vunhealthy
            ('#714261', 300, 500),  # hazardous
        ]

        colors = []
        for value in y:
            for break_point in breakpoints:
                color, min_val, max_val = break_point
                if min_val < value <= max_val:
                    # found it
                    colors.append(color)
                    break

        return colors

    def get_data(self):
        """ export from postgres """
        # current
        day_until = int(self.now.date().strftime('%s'))
        day_from = day_until - 10 * 24 * 60 * 60
        query = ('SELECT epoch_time, aqi_value, pm25, pm10 FROM aqi WHERE '
                 f'epoch_time > {day_from} AND epoch_time < {day_until} '
                 'ORDER BY epoch_time DESC;')
        # last year
        y_until = day_until - 365 * 24 * 60 * 60
        y_from = y_until - 10 * 24 * 60 * 60
        y_query = ('SELECT epoch_time, aqi_value FROM aqi WHERE '
                   f'epoch_time > {y_from} AND epoch_time < {y_until} '
                   'ORDER BY epoch_time DESC;')
        db_handler = DatabaseConnect()
        rows = db_handler.db_execute(query)
        y_rows = db_handler.db_execute(y_query)
        db_handler.db_close()

        return rows, y_rows

    def recreate_last_7(self):
        """ last seven days """
        day_until = int(self.now.date().strftime('%s'))
        day_from = day_until - 7 * 24 * 60 * 60
        rows = [i for i in self.rows if day_from < i[0] < day_until]
        date_from = datetime.fromtimestamp(day_from).strftime('%d %b')
        date_until = datetime.fromtimestamp(day_until).strftime('%d %b')
        plt_title = f'AQI values from: {date_from} until {date_until}'
        _ = LastSevenDays(rows, plt_title)

    def recreate_last_3(self):
        """ last three days """
        _ = LastThreeDays(self.rows, self.now)

    def recreate_pm_chart(self):
        """ recreating pm2.5 and pm10 charts """
        _ = PmGraphs(self.rows)

    def recreate_hour_bar(self):
        """ recreate hourly average through day bar chart """
        day_until = int(self.now.date().strftime('%s'))
        day_from = day_until - 3 * 24 * 60 * 60
        rows = [i for i in self.rows if day_from < i[0] < day_until]
        _ = HourBar(rows)

    def recreate_year_comparison(self):
        """ recreate year comparison chart and table for json """
        _ = YearComparison(self.rows, self.y_rows)
Example #19
    def architecture(self):
        config = get_config()
        config = config["train"]["optimizers"]
        # Initialize model
        # Layer 1
        img_input = Input(shape=self.input_shape)

        x = Conv2D(96, (11, 11),
                   input_shape=self.input_shape,
                   padding='same',
                   activation='relu')(img_input)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = MaxPool2D(pool_size=(2, 2))(x)

        block_1 = Conv2D(1, (1, 1))(x)

        # Layer 2
        x = Conv2D(256, (5, 5), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = MaxPool2D(pool_size=(2, 2))(x)

        # Layer 3
        x = Conv2D(512, (3, 3), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = MaxPool2D(pool_size=(2, 2))(x)

        block_2 = Conv2D(1, (1, 1))(x)

        # Layer 4
        x = ZeroPadding2D((1, 1))(x)
        x = Conv2D(512, (3, 3), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)

        # Layer 5
        x = ZeroPadding2D((1, 1))(x)
        x = Conv2D(512, (3, 3), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)

        block_3 = Conv2D(1, (1, 1))(x)
        block_3 = ZeroPadding2D((2, 2))(block_3)

        block_1 = Conv2DTranspose(1, (1, 1), strides=2,
                                  name="block_1")(block_1)

        block_2 = Conv2DTranspose(1, (1, 1), strides=8,
                                  name="block_2")(block_2)

        block_3 = Conv2DTranspose(1, (1, 1), strides=4,
                                  name="block_3")(block_3)

        upscore = Add()([block_1, block_3, block_2])

        output = CrfRnnLayer(
            image_dims=(64, 64),
            num_classes=1,
            theta_alpha=config["crf_theta_alpha"],
            theta_beta=config["crf_theta_beta"],
            theta_gamma=config["crf_theta_gamma"],
            num_iterations=config["crf_num_iterations"],
            name='crfrnn')([upscore, img_input])

        classi = Add()([upscore, output])
        k = Flatten()(classi)

        k = Dense(128, activation='relu')(k)
        k = Dropout(.5)(k)
        k = Dense(256, activation='relu')(k)
        predictions = Dense(1, activation='sigmoid')(k)

        # Build the model
        model = Model(img_input, predictions, name='CRFALEXNET')

        return model
Example #20
class Weather:
    """ handle weather lookup from API """

    CONFIG = get_config()

    def __init__(self):
        now = datetime.now()
        self.epoch_time = int(now.strftime('%s'))
        self.last_weather = self.get_weather()

    def get_weather(self):
        """ get weather from disk or api if too old """
        try:
            last_dict = self.get_cache()
        except FileNotFoundError:
            # create for first time
            last_dict = self.get_openweather()
        last_epoch = last_dict['epoch_time']

        if self.epoch_time - last_epoch > 10 * 60:
            print('get new weather data')
            weather = self.get_openweather()
        else:
            print('reuse weather data')
            weather = last_dict

        del weather['epoch_time']

        return weather

    def get_openweather(self):
        """ get missing weatherdata from openweathermap api """
        api_key = self.CONFIG['openweathermap']['api_key']
        lat = self.CONFIG['openweathermap']['lat']
        lon = self.CONFIG['openweathermap']['lon']

        url = ('https://api.openweathermap.org/data/2.5/weather' +
               f'?&units=metric&appid={api_key}&lat={lat}&lon={lon}')
        resp = requests.get(url, timeout=20).json()
        weather = {
            'weather_name': resp['weather'][0]['main'],
            'weather_icon': resp['weather'][0]['icon'],
            'wind_speed': resp['wind']['speed'],
            'wind_direction': resp['wind']['deg'],
            'epoch_time': self.epoch_time
        }
        self.write_cache(weather)

        return weather

    @staticmethod
    def get_cache():
        """ get last stored dict """
        with open('static/dyn/weather.json', 'r') as f:
            last = f.read()

        last_dict = json.loads(last)
        return last_dict

    @staticmethod
    def write_cache(weather):
        """ update last stored value """
        weather_str = json.dumps(weather)
        with open('static/dyn/weather.json', 'w') as f:
            f.write(weather_str)
Example #21
def create_dataset(padding=None, overwrite=False):

    config = get_config()
    base_path = "data/interim/train/"

    metadata = pd.read_csv("data/interim/train_information.csv")

    if padding is None:

        padding = config["general"]["padding"]

        print(padding)

    X = np.empty((1000, 2 * padding, 2 * padding, 3))

    y = []

    i = 0
    to_iterate = metadata.drop_duplicates(
        ["ProxID", "fid", "pos"])[["ProxID", "fid", "ClinSig"]]

    aug = iaa.Scale(0.5,
                    interpolation=config["preprocessing"]["interpolation"])

    for tup in to_iterate.itertuples():

        lesion_info = metadata[(metadata.ProxID == tup.ProxID)
                               & (metadata.fid == tup.fid)]

        adc = get_exam(lesion_info, '_ADC', padding=padding)
        if adc is None:
            continue

        t2_tse = get_exam(lesion_info, 't2_tse_tra', padding=2 * padding)
        if t2_tse is None:
            continue
        t2_tse = np.uint8(t2_tse)
        t2_tse = aug.augment_images(t2_tse)

        ktrans = get_exam(lesion_info, 'KTrans', padding=padding)

        if ktrans is None:
            continue

        X_ = np.concatenate([adc, t2_tse, ktrans], axis=3)

        X[i, :, :, :] = X_
        i = i + 1

        y_ = 1 if tup.ClinSig else 0
        y.append(y_)

    X = X[:i]

    print(X.shape)

    if overwrite:
        np.save("data/processed/X_36.npy", X)
        np.save("data/processed/y_36.npy", y)

    return X, y