import argparse
import os

import tensorflow as tf

import keras
from keras.optimizers import Adam
from keras import backend as K
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.models import load_model

import config
from src import metric, model, io, util, dataGenerator, loss
from src.bf_grid import bf_grid

# Require at least one visible GPU and enable memory growth so TensorFlow
# allocates device memory on demand instead of grabbing it all up front.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
_config = tf.config.experimental.set_memory_growth(physical_devices[0], True)

# Per-stage wall-clock timings collected during training
timing = {}
# Create the log directory (if missing) and route messages to train.log
util.check_dir(config.path_logs)
util.set_logger(os.path.join(config.path_logs, 'train.log'))

parser = argparse.ArgumentParser(
    description='See description below to see all available options')

# Optional path to a previously saved model to resume training from
parser.add_argument('-pt',
                    '--pretrained',
                    help='Continuining training from the given model. \
                          [Default] is no model given',
                    default=None,
                    type=str,
                    required=False)

parser.add_argument('-w',
                    '--weight',
                    help='If model provided is Model Weight or not. \
import os
import logging

from src import util
from src import download
from src import converter
import config

# Route this script's log messages to process.log
util.set_logger(os.path.join(config.path_logs, 'process.log'))
logging.info('################## Starting Data ####################')
# Destination folders: same-day data and forecast data
path_output = 'data'
path_forecast = 'forecast'

# Name of the remote bucket holding SILAM air-quality datasets
# NOTE(review): assigned but not used in the visible lines — verify it is
# consumed further down in the original script.
bucket = 'silam-air-quality'

# Download one day (2020-12-17) of SILAM pollutant fields, plus forecast
# days 0-4, for each listed parameter.
data = download.SilamDataset()
data.download(path_output=path_output,
              path_forecast=path_forecast,
              start_date='2020-12-17',
              end_date='2020-12-17',
              parameter_list=['CO', 'NO2', 'NO', 'O3', 'PM10', 'PM25', 'SO2'],
              forecast_day_list=[0, 1, 2, 3, 4])

# Converting the downloaded NetCDF (.nc) files to Cloud-Optimized GeoTIFF
nc_to_cog = converter.ProcessingNC()
# Collect every .nc file under the data and forecast folders
path_data = util.list_list(path=path_output, extension='nc')
path_data_forecast = util.list_list(path=path_forecast, extension='nc')

# If same day data
# ---- 示例 #3 ("Example 3" — scrape-artifact fragment separator, not code) ----
from src import metric
from src import io
from src import util
from src import bf_grid
from src import metric
from src import dataGenerator
from src import model

import config

# physical_devices = tf.config.experimental.list_physical_devices('GPU')
# assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
# _config = tf.config.experimental.set_memory_growth(physical_devices[0], True)

# Ensure the log directory exists and write test-run messages to testing.log.
util.check_dir(config.path_logs)
# Fixed: was `os.path.os.path.join(...)` — that only worked by accident
# because posixpath re-exports the `os` module; `os.path.join` is intended.
util.set_logger(os.path.join(config.path_logs, 'testing.log'))

parser = argparse.ArgumentParser(
    description='See description below to see all available options')


def _str2bool(value) -> bool:
    """Parse a command-line string into a bool.

    `type=bool` is an argparse pitfall: bool('False') is True because any
    non-empty string is truthy. Treat common false spellings (and the empty
    string) as False; everything else as True.
    """
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() not in ('false', '0', 'no', 'n', '')


# Whether to skip the gridding step while testing (string parsed to bool)
parser.add_argument(
    '-sg',
    '--skipGridding',
    help='If skipping grididing while testing. [Default] False',
    type=_str2bool,
    default=False,
    required=False)

parser.add_argument('-d',
                    '--data',
                    help='Input Data folder where TIF files are stored',
import logging
import config
import os
from src import io, util

# Logging configuration for the dataset-generation script
util.check_dir(config.path_logs)
util.set_logger(os.path.join(config.path_logs, 'generateDataset.log'))

# Make sure every output directory exists before tiling begins.
# Order matters only in that it mirrors the original script: model, weight,
# prediction, tiled outputs, then the validation tile folders.
_required_dirs = (
    config.path_model,
    config.path_weight,
    config.path_prediction,
    config.path_tiled,
    config.path_tiled_image,
    config.path_tiled_label,
    # Validation
    config.path_vali_tiled_image,
    config.path_vali_tiled_label,
)
for _dir in _required_dirs:
    util.check_dir(_dir)

# Source VRTs for training images/labels and their validation counterparts
path_image = config.path_image_vrt
path_label = config.path_label_vrt

path_vali_image = config.path_vali_image_vrt
path_vali_label = config.path_vali_label_vrt

logging.info(
    'path_image: {}, path_label: {}, path_vali_image: {}, path_vali_label: {}'.
    format(path_image, path_label, path_vali_image, path_vali_label))

logging.info('Tiling Training Images...')
import gdal
import os
import config
from src import util
import logging

# Logging setup for the multi-resolution generation script
util.check_dir(config.path_logs)
util.set_logger(os.path.join(config.path_logs, 'generateMutiRes.log'))


def drange(start: float, stop: float, step: float):
    """Yield floats from ``start`` up to (but excluding) ``stop`` in ``step``
    increments, each rounded to 1 decimal place.

    Fixed: the original accumulated ``r += step``, so floating-point error
    could push a value past the exclusive bound (e.g. drange(0.0, 1.0, 0.1)
    yielded a spurious 1.0). Computing ``start + i * step`` keeps the error
    bounded and the stop truly exclusive.

    Args:
        start: first candidate value (yielded if start < stop).
        stop: exclusive upper bound.
        step: positive increment.

    Yields:
        float: successive values rounded to 1 decimal.
    """
    i = 0
    while True:
        value = start + i * step
        if value >= stop:
            break
        yield round(value, 1)
        i += 1


def getRes(path_tif: str) -> float:
    """
    Read the pixel resolution of the input image.

    Fixed: the annotation and docstring claimed no return value, but the
    function returns the resolution; the gdal dataset is now also released.

    Input:
        path_tif: path of the raster to be processed
    Output:
        resolution: pixel width (geotransform element 1), rounded to 1 decimal
    """
    ds = gdal.Open(path_tif)
    try:
        # GetGeoTransform()[1] is the x pixel size
        resolution = round(ds.GetGeoTransform()[1], 1)
    finally:
        # GDAL datasets are closed by dropping the last reference
        ds = None
    return resolution


logging.info('Iterating Training data in folder: {}'.format(config.path_image))
# ---- 示例 #6 ("Example 6" — scrape-artifact fragment separator, not code) ----
import config
from src import util

import numpy as np
import cv2
import os
from tqdm import tqdm
import logging

# Persist the model hyper-parameters to JSON, then reload them as a Params
# object so downstream code reads a single canonical source.
paramDict = config.modelParam
util.save_json(paramDict, config.pathParam)
params = util.Params(config.pathParam)

# Checking for logging folder and creating logging file
util.check_dir(config.pathLog)
util.set_logger(os.path.join(config.pathLog, 'generateAugmentation.log'))

logging.info(' Reading all the image file present in training data folder')
# Recursively collect every training file whose extension is listed in
# config.imageFormat (str.endswith accepts a tuple of suffixes).
pathTrainingData = []
for root, dirs, files in os.walk(config.pathTraining):
    for file in files:
        if file.endswith(tuple(config.imageFormat)):
            pathTrainingData.append(os.path.join(root, file))

# Checking for Image augmentations
xflip = params.xflip
yflip = params.yflip

# NOTE(review): precedence bug — this parses as `xflip and (yflip is True)`,
# not `(xflip is True) and (yflip is True)`; the intended check is almost
# certainly `if xflip and yflip:`. Left unchanged because the if-body is
# truncated at the end of this fragment — confirm and fix in the full file.
if (xflip) and (yflip) is True:
    bothflip = True