コード例 #1
0
def run_eval_mend():
    """Run one evaluation pass of the mend model on 'road-car.png' using the
    binarized cloud mask from 'road-cloud0.png'; writes temp/eval-00000000.jpg.
    """
    img = cv2.imread('road-car.png')[np.newaxis, :, :, :]
    img = np.pad(img, ((0, 0), (32, 32), (32, 32), (0, 0)), 'reflect')
    # mask = cv2.imread('road-label.png')[np.newaxis, :, :, :]
    mask = cv2.imread('road-cloud0.png')[np.newaxis, :, :, :]
    mask = np.pad(mask, ((0, 0), (32, 32), (32, 32), (0, 0)), 'reflect')[:, :, :, 0:1]

    # Binarize the mask at a high threshold (cloud = 255, ground = 0).
    threshold = 244
    mask[mask < threshold] = 0
    mask[mask >= threshold] = 255

    # cv2.imshow('', mask[0])
    # cv2.waitKey(5432)
    eval_list = [img, mask, img, mask]

    # Fix: the original imported init_train from mod_mend_dila and built its
    # Config, then immediately shadowed both with the mod_mend_nres pair;
    # the dead first import/Config is removed.
    from mod_mend_nres import init_train
    C = Config('mod_mend_nres')
    inp_ground, inp_mask01, inp_grdbuf, inp_mskbuf, fetch, eval_fetch = init_train()

    C.size = img.shape[1]  # network input size follows the padded image
    sess = mod_util.get_sess(C)
    mod_util.get_saver_logger(C, sess)
    print("||Training Check")
    eval_feed_dict = {inp_ground: eval_list[0],
                      inp_mask01: eval_list[1],
                      inp_grdbuf: eval_list[2],
                      inp_mskbuf: eval_list[3], }
    img_util.get_eval_img(mat_list=sess.run(eval_fetch, eval_feed_dict), channel=3,
                          img_path="%s/eval-%08d.jpg" % ('temp', 0))
コード例 #2
0
def run_eval_haze():
    """Evaluate the de-haze U-net once on 'road-thin.png' and save the
    resulting image grid to temp/eval-00000000.jpg."""
    ground = cv2.imread('road-thin.png')[np.newaxis, :, :, :]
    ground = np.pad(ground, ((0, 0), (32, 32), (32, 32), (0, 0)), 'reflect')
    # The haze model needs no real mask; feed an all-zero single channel.
    eval_list = [ground, np.zeros_like(ground[:, :, :, 0:1])]
    from mod_haze_unet import init_train
    inp_ground, inp_mask01, train_fetch, eval_fetch = init_train()

    C = Config('mod_haze_unet')
    C.size = ground.shape[1]
    sess = mod_util.get_sess(C)
    mod_util.get_saver_logger(C, sess)
    print("||Training Check")
    feed = {inp_ground: eval_list[0],
            inp_mask01: eval_list[1], }
    mats = sess.run(eval_fetch, feed)
    img_util.get_eval_img(mat_list=mats, channel=3,
                          img_path="%s/eval-%08d.jpg" % ('temp', 0))
コード例 #3
0
ファイル: conv.py プロジェクト: arokis/conv_py
    def configure(self, main_config):
        """Load the main config, then resolve and parse the scripts and
        engines sub-configs, the tmp dir and the output dir from it."""
        self.main_config = Config.read_config(main_config)

        def _section_config(section):
            # Resolve the section's config file path, then parse it.
            cfg_path = Convpy.pathify(True,
                                      self.main_config[section]['path'],
                                      self.main_config[section]['config'])
            return Config.read_config(cfg_path)

        self.scripts = _section_config('scripts')
        self.engines = _section_config('engines')

        self.tmp_dir = os.path.dirname(Convpy.pathify(True, Convpy.tmp_file))
        self.output_dir = self.main_config['output']['dir']
コード例 #4
0
def cloud_detect(aerials):
    """Run the trained cloud-detection U-net over each aerial image.

    :param aerials: uint8 array (N, H, W, 3); only shape[1] is used as the
        network size, so images are assumed square -- TODO confirm.
    :return: (grounds, mask01s) -- uint8 arrays of predicted ground RGB and
        1-channel cloud masks, concatenated over the batch axis.
    """
    import tensorflow as tf
    from configure import Config
    from utils import mod_util
    from mod_cloud_detect import unet
    C = Config('mod_cloud_detect')

    size = aerials.shape[1]

    # Inference graph: uint8 -> [0, 1] float -> U-net with 4 output
    # channels (3 = ground RGB, 1 = cloud mask).
    unet_name, unet_dim = 'unet', 24
    inp_aerial = tf.placeholder(tf.uint8, [None, size, size, 3])
    ten_aerial = tf.to_float(inp_aerial) / 255
    eva_grdcld = unet(ten_aerial, unet_dim, 4, unet_name, reuse=False, training=False)
    eva_ground = eva_grdcld[:, :, :, 0:3]
    eva_mask01 = eva_grdcld[:, :, :, 3:4]

    sess = mod_util.get_sess(C)
    mod_util.get_saver_logger(C, sess)  # presumably restores checkpoint weights -- verify
    print("||Training Check")

    # aerials_shape = list(aerials.shape[-1:])
    # aerials_shape = [-1, 16] + aerials_shape
    # aerials = aerials.reshape(aerials_shape)

    grounds = list()
    mask01s = list()
    for i, aerial in enumerate(aerials):
        # One image per session run; outputs scaled back to uint8 [0, 255].
        eval_feed_dict = {inp_aerial: aerial[np.newaxis, :, :, :]}
        # eval_fetch = [ten_aerial, eva_ground, eva_mask01]
        eval_fetch = [eva_ground, eva_mask01]
        mat_list = sess.run(eval_fetch, eval_feed_dict)

        grounds.append(np.clip(mat_list[0] * 255, 0, 255).astype(np.uint8))
        mask01s.append(np.clip(mat_list[1] * 255, 0, 255).astype(np.uint8))

        # img_util.get_eval_img(mat_list=mat_list,channel=3, img_write=False
        #                                  img_path="%s/eval-%08d.jpg" % ('temp', 0),)
        if rd.rand() < 0.01:  # progress log for roughly 1% of images
            print('Eval:', i)

    # def mats_list2jpg(mats_list, save_name):
    #     mats = np.concatenate(mats_list, axis=0)
    #     img = img_grid_reverse(mats)
    #     cv2.imwrite(save_name, img)
    #
    # mats_list2jpg(grounds, 'su_zhou/ground.jpg')
    # mats_list2jpg(mask01s, 'su_zhou/mask01.jpg')
    grounds = np.concatenate(grounds, axis=0)
    mask01s = np.concatenate(mask01s, axis=0)
    return grounds, mask01s
コード例 #5
0
def cloud_removal(aerials, label1s):
    """Inpaint cloud-masked regions of aerial images with the trained
    reconstruction auto-encoder.

    :param aerials: uint8 array (N, H, W, 3) of ground images.
    :param label1s: per-image masks; only channel 0 is used as the cloud mask.
    :return: (grounds, patch3s) -- uint8 arrays of inpainted images and of
        the raw generated patches, concatenated over the batch axis.
    """
    import tensorflow as tf
    from configure import Config
    from utils import mod_util
    from mod_cloud_remove_rec import auto_encoder
    C = Config('mod_cloud_remove_rec')

    size = aerials.shape[1]

    gene_name, gene_dim = 'gene', 32
    inp_ground = tf.placeholder(tf.uint8, [None, size, size, 3])
    ten_ground = tf.to_float(inp_ground) / 255
    inp_mask01 = tf.placeholder(tf.uint8, [None, size, size, 1])
    ten_mask01 = tf.to_float(inp_mask01) / 255

    # Zero out masked pixels; the generator input marks them with -1.
    ten_mask10 = (1.0 - ten_mask01)
    ten_ragged = ten_ground * ten_mask10

    ten_patch3 = auto_encoder(ten_ragged - ten_mask01,
                              gene_dim, 3, gene_name,
                              reuse=False, training=False)
    # Keep original pixels outside the mask, generated pixels inside it.
    out_ground = ten_ragged + ten_patch3 * ten_mask01

    sess = mod_util.get_sess(C)
    mod_util.get_saver_logger(C, sess)
    print("||Training Check")

    patch3s = list()
    grounds = list()

    for i, (aerial, label1) in enumerate(zip(aerials, label1s)):
        aerial = aerial[np.newaxis, :, :, :]
        label1 = label1[np.newaxis, :, :, 0:1]

        eval_feed_dict = {inp_ground: aerial,
                          inp_mask01: label1, }
        eval_fetch = [ten_patch3, out_ground]
        mat_list = sess.run(eval_fetch, eval_feed_dict)

        patch3s.append(np.clip(mat_list[0] * 255, 0, 255).astype(np.uint8))
        grounds.append(np.clip(mat_list[1] * 255, 0, 255).astype(np.uint8))
        if i % 64 == 0:  # periodic progress log
            print('Eval:', i)

    grounds = np.concatenate(grounds, axis=0)
    patch3s = np.concatenate(patch3s, axis=0)
    return grounds, patch3s,
コード例 #6
0
ファイル: mod_util.py プロジェクト: Yonv1943/CloudRemoval
def tf_test():
    """
    Blur one aerial tile with a per-channel 3x3 mean filter built as a
    tf.nn.conv2d, then display the result.

    https://github.com/chiralsoftware/tensorflow/blob/master/convolve-blur.py
    """
    import cv2
    from util.img_util import Tools
    from configure import Config

    C = Config()
    T = Tools()

    cv2.namedWindow('beta', cv2.WINDOW_KEEPRATIO)
    # img = cv2.imread(os.path.join(C.aerial_dir, 'bellingham1.tif'))  # test
    img = cv2.imread(os.path.join(C.aerial_dir, 'austin1.tif'))  # train
    img = img[np.newaxis, :C.size, :C.size, :3]

    # Diagonal kernel: each output channel averages only its own input
    # channel (no cross-channel mixing).
    blur_size = 3
    channel = 3
    kernel_ary = np.zeros((blur_size, blur_size, channel, channel), np.float32)
    kernel_ary[:, :, 0, 0] = 1.0 / (blur_size**2)
    kernel_ary[:, :, 1, 1] = 1.0 / (blur_size**2)
    kernel_ary[:, :, 2, 2] = 1.0 / (blur_size**2)

    inp = tf.placeholder(tf.float32, [None, C.size, C.size, 3])
    # NOTE(review): a 3x3 kernel with VALID conv only needs 1-px padding;
    # padding by 2 makes the output 2 px larger than C.size per axis --
    # confirm whether that is intentional.
    ten = tf.nn.conv2d(
        tf.pad(inp, ((0, 0), (2, 2), (2, 2), (0, 0)), 'REFLECT'),
        tf.constant(kernel_ary), (1, 1, 1, 1), 'VALID')

    tf_config = tf.ConfigProto(device_count={'GPU': 0})  # force CPU
    sess = tf.Session(config=tf_config)
    img = sess.run(ten, {inp: img})
    sess.close()

    img = np.array(img[0], np.uint8)
    T.img_check(img)
    cv2.imshow('beta', img)
    cv2.waitKey(3456)
コード例 #7
0
import re
import time
from datetime import datetime, timedelta
import os
import json

import requests

from configure import Config

# Crawl window: `since` from config, `until` = since + N days
# (defaults to one day when 'until' is unset).
since = datetime.strptime(Config.date_conf()['since'], "%Y%m%d")
if Config.date_conf()['until']:
    until = since + timedelta(days=Config.date_conf()['until'])
else:
    until = since + timedelta(days=1)

article = {'title': '', 'content': []}  # accumulator for the scraped article
# NOTE(review): out_path() appears to return a dict, so .get(key, default)
# falls back to the literal second string when the key is absent -- confirm.
my_voices_dir = Config().out_path().get('my', 'output_voice')
host_voices_dir = Config().out_path().get('host', 'output_voice')
output_notes = Config().out_path().get('notes', 'output_notes')

# Ensure the output directories exist (disabled)
# for path in [my_voices_dir, host_voices_dir, output_notes]:
#     if not os.path.exists(os.path.abspath(path)):
#         os.mkdir(os.path.abspath(path))

# Login credentials (redacted)
login = '******'

headers = {
    'User-Agent':
コード例 #8
0
2018-10-10 Modify: Yonv1943

2018-10-11 save eval jpg
2018-10-12 'TF_CPP_MIN_LOG_LEVEL' tf.Session()
2018-10-12 origin, tensorflow.contrib.layers --> tf.layers
2018-10-12 elegant coding, stable
2018-10-13 C.size  28 --> 32, deeper, dcgan
2018-10-15 cloud removal
2018-10-21 'class Tools' move from mod_*.py to util.img_util.py 
2018-10-22 change mask from 'middle square' to 'spot'
2018-10-23 spot --> polygon
2018-10-23 for discriminator, tf.concat([tenx, mask], axis=0)
'''

C = Config('mod_GAN_circle')  # module-level config for this model variant
T = img_util.Tools()  # shared image helper utilities
rd.seed(1943)  # fixed seed for reproducible sampling


def model_save_npy(sess, print_info):
    """Save every TF global variable to C.model_npz (one named array per
    variable) and write an index of variable names to C.model_npz + '.txt'.

    :param sess: live tf.Session used to evaluate variable values.
    :param print_info: when truthy, print each fetched variable name.
    """
    npy_dict = dict()
    for var in tf.global_variables():
        npy_dict[var.name] = var.eval(session=sess)
        if print_info:
            print("| FETCH: %s" % var.name)
    # Fix: expand with ** so each variable is stored under its own name.
    # np.savez(path, npy_dict) pickles the whole dict into a single
    # 'arr_0' object array (cf. this project's 2018-11-15 changelog note).
    np.savez(C.model_npz, **npy_dict)
    with open(C.model_npz + '.txt', 'w') as f:
        f.writelines(["%s\n" % key for key in npy_dict.keys()])
コード例 #9
0
    device = cfg.DEVICE
    model = cfg.MODEL

    model.eval()
    predictions = []
    with torch.no_grad():
        for data, label in loader:
            data = data.to(device)
            label = label.to(device)
            pred = model(data)['clipwise_output']
            predictions.append(pred.cpu().detach().numpy())

    pred = np.concatenate(predictions)
    label = dataset.label
    thresholds = np.linspace(0, 1, 101)
    for threshold in thresholds:
        score = mean_f1_score(pred, label, threshold)
        print(threshold, score)


if __name__ == '__main__':
    # CLI entry point: pick a YAML config from configs/ and run evaluation.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config',
                        type=str,
                        default='default',
                        help="YAML file name under configs/")
    args = parser.parse_args()

    cfg = Config(args.config, train=False)  # train=False -> eval settings
    test(cfg)
コード例 #10
0
2018-10-12 'TF_CPP_MIN_LOG_LEVEL' tf.Session()
2018-10-12 origin, tensorflow.contrib.layers --> tf.layers
2018-10-12 elegant coding, stable
2018-10-13 C.size  28 --> 32, deeper, dcgan
2018-10-15 cloud removal
2018-10-21 'class Tools' move from mod_*.py to util.img_util.py 
2018-10-22 change mask from 'middle square' to 'spot'
2018-10-23 spot --> polygon
2018-10-23 for discriminator, tf.concat([tenx, mask], axis=0)
2018-11-05 for generator
2018-11-06 for process_data, tiny, U-net for generator
2018-11-06 buffer mask01 for discriminator, inp_dim_4
2018-11-06 inp, buf, mask
'''

C = Config('mod_mend_mask')  # module-level config for this model variant
T = img_util.Tools()  # shared image helper utilities
rd.seed(1943)  # fixed seed for reproducible sampling


def model_save_npy(sess, print_info):
    """Save every TF global variable to C.model_npz (one named array per
    variable) and write an index of variable names to C.model_npz + '.txt'.

    :param sess: live tf.Session used to evaluate variable values.
    :param print_info: when truthy, print each fetched variable name.
    """
    npy_dict = dict()
    for var in tf.global_variables():
        npy_dict[var.name] = var.eval(session=sess)
        if print_info:
            print("| FETCH: %s" % var.name)
    # Fix: expand with ** so each variable is stored under its own name.
    # np.savez(path, npy_dict) pickles the whole dict into a single
    # 'arr_0' object array (cf. this project's 2018-11-15 changelog note).
    np.savez(C.model_npz, **npy_dict)
    with open(C.model_npz + '.txt', 'w') as f:
        f.writelines(["%s\n" % key for key in npy_dict.keys()])
コード例 #11
0
import tensorflow as tf
import tensorflow.layers as tl

from configure import Config
from util import img_util
from util import mod_util
'''
2018-10-10  Yonv1943

2018-11-14  kernel2 better than kernel4, but low solution
2018-11-15  add conv_tp_conv to decoder
2018-11-15  Debug: np.savez(C.model_npz, **npy_dict) FOR mod_save_npy()
2018-11-15  Debug: load from mod.npz FOR mod_sess_saver_logger()
'''

C = Config('mod_coderGAN_buff')  # module-level config for this model variant
T = img_util.Tools()  # shared image helper utilities


def leru_batch_norm(ten):
    """Batch normalization (training mode) followed by leaky ReLU."""
    return tf.nn.leaky_relu(
        tf.layers.batch_normalization(ten, training=True))


def conv(ten, dim, idx):
    """3x3 stride-2 downsampling conv with leaky ReLU; width doubles per level."""
    width = dim * 2 ** (idx - 1)
    return tl.conv2d(ten, width, 3, 2, 'same', activation=tf.nn.leaky_relu)


def conv_tp(ten, dim, idx):
コード例 #12
0
2018-10-10 Modify: Yonv1943

2018-10-11 save eval jpg
2018-10-12 'TF_CPP_MIN_LOG_LEVEL' tf.Session()
2018-10-12 origin, tensorflow.contrib.layers --> tf.layers
2018-10-12 elegant coding, stable
2018-10-13 C.size  28 --> 32, deeper, dcgan
2018-10-15 cloud removal
2018-10-21 'class Tools' move from mod_*.py to util.img_util.py 
2018-10-22 change mask from 'middle square' to 'spot'
2018-10-23 spot --> polygon
2018-10-23 for discriminator, tf.concat([tenx, mask], axis=0)
'''

C = Config('mod_defog')  # module-level config for this model variant
T = img_util.Tools()  # shared image helper utilities
rd.seed(1943)  # fixed seed for reproducible sampling


def model_save_npy(sess, print_info):
    """Dump all TF global variables into C.model_npz plus a .txt name index.
    (Definition continues beyond this excerpt.)"""
    tf_vars = tf.global_variables()
    '''save as singal npy'''
    npy_dict = dict()
    for var in tf_vars:
        npy_dict[var.name] = var.eval(session=sess)
        print("| FETCH: %s" % var.name) if print_info else None
    # NOTE(review): np.savez(C.model_npz, npy_dict) pickles the whole dict
    # into a single 'arr_0' entry; per the 2018-11-15 changelog this should
    # be np.savez(C.model_npz, **npy_dict).
    np.savez(C.model_npz, npy_dict)
    with open(C.model_npz + '.txt', 'w') as f:
        f.writelines(["%s\n" % key for key in npy_dict.keys()])
    '''save as several npy'''
コード例 #13
0
ファイル: img_util.py プロジェクト: xmjiayou/CloudRemoval-1
import os
import glob

import cv2
import numpy as np
import numpy.random as rd
from configure import Config

C = Config()
"""
2018-09-18  23:23:23 fix bug: img_grid() 
2018-09-19  upgrade: img_grid(), random cut
2018-10-21  'class Tools' move from mod_*.py to util.img_util.py 
2018-10-21  poly, blur debug
2018-10-24  stagger plot
2018-11-06  get_data__circle, circle_radius
2018-11-14  image_we_have
"""


class Tools(object):
    def img_check(self, img):
        # Quick sanity print: min/max pixel values and the array shape.
        print("| min,max %6.2f %6.2f |%s" %
              (np.min(img), np.max(img), img.shape))

    def ary_check(self, ary):
        print("| min,max %6.2f %6.2f |ave,std %6.2f %6.2f |%s" % (
            np.min(ary),
            np.max(ary),
            np.average(ary),
            float(np.std(ary)),
コード例 #14
0
from torch.utils.data import Dataset
from torchvision import models, transforms
from configure import Config
from utlity import *
from AudiotoImg import PrecomputedAudio
from torch.optim import lr_scheduler

import torch.optim as optim
import torch.nn as nn

if __name__ == '__main__':

    # Configuration file
    cfg = Config()
    # Prepare data - you need to uncomment the following line to enable data preprocessing
    # prepare_data(cfg)

    # create new dataset and normalized images - train
    pre_train = PrecomputedAudio(cfg.train_path,
                                 img_transforms=transforms.Compose([
                                     transforms.ToTensor(),
                                     transforms.Normalize(
                                         mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
                                 ]))
    # create new dataset and normalized images -val
    pre_val = PrecomputedAudio(cfg.valid_path,
                               img_transforms=transforms.Compose([
                                   transforms.ToTensor(),
                                   transforms.Normalize(
                                       mean=[0.485, 0.456, 0.406],
コード例 #15
0
    grey_dir = os.path.join(data_dir, 'CloudGreyDataset_%dx%d' % (size, size))

    def __init__(self, model_dir='mod'):
        # Model artifact locations, all derived from the model directory.
        self.model_dir = model_dir
        self.model_name = 'mod'  # fixed base name for all artifacts
        self.model_path = os.path.join(self.model_dir, self.model_name)
        self.model_npz = os.path.join(self.model_dir, self.model_name + '.npz')
        self.model_log = os.path.join(self.model_dir, 'training_npy.txt')


if __name__ != '__main__':
    # Runs on *import* only -- an import-time smoke check, not a script guard.
    from configure import Config  # for test

    print("|| TEST")

C = Config('mod_cloud_remove')  # module-level config for this model
tf.set_random_seed(time.time() * 1943 % 178320049)  # time-derived graph seed


def auto_encoder(inp0, dim, out_dim, name, reuse, training=True):
    """Encoder-decoder generator network.
    (Definition continues beyond this excerpt.)"""
    padding1 = tf.constant(((0, 0), (1, 1), (1, 1), (0, 0)))  # 1-px NHWC pad

    def leru_batch_norm(ten):
        # Batch norm (honours `training`) followed by leaky ReLU.
        ten = tl.batch_normalization(ten, training=training)
        ten = tf.nn.leaky_relu(ten)
        return ten

    def conv_tp(ten, idx):
        # Transposed conv; filter count doubles with depth index.
        filters = (2**idx) * dim
        ten = tl.conv2d_transpose(ten,
                                  filters,
コード例 #16
0
from tensorboardX import SummaryWriter
from configure import Config
from model import WNet
from torch.utils.data import DataLoader
from Ncuts import NCutsLoss
#from AttenUnet import Attention_block
import time
import os
import argparse
import sys

from saveimg_helper import makegridimg3D

from AbdomenDS import AbdomenDS
if __name__ == '__main__':
    config = Config()
    parser = argparse.ArgumentParser(description='3D Attention WNet')
    parser.add_argument(
        "-d",
        "--dataset",
        required=False,
        help="Dataset Type: 1 = InPhase, 2 = OutPhase, 3 = T2, 4 = In+OutPhase"
    )
    parser.add_argument("-l",
                        "--losscombine",
                        required=False,
                        help="Loss Combine: 1 = True, 0 = False")
    parser.add_argument(
        "-c",
        "--cuda",
        required=False,
コード例 #17
0
Reference: https://github.com/cameronfabbri/Improved-Wasserstein-GAN
Reference: https://github.com/znxlwm/tensorflow-MNIST-GAN-DCGAN

2018-10-10 Modify: Yonv1943

2018-10-11 save eval jpg
2018-10-12 'TF_CPP_MIN_LOG_LEVEL' tf.Session()
2018-10-12 origin, tensorflow.contrib.layers --> tf.layers
2018-10-12 elegant coding, stable
2018-10-13 C.size  28 --> 32, deeper, dcgan
2018-10-15 cloud removal
2018-10-21 'class Tools' move from mod_*.py to util.img_util.py 
2018-10-22 change mask from 'middle square' to 'spot'
'''

C = Config('mod_GAN_spot')  # module-level config for this model variant
T = img_util.Tools()  # shared image helper utilities
rd.seed(1943)  # fixed seed for reproducible sampling


def model_save_npy(sess, print_info):
    """Save every TF global variable to C.model_npz (one named array per
    variable) and write an index of variable names to C.model_npz + '.txt'.

    :param sess: live tf.Session used to evaluate variable values.
    :param print_info: when truthy, print each fetched variable name.
    """
    npy_dict = dict()
    for var in tf.global_variables():
        npy_dict[var.name] = var.eval(session=sess)
        if print_info:
            print("| FETCH: %s" % var.name)
    # Fix: expand with ** so each variable is stored under its own name.
    # np.savez(path, npy_dict) pickles the whole dict into a single
    # 'arr_0' object array (cf. this project's 2018-11-15 changelog note).
    np.savez(C.model_npz, **npy_dict)
    with open(C.model_npz + '.txt', 'w') as f:
        f.writelines(["%s\n" % key for key in npy_dict.keys()])
コード例 #18
0
    grey_dir = os.path.join(data_dir, 'CloudGreyDataset')

    def __init__(self, model_dir='mod'):
        # Model artifact locations, all derived from the model directory.
        self.model_dir = model_dir
        self.model_name = 'mod'  # fixed base name for all artifacts
        self.model_path = os.path.join(self.model_dir, self.model_name)
        self.model_npz = os.path.join(self.model_dir, self.model_name + '.npz')
        self.model_log = os.path.join(self.model_dir, 'training_npy.txt')


if __name__ != '__main__':
    # Runs on *import* only -- an import-time smoke check, not a script guard.
    from configure import Config  # for test

    print("||TEST")

C = Config('mod_cloud_detect')  # module-level config for this model
tf.set_random_seed(time.time() * 1943 % 178320049)  # time-derived graph seed


def unet(inp0, dim, out_dim, name, reuse, training=True):
    """U-net style network built from reflect-padded strided convs.
    (Definition continues beyond this excerpt.)"""
    def leru_batch_norm(ten):
        # Batch norm (honours `training`) followed by leaky ReLU.
        ten = tf.layers.batch_normalization(ten, training=training)
        ten = tf.nn.leaky_relu(ten)
        return ten

    paddings = tf.constant(((0, 0), (1, 1), (1, 1), (0, 0)))  # 1-px NHWC pad

    def conv_pad(ten, idx, step=2):
        # Reflect-pad then conv; filter count doubles with depth index.
        filters = (2**idx) * dim
        ten = tf.pad(ten, paddings, 'REFLECT')
        ten = tl.conv2d(ten,
コード例 #19
0
from logging import getLogger

from experiment import Experiment
from configure import Config
from utils.seeder import seed_everything
from utils.mylog import timer, create_logger


@timer
def main(config):
    """Build one Experiment from *config* and run it end to end."""
    Experiment(config).run()


if __name__ == "__main__":
    c = Config()
    c.set_parameter(config_dir=pathlib.Path('src/config'), use_option=True)
    gc.enable()
    seed_everything(c.runtime.RANDOM_SEED)

    create_logger('main', c.log)
    create_logger('train', c.log)
    logger = getLogger('main')
    logger.info(f':thinking_face: ============ {datetime.now():%Y-%m-%d %H:%M:%S} ============ :thinking_face:')

    try:
        main(c)
        logger.info(f':sunglasses: ============ {datetime.now():%Y-%m-%d %H:%M:%S} ============ :sunglasses:')
    except Exception:
        logger.critical(f':smiling_imp: Exception occured \n {traceback.format_exc()}')
        logger.critical(f':skull: ============ {datetime.now():%Y-%m-%d %H:%M:%S} ============ :skull:')
コード例 #20
0
def run_eval():
    """Load the three eval_replace_*.jpg images, run the mend GAN once on
    them, display and save the generated outputs as eval_gan_*.jpg."""
    mat_list = list()
    for name in ['ground', 'out_aer', 'out_cld']:
        img = cv2.imread('eval_replace_%s.jpg' % name)
        mat_list.append(img[np.newaxis, :, :, :])

    import tensorflow as tf
    import mod_mend_buff as mod
    import os
    # Local copy of the training Config so this script is self-contained.
    class Config(object):
        train_epoch = 2 ** 14
        train_size = int(2 ** 17 * 1.9)
        eval_size = 2 ** 3
        batch_size = 2 ** 4
        batch_epoch = train_size // batch_size

        size = int(2 ** 9)  # size = int(2 ** 7)
        replace_num = int(0.25 * batch_size)
        learning_rate = 1e-5  # 1e-4

        show_gap = 2 ** 5  # time
        eval_gap = 2 ** 9  # time
        gpu_limit = 0.48  # 0.0 ~ 1.0
        gpu_id = 1

        data_dir = '/mnt/sdb1/data_sets'
        aerial_dir = os.path.join(data_dir, 'AerialImageDataset/train')
        cloud_dir = os.path.join(data_dir, 'ftp.nnvl.noaa.gov_color_IR_2018')
        grey_dir = os.path.join(data_dir, 'CloudGreyDataset')

        def __init__(self, model_dir='mod'):
            # Model artifact locations derived from the model directory.
            self.model_dir = model_dir
            self.model_name = 'mod'
            self.model_path = os.path.join(self.model_dir, self.model_name)
            self.model_npz = os.path.join(self.model_dir, self.model_name + '.npz')
            self.model_log = os.path.join(self.model_dir, 'training_npy.txt')

    C = Config('mod_mend_GAN_buff')

    # Inference graph: mask out cloud pixels, generate patches, recompose.
    gene_name = 'gene'
    inp_ground = tf.placeholder(tf.uint8, [None, C.size, C.size, 3])
    inp_cloud1 = tf.placeholder(tf.uint8, [None, C.size, C.size, 1])

    flt_ground = tf.to_float(inp_ground) / 255.0
    flt_cloud1 = tf.to_float(inp_cloud1) / 255.0
    ten_repeat = tf.ones([1, 1, 1, 3])  # broadcasts 1-ch mask to 3 channels

    ten_ground = flt_ground[:C.batch_size]
    # buf_ground = flt_ground[C.batch_size:]
    ten_cloud1 = flt_cloud1[:C.batch_size]

    ten_cloud3 = ten_cloud1 * ten_repeat
    ten_mask10 = (1.0 - ten_cloud3)
    ten_ragged = ten_ground * ten_mask10

    ten_patch3 = mod.generator(tf.concat((ten_ragged, ten_cloud3), axis=3),
                               32, 3, gene_name, reuse=False)
    # Original pixels outside the mask, generated pixels inside it.
    out_ground = ten_ragged + ten_patch3 * ten_cloud3

    from utils import mod_util
    sess = mod_util.get_sess(C)
    saver, logger, pre_epoch = mod_util.get_saver_logger(C, sess)
    print("||Training Check")
    # eval_fetch = [ten_ground, out_ground, ten_patch3, ten_cloud3]
    eval_fetch = [out_ground, ten_patch3]
    eval_feed_dict = {inp_ground: mat_list[0],
                      inp_cloud1: mat_list[2][:, :, :, 0:1]}

    mat_list = sess.run(eval_fetch, eval_feed_dict)
    for img, name in zip(mat_list, ['out_ground', 'ten_patch3']):
        # Scale [0, 1] float back to uint8, show briefly, then save.
        img = (img[0] * 255).astype(np.uint8)
        cv2.imshow('beta', img)
        cv2.waitKey(4321)
        print(img.shape, np.max(img))
        cv2.imwrite('eval_gan_%s.jpg' % name, img)

    print(end="  EVAL")
コード例 #21
0
ファイル: img_util.py プロジェクト: xmjiayou/CloudRemoval-1
import os
import glob

import cv2
import numpy as np
import numpy.random as rd
from configure import Config

G = Config()
"""
2018-09-18 23:23:23 fix bug: img_grid() 
2018-09-19 15:12:12 upgrade: img_grid(), random cut
"""


class Cloud2Grey(object):
    """Convert a color cloud image to a single-channel grey map, fixing
    known white scan-line artifacts first. (run() continues beyond this
    excerpt.)"""
    def __init__(self):
        # Pixel-coordinate remap used to overwrite the white grid lines;
        # the .npy lives next to this module.
        self.map_pts = np.load(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         'white_line_eliminate_map.npy'))

    def run(self, img):
        for i, j, x, y in self.map_pts:  # eliminate white line
            img[i, j] = img[x, y]

        # Thick-cloud indicator: channel-2 minus channel-0, clipped to {0, 1}.
        # NOTE(review): np.int is removed in NumPy >= 1.24; use int/np.int64.
        switch = np.array(img[:, :, 2], dtype=np.int) - img[:, :, 0]
        switch = np.clip(
            switch - 4, 0,
            1)  # The part with thick cloud, could not see the ground

        out = np.clip(img[:, :, 1], 60, 195)
コード例 #22
0
    grey_dir = os.path.join(data_dir, 'CloudGreyDataset_%dx%d' % (size, size))

    def __init__(self, model_dir='mod'):
        # Model artifact locations, all derived from the model directory.
        self.model_dir = model_dir
        self.model_name = 'mod'  # fixed base name for all artifacts
        self.model_path = os.path.join(self.model_dir, self.model_name)
        self.model_npz = os.path.join(self.model_dir, self.model_name + '.npz')
        self.model_log = os.path.join(self.model_dir, 'training_npy.txt')


if __name__ != '__main__':
    # Runs on *import* only -- an import-time smoke check, not a script guard.
    from configure import Config  # for test

    print("|| TEST")

C = Config('mod_cloud_remove_rec')  # module-level config for this model
tf.set_random_seed(time.time() * 1943 % 178320049)  # time-derived graph seed


def auto_encoder(inp0, dim, out_dim, name, reuse, training=True):
    """Encoder-decoder generator network.
    (Definition continues beyond this excerpt.)"""
    padding1 = tf.constant(((0, 0), (1, 1), (1, 1), (0, 0)))  # 1-px NHWC pad

    def leru_batch_norm(ten):
        # Batch norm (honours `training`) followed by leaky ReLU.
        ten = tl.batch_normalization(ten, training=training)
        ten = tf.nn.leaky_relu(ten)
        return ten

    def conv_tp(ten, idx):
        # Transposed conv; filter count doubles with depth index.
        filters = (2**idx) * dim
        ten = tl.conv2d_transpose(ten,
                                  filters,
コード例 #23
0
import numpy.random as rd
import tensorflow as tf
import tensorflow.layers as tl

from configure import Config
from util import img_util
from util import mod_util
'''
2018-10-10  Yonv1943

2018-11-14  kernel2 better than kernel4, but low solution
2018-11-15  add conv_tp_conv to decoder
2018-11-15  Debug: np.savez(C.model_npz, **npy_dict) FOR mod_save_npy()
'''

C = Config('mod_AutoEncoderGAN')  # module-level config for this model variant
T = img_util.Tools()  # shared image helper utilities
rd.seed(1943)  # fixed seed for reproducible sampling


def leru_batch_norm(ten):
    """Apply batch normalization (training mode), then leaky ReLU."""
    normed = tf.layers.batch_normalization(ten, training=True)
    return tf.nn.leaky_relu(normed)


def conv(ten, dim, idx):
    """3x3 stride-2 downsampling conv with leaky ReLU; width doubles per level."""
    n_filters = dim * 2 ** (idx - 1)
    return tl.conv2d(ten, n_filters, 3, 2, 'same', activation=tf.nn.leaky_relu)

コード例 #24
0
import os
import time
import shutil

import cv2
import numpy as np
import numpy.random as rd
import tensorflow as tf
import tensorflow.layers as tl

from configure import Config
from util import img_util

C = Config('mod_mend_Unet')  # module-level config for this model variant
T = img_util.Tools()  # shared image helper utilities
rd.seed(1943)  # fixed seed for reproducible sampling


def run():
    """Demo: load one aerial tile and (beyond this excerpt) process it in
    C.size-aligned crops taken from a random offset."""
    cv2.namedWindow('beta', cv2.WINDOW_KEEPRATIO)
    img = cv2.imread(os.path.join(C.aerial_dir, 'bellingham1.tif'))
    # cv2.imshow('beta', img)
    # cv2.waitKey(3456)

    channel = 3

    # Leftover pixels after tiling by C.size; used to randomize the origin.
    xlen, ylen = img.shape[0:2]
    xmod = xlen % C.size
    ymod = ylen % C.size

    xrnd = int(rd.rand() * xmod)
コード例 #25
0
import os, shutil
from os import path
from os.path import basename
from configure import Config
config = Config()

# Quick N Dirty - non-configurable.
# Builds the same 12 paths as the original append chain: for each image
# set ('train', 'train2') the plain, resampled_ and extracted_ folders,
# each immediately followed by its '_masks' sibling.
_UNET_BASE = "/content/bach-thes/notebooks/UNet/Train_images/"
folders = []
for _name in ("train", "train2"):
    for _prefix in ("", "resampled_", "extracted_"):
        folders.append(_UNET_BASE + _prefix + _name)
        folders.append(_UNET_BASE + _prefix + _name + "_masks")
コード例 #26
0
Reference: https://github.com/znxlwm/tensorflow-MNIST-GAN-DCGAN

2018-10-10 Modify: Yonv1943

2018-10-11 save eval jpg
2018-10-12 'TF_CPP_MIN_LOG_LEVEL' tf.Session()
2018-10-12 origin, tensorflow.contrib.layers --> tf.layers
2018-10-12 elegant coding, stable
2018-10-13 C.size  28 --> 32, deeper, dcgan
2018-10-15 cloud removal
2018-10-21 'class Tools' move from mod_*.py to util.img_util.py 
2018-10-22 change mask from 'middle square' to 'spot'
2018-10-23 spot --> polygon
'''

C = Config('mod_GAN_poly')  # module-level config for this model variant
T = img_util.Tools()  # shared image helper utilities
rd.seed(1943)  # fixed seed for reproducible sampling


def model_save_npy(sess, print_info):
    """Dump all TF global variables into C.model_npz plus a .txt name index.
    (Definition continues beyond this excerpt.)"""
    tf_vars = tf.global_variables()
    '''save as singal npy'''
    npy_dict = dict()
    for var in tf_vars:
        npy_dict[var.name] = var.eval(session=sess)
        print("| FETCH: %s" % var.name) if print_info else None
    # NOTE(review): np.savez(C.model_npz, npy_dict) pickles the whole dict
    # into a single 'arr_0' entry; per the 2018-11-15 changelog this should
    # be np.savez(C.model_npz, **npy_dict).
    np.savez(C.model_npz, npy_dict)
    with open(C.model_npz + '.txt', 'w') as f:
        f.writelines(["%s\n" % key for key in npy_dict.keys()])
    '''save as several npy'''
コード例 #27
0
ファイル: train.py プロジェクト: kumatheworld/niaoge
                'val': val_score
            }, epoch)

        scheduler.step()

        # save model
        if best_score <= val_score and not cfg.SANITY_CHECK:
            best_score = val_score
            checkpoint = {
                'config': cfg.cfg,
                'epoch': epoch,
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'scheduler': scheduler.state_dict(),
            }
            torch.save(checkpoint, cfg.CKPT_PATH)

    writer.close()


if __name__ == '__main__':
    # CLI entry point: pick a YAML config from configs/ and train.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config',
                        type=str,
                        default='default',
                        help="YAML file name under configs/")
    args = parser.parse_args()

    cfg = Config(args.config, train=True)  # train=True -> training settings
    train(cfg)
コード例 #28
0
ファイル: bot.py プロジェクト: Disaxy/music-bot
# from aiogram.contrib.fsm_storage.redis import RedisStorage2
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters.state import State, StatesGroup
from aiogram.types import ParseMode
from aiogram.utils.executor import start_webhook
from aiogram.utils.markdown import bold, code, italic, pre, text
from loguru import logger

from configure import Config
from middlewares import AccessMiddleware
from parser import SounCloudDownloader

scp = SounCloudDownloader('https://www.forhub.io/download.php')  # download backend

# Resolve config.ini. NOTE(review): abspath('config.ini') resolves against
# the current working directory, not the module's directory -- verify intent.
config_file_path = join(dirname(abspath('config.ini')), 'config.ini')
config = Config(config_file_path)

# Telegram bot wired through a proxy; MarkdownV2 as the default parse mode.
loop = asyncio.get_event_loop()
bot = Bot(config.API_TOKEN,
          proxy=config.PROXY_URL,
          parse_mode=ParseMode.MARKDOWN_V2)
# storage = RedisStorage2(config.REDIS_HOST)
dp = Dispatcher(bot, loop=loop)


@dp.message_handler(state='*', commands=['start'])
async def send_welcome(message: types.Message, state: FSMContext):
    # /start handler: prompt the user (message text is Russian:
    # "send a SoundCloud track link").
    await message.answer(text='Кидай ссылку на тречик в soundcloud')


@dp.message_handler(state='*', content_types=['text'])