Example #1
from __future__ import print_function, division, absolute_import
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
import os
import sys
from util.logs import get_logger
from . import MODEL_REGISTRY

__all__ = ['InceptionV4', 'inceptionv4']
logger = get_logger('inception')

pretrained_settings = {
    'inceptionv4': {
        'imagenet': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionv4-8e4777a0.pth',
            'input_space': 'RGB',
            'input_size': [3, 299, 299],
            'input_range': [0, 1],
            'mean': [0.5, 0.5, 0.5],
            'std': [0.5, 0.5, 0.5],
            'num_classes': 1000
        },
        'imagenet+background': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionv4-8e4777a0.pth',
            'input_space': 'RGB',
            'input_size': [3, 299, 299],
            'input_range': [0, 1],
            'mean': [0.5, 0.5, 0.5],
            'std': [0.5, 0.5, 0.5],
            'num_classes': 1001  # the "+background" variant adds one extra class
        }
    }
}
Example #2
import torch
from torch import nn
from util.logs import get_logger
import numpy as np
from . import MODEL_REGISTRY

logger = get_logger('dark loss')


def conv_batch(in_num, out_num, kernel_size=3, padding=1, stride=1):
    # Conv2d + BatchNorm2d + LeakyReLU; bias=False because the following
    # BatchNorm2d supplies its own learned shift, making a conv bias redundant.
    return nn.Sequential(
        nn.Conv2d(in_num,
                  out_num,
                  kernel_size=kernel_size,
                  stride=stride,
                  padding=padding,
                  bias=False),
        nn.BatchNorm2d(out_num),
        nn.LeakyReLU())


# Residual block
class DarkResidualBlock(nn.Module):
    def __init__(self, in_channels):
        super(DarkResidualBlock, self).__init__()
        reduced_channels = int(in_channels / 2)
        self.layer1 = conv_batch(in_channels,
                                 reduced_channels,
                                 kernel_size=1,
                                 padding=0)
        self.layer2 = conv_batch(reduced_channels, in_channels)

    def forward(self, x):
        residual = x
        out = self.layer1(x)
        out = self.layer2(out)
        out += residual  # skip connection
        return out
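

# Hedged smoke test (not in the original snippet): the residual block
# preserves both channel count and spatial size.
if __name__ == '__main__':
    block = DarkResidualBlock(in_channels=64)
    x = torch.randn(1, 64, 56, 56)
    assert block(x).shape == x.shape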
Example #3
import numpy as np
from itertools import product
import math
from util.logs import get_logger


logger = get_logger('safas')


class ImageSlider:

    @staticmethod
    def calculate_start_p(stride, size):
        num_step = math.ceil(size / stride)
        stride = size / num_step
        for i in range(num_step + 1):
            yield math.floor(stride * i)

    def __init__(self, image_shape=None, width=512, overlapping=0.2):
        assert image_shape is not None, 'image_shape is required'
        self.image_shape = image_shape[:2]
        self.width = width
        self.overlapping = overlapping
        self.top_left_points = []
        self.stride = (1 - self.overlapping) * self.width

        # Reuse the staticmethod above rather than redefining it locally.
        calculate_start_p = ImageSlider.calculate_start_p
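

# Hedged usage sketch (not in the original snippet): calculate_start_p
# shrinks the stride so the windows cover [0, size] evenly. For
# stride=410, size=1000: num_step = ceil(1000/410) = 3, the effective
# stride becomes 1000/3, and num_step + 1 = 4 start positions are yielded.
if __name__ == '__main__':
    print(list(ImageSlider.calculate_start_p(410, 1000)))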
Example #4
import torch.nn as nn
import torch.nn.functional
import matplotlib.pyplot as plt
from torch.optim import SGD
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm

from util.logs import get_logger
from voc2012 import VOC
from tensorboardX import SummaryWriter
from util.npdraw import draw_bounding_box
from functools import reduce
import random

logger = get_logger('f**k me')


class VGG(nn.Module):
    def __init__(self, init_weights=True):
        super(VGG, self).__init__()
        self.features = self.make_layers(
            [
                64,
                64,
                'M',  # 3, 5, 6
                128,
                128,
                'M',  # 10, 14 16
                256,
                256,
Example #5
File: bconfig.py, Project: dailing/util
from util.logs import get_logger
from abc import ABC, abstractmethod
from io import StringIO
from collections.abc import Mapping
import argparse
import json

try:
    import yaml
except ModuleNotFoundError as e:
    yaml = None


logger = get_logger('config')


class Field(ABC):
    def _get_readable_value(self):
        raise NotImplementedError

    def _get_bin_value(self):
        raise NotImplementedError

    def _set_value(self, value):
        raise NotImplementedError

    def _to_dict(self):
        return self._get_readable_value()
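

# Hedged sketch (not in the original file): a minimal concrete Field,
# assuming subclasses hold one value and implement the three accessors.
class IntField(Field):
    def __init__(self, value=0):
        self._value = int(value)

    def _get_readable_value(self):
        return self._value

    def _get_bin_value(self):
        return self._value

    def _set_value(self, value):
        self._value = int(value)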
Example #6
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torch.optim import SGD
from tqdm import tqdm
from tensorboardX import SummaryWriter
from torchvision.utils import make_grid

from augment import FundusAOICrop, CompostImageAndLabel
from model import mean_iou, Mrcnn
from util.files import assert_exist, check_exist
from util.logs import get_logger
from util.npdraw import draw_bounding_box
from util.segmentation2bbox import segmentation2bbox
from model import restore_box_reg
import matplotlib.pyplot as plt
import os              # needed below for os.getenv
import torch.nn as nn  # needed below for nn.Module
logger = get_logger('ma detection')
logWriter = SummaryWriter(logdir='log/f**k')

debug = os.getenv('DEBUG')


class VGG(nn.Module):
    def __init__(self, init_weights=True):
        super(VGG, self).__init__()
        self.features = self.make_layers(
            [64, 64, 'M',  # 3, 5, 6
             128, 128, 'M',  # 10, 14 16
             256, 256, 512, 'M',  # 24, 32, 40, 44
             512, 512, 512, 'M',  # 60, 76, 92, 100
             # 512, 512, 512, 'M',
             ],  # 132, 164, 196, 212
Example #7
import random
from collections.abc import Iterable
from typing import Callable
import traceback
import base64
from os.path import join as pjoin
import pickle
import io

try:
    import pymongo
    import gridfs
except ImportError as e:
    pymongo = None
    gridfs = None  # keep both optional names defined when the import fails

from util.logs import get_logger

logger = get_logger('Processing')


class TaskFunc:
    """Task interface: subclasses override init_env() and __call__()."""

    def init_env(self):
        pass

    def __call__(self, *arg, **kwargs):
        pass


class ProcessPool(object):
    class DelayedResult:
        def __init__(self, task_id, parent):
            self.task_id = task_id
            self.parent = parent
Example #8
                data[k] = v
            except Exception as e:
                logger.info(e, exc_info=True)
            logger.info('passed image successfully')
        elif type(v) in [dict, list, tuple]:
            process_data(v)


# config = DCLCONFIG.build()
# config.from_yaml('config_files/yolo_test.yaml')
# config.parse_args()
# pp = Predictor(config)
# pp.init_env()

app = Flask(__name__)
logger = get_logger('server logger')
# redis_host = 'localhost'
# app.config.from_object(FlaskConfig())
# logger.info('f**k')


@app.route("/")
def f**k():
    return 'f**k  you !'


@app.route("/api/grade", methods=['POST'])
def grade_one():
    data = request.get_data(as_text=True)
    data = json.loads(data)
    logger.info(data)
Example #9
import torch.nn as nn
import torch

from torch.nn import Parameter
import torch.nn.init as init
import math
from util.logs import get_logger
from .inceptionv4 import InceptionV4
from .resnet import ResNet18, ResNet101
from torch.hub import load_state_dict_from_url  # torchvision.models.utils was removed in newer torchvision
from . import MODEL_REGISTRY

logger = get_logger('experiment')


@MODEL_REGISTRY.register()
class ExperimentModel(nn.Module):
    def __init__(self, num_classes=10):
        super(ExperimentModel, self).__init__()
        # self.features = nn.Sequential(
        #     nn.Conv2d(1, 20, kernel_size=5, stride=1),
        #     nn.ReLU(inplace=True),
        #     nn.MaxPool2d(kernel_size=2, stride=2),
        #     nn.Conv2d(20, 50, kernel_size=5),
        #     nn.ReLU(inplace=True),
        #     nn.MaxPool2d(kernel_size=2, stride=2),
        # )

        ####################################
        # self.features = InceptionV4()
        # state_dict = load_state_dict_from_url(
Example #10
File: main.py, Project: dailing/search_proj
import datetime
import psycopg2
import math
import base64
from hashlib import md5
import uuid
from peewee import fn, DoesNotExist
import time
from elasticsearch import Elasticsearch
from playhouse.shortcuts import model_to_dict
from playhouse.postgres_ext import Match
import six
import zerorpc

from flask import Flask           # assumed import; not shown in this excerpt
from flask_restful import Api     # assumed import; not shown in this excerpt
from peewee import DatabaseProxy  # assumed import; not shown in this excerpt
from util.logs import get_logger

logger = get_logger('search project learning')

app = Flask(__name__, static_folder='statics', static_url_path='/static')
api = Api(app)
app.config['MAX_CONTENT_LENGTH'] = 256 * 1024 * 1024
es = Elasticsearch(['es01:9200'])
psql_db = DatabaseProxy()


def json_encoder_default(obj):
    datetime_format = "%Y/%m/%d %H:%M:%S"
    date_format = "%Y/%m/%d"
    time_format = "%H:%M:%S"
    # if isinstance(obj, Decimal):
    #     return str(obj)
    if isinstance(obj, datetime.datetime):
Example #11
import click

from main import ImageStorage, psql_db
import playhouse.db_url
import os
from util.logs import get_logger

logger = get_logger('dataset log')

psql_db.initialize(
    playhouse.db_url.connect(
        'postgresql://*****:*****@localhost:25068/fuckdb'))


@click.command()
@click.option('--path', help='path of images')
@click.option('--session_name', help='session name')
def add_image(path, session_name):
    for p, _, f in os.walk(path):
        for fname in f:
            fname = os.path.join(p, fname)
            ImageStorage.add_file(fname, session_name)
            logger.info(fname)


if __name__ == "__main__":
    add_image()
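
# Hedged usage (script name assumed; not shown in this excerpt):
#   python add_image.py --path /data/images --session_name first_batch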
Example #12
import PIL
import pandas as pd
import cv2
import numpy as np
import pickle
from torch.utils.data import Dataset
from main import ImageAnnotation, psql_db, ImageStorage
import playhouse.db_url
from util.logs import get_logger
import torch
import datetime
import random

logger = get_logger('f**k sql db')


class SqlDB(Dataset):
    def __init__(self,
                 split='train',
                 db_url='postgresql://*****:*****@localhost:25068/fuckdb',
                 table_name='imageannotation',
                 max_box=2):
        self.db_url = db_url
        self.max_box = max_box
        self.split = split            # keep remaining ctor args for later use
        self.table_name = table_name

    def _init_env(self):
        if not hasattr(self, 'images'):
            if psql_db.is_closed():
                psql_db.initialize(playhouse.db_url.connect(self.db_url))
            result = ImageAnnotation.select().where(
                    (ImageAnnotation.session_name == 'disk_set') &  # peewee expressions need `&`, not `and`
Example #13
File: main.py, Project: dailing/mask_rcnn

class Session(BaseModel):
    session_name = TextField(default=lambda: uuid.uuid4().hex, unique=True)
    timestamp = DateTimeField(default=datetime.datetime.now)


try:
    psql_db.initialize(
        playhouse.db_url.connect('postgresql://*****:*****@db:5432/fuckdb'))
    psql_db.connect()
    psql_db.create_tables([ImageStorage, ImageAnnotation, Session])
except Exception as e:
    print(e)

logger = get_logger('interactive learning')


def image2base64(f):
    # check if there are any image, parse image to base64
    def wrapper(*args, **kwargs):
        result = f(*args, **kwargs)
        if type(result) is not dict:
            if type(result) is np.ndarray:
                result = {'data': result}
            else:
                logger.warning(f'cannot parse {type(result)} type.')
        for k, v in result.items():
            if type(v) is np.ndarray:
                if np.issubdtype(v.dtype, np.floating):
                    v = (v * 255).astype(np.uint8)
Example #14
File: augment.py, Project: dailing/util
# from skimage.filters import gaussian
import numpy as np
import random
import cv2
from util.logs import get_logger
from typing import Callable
from util.process_pool import run_once
import traceback
import inspect
import pickle
import os.path
import zlib
from abc import ABC  # needed below for the Transform base class
# import torchvision

logger = get_logger('augment.logger')


class Transform(ABC):
    counter = 0

    def __init__(self):
        self.id = Transform.counter
        Transform.counter += 1

    def __call__(self, *img):
        """
        this is the function that calls the actual transformation function.
        I added this layer for easy managing, such as caching, logging etc.
        DO NOT override this function.
        :param img: input image
Example #15
from util.logs import get_logger
from torch.utils.data import DataLoader
from dataset.lesion_seg_mask_rcnn import LesionSegMask
from os import cpu_count
from torch.utils.data import Dataset
from util.augment import Compose, ToFloat, ToTensor
from model.maskrcnn import MaskRCNN
from torch.optim import Adam, SGD
from sqlitedict import SqliteDict
from io import BytesIO
from tqdm import tqdm
import torch
import os
from tensorboardX import SummaryWriter

logger = get_logger('main')
num_processor = 2
device = torch.device('cuda')
summary_writer = SummaryWriter(logdir='log/maskrcnn/log')


def write_summary(loss_map, tag='train', step=None):
    # Log every named loss in the map as a scalar under '<tag>/<name>_loss'.
    for k, v in loss_map.items():
        summary_writer.add_scalar(f'{tag}/{k}_loss',
                                  v.detach().cpu().numpy(),
                                  global_step=step)
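
# Hedged usage sketch (not in the original file): log one scalar loss
# at global step 0.
#   write_summary({'cls': torch.tensor(0.5)}, tag='train', step=0)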


class TrainEvalDataset(Dataset):
    def __init__(self, data_reader, split='train'):
        super().__init__()
Example #16
File: dcl.py, Project: dailing/mask_rcnn
from util.augment import augment_map, Compose
from util.logs import get_logger
from scipy.special import softmax
import sys
from dataset import datasets as available_datasets
import argparse
from sklearn.metrics import average_precision_score, roc_auc_score
import util.bconfig
import model
from model.deeplab_v3 import CrossEntropy2d
import pickle
from itertools import chain
from model.experiment import ExperimentLoss
from os import cpu_count
import torch           # needed below for torch.device
import torch.nn as nn  # needed below for nn.Module

logger = get_logger('fff')
device = torch.device('cuda')
num_processor = cpu_count()


class NetModel(nn.Module):
    def __init__(self, config, with_feature=False):
        super().__init__()
        if config.net_parameters is None:
            config.net_parameters = {}
        self.base_net = config.basenet(**config.net_parameters)
        self.config = config
        self.with_feature = with_feature
        for i in self.config.outputs:
            logger.info(f'{i.layer_parameters}')
            layer = i.model(**i.layer_parameters)
Example #17
import unittest
import numpy as np

from util.process_pool import CachedFunction
from util.image_process import ImageSlider
from util.logs import get_logger
import matplotlib.pyplot as plt
import hashlib

logger = get_logger('f**k')


class TestCachedFunc(unittest.TestCase):
    def test_int_single_key(self):
        @CachedFunction(cache_dir='/tmp/test/cache')
        def sqrt(x):
            return x**2

        for i in range(100):
            self.assertEqual(sqrt(i), i**2)
        for i in range(100):
            self.assertEqual(sqrt(i), i**2)
        self.assertEqual('f**k', 'f**k')

    def test_cached_func_file_input(self):
        np.random.seed(5153424)
        bytes_arr = [np.random.bytes(1024 * 325) for i in range(100)]
        md5_arr = []
        for i in bytes_arr:
            md5 = hashlib.md5()
            md5.update(i)
Example #18
from torch.utils.data import DataLoader, Dataset
import pandas as pd
import numpy as np
from PIL import Image
# import nvidia.dali.ops as ops
# import nvidia.dali.types as types
# from nvidia.dali.pipeline import Pipeline
from torch.utils.data._utils.collate import default_collate
from util.logs import get_logger
import torch
# from nvidia.dali.plugin.pytorch import DALIGenericIterator
from tqdm import tqdm


logger = get_logger('annotate')


# class ExternalInputIterator(object):
#     def __init__(
#             self, batch_size, split='train', test_split=0,
#             needed_labels=[]):
#         self.split = 'train' if split is None else split
#         self.root = '../data/annotation4'
#         files = pd.read_csv(f'{self.root}/dataset.csv')
#         if self.split == 'train':
#             self.image_list = files[files.split != test_split]
#         else:
#             self.image_list = files[files.split == test_split]
#         self.batch_size = batch_size