예제 #1
0
    def __init__(self,
                 dbname: str,
                 db_opts=None,
                 load_lut=True,
                 train=True,
                 db_root='../data'):
        """Initialize the dataset: resolve paths, load DB config and options.

        Args:
            dbname: dataset name (case-insensitive; lower-cased internally).
            db_opts: optional dict/Edict whose entries override the defaults
                below (applied after the per-DB config).
            load_lut: forwarded to ``__initSweep`` (whether to load the
                sweep lookup tables).
            train: True for the training split, False otherwise.
            db_root: root directory containing ``<db_root>/<dbname>``.
        """
        # BUGFIX: the original called super(torch.utils.data.Dataset, self),
        # which starts MRO lookup *after* Dataset and therefore skips
        # Dataset's own initializer. Zero-argument super() resolves from the
        # actual subclass, which is the intended behavior.
        super().__init__()
        self.dbname = dbname.lower()
        self.db_path = osp.join(db_root, self.dbname)
        # Default options; overridden first by the DB config, then db_opts.
        opts = Edict()
        opts.img_fmt = 'cam%d/%04d.png'  # [cam_idx, fidx]
        opts.lut_fmt = 'lt_(%d,%d,%d).hwd'  # lookup table fmt [equi_h, w, d]
        opts.gt_depth_fmt = 'omnidepth_gt_%d/%05d.tiff'  # [equi_w, fidx]
        opts.equirect_size, opts.num_invdepth = [160, 640], 192
        opts.phi_deg, opts.phi2_deg = 45, -1.0
        opts.min_depth = 0.5  # meter scale
        opts.max_depth = 1 / EPS  # effectively "infinite" depth
        opts.max_fov = 220.0  # maximum FOV of input fisheye images
        opts.read_input_image = True  # for evaluation, False if read only GT
        opts.upsample_output = False  # upsample network output
        opts.start, opts.step, opts.end = 1, 1, 1000  # frame range
        opts.train_idx, opts.test_idx = [], []
        opts.gt_phi = 0.0
        opts.dtype = 'nogt'
        opts.use_rgb = False

        # First update opts using the pre-defined per-DB config; this also
        # loads the ocam (fisheye camera) parameters.
        opts, self.ocams = utils.dbhelper.loadDBConfigs(
            self.dbname, self.db_path, opts)
        # Then apply caller-supplied overrides from the argument.
        opts = argparse(opts, db_opts)

        # Cache frequently used options as member variables.
        self.opts = opts
        self.img_fmt, self.lut_fmt = opts.img_fmt, opts.lut_fmt
        self.gt_depth_fmt = opts.gt_depth_fmt
        # Inclusive of opts.end when (end - start) is a multiple of step,
        # because range() stops before end + step.
        self.frame_idx = list(
            range(opts.start, opts.end + opts.step, opts.step))
        self.train_idx, self.test_idx = opts.train_idx, opts.test_idx
        self.gt_phi = opts.gt_phi
        self.dtype = opts.dtype
        self.use_rgb = opts.use_rgb

        self.equirect_size = opts.equirect_size
        self.min_depth, self.max_depth = opts.min_depth, opts.max_depth
        self.max_theta = np.deg2rad(opts.max_fov) / 2.0  # half-FOV, radians
        self.phi_deg, self.phi2_deg = opts.phi_deg, opts.phi2_deg
        self.num_invdepth = opts.num_invdepth
        self.read_input_image = opts.read_input_image
        self.upsample_output = opts.upsample_output
        self.data_size = len(self.frame_idx)
        self.train_size = len(self.train_idx)
        self.test_size = len(self.test_idx)
        self.__initSweep(load_lut)
        self.train = train
예제 #2
0
 def loadSample(self, fidx: int, read_input_image=True, varargin=None):
     """Load one sample: input images and, if available, GT inverse depth.

     Args:
         fidx: frame index.
         read_input_image: if False, skip loading input images (GT only).
         varargin: optional option overrides (remove_gt_noise,
             morph_win_size).

     Returns:
         (imgs, gt, valid, raw_imgs); empty lists for parts not loaded.
     """
     opts = Edict()
     opts.remove_gt_noise = True
     opts.morph_win_size = 5
     opts = argparse(opts, varargin)
     imgs, raw_imgs = [], []
     if read_input_image:
         imgs, raw_imgs = self.loadImages(fidx, True, use_rgb=self.use_rgb)
     gt, valid = [], []
     if self.dtype == 'gt':
         gt = self.loadGTInvdepthIndex(fidx, opts.remove_gt_noise,
                                       opts.morph_win_size)
         # BUGFIX: np.bool (deprecated in NumPy 1.20, removed in 1.24) is
         # replaced by the builtin bool, its documented equivalent.
         # NOTE(review): upper bound uses <= num_invdepth, not < -- confirm
         # whether num_invdepth itself is a valid GT index.
         valid = np.logical_and(gt >= 0,
                                gt <= self.num_invdepth).astype(bool)
     return imgs, gt, valid, raw_imgs
예제 #3
0
def get_config():
    """Build the detection inference configuration (device + thresholds)."""
    conf = Edict()
    conf.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Post-processing parameters.
    conf.confidence_threshold = 0.5
    conf.top_k = 5000
    conf.nms_threshold = 0.4
    conf.keep_top_k = 750
    conf.trained_model = "detection/weights/mobilenet0.25_new.pth"
    conf.network = "mobile0.25"  # or resnet50

    # Mirror the chosen device into a boolean flag and report it.
    conf.cpu = conf.device == torch.device("cpu")
    print("use cpu!" if conf.cpu else "use gpu!")
    return conf
예제 #4
0
def get_config():
    """Assemble the training configuration (data, model, optimizer, schedule)."""
    conf = Edict()
    conf.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Data pipeline.
    conf.dataroot = "data"
    conf.val_batch_size, conf.train_batch_size = 64, 64
    conf.input_size = 112
    conf.workers = 4
    conf.num_nodes = 2
    conf.local_rank = -1

    # Model and loss.
    conf.loss = "cross_entropy"
    conf.back_bone = "efficient"
    conf.learning_rate = 0.01
    conf.start_step = 0
    conf.save_model = conf.back_bone
    conf.resume = conf.save_model
    conf.pretrained = False
    conf.num_classes = 7

    # Mixup / label smoothing.
    conf.mixup = 0.1
    conf.step = 40  # decrease learning rate each time
    conf.smooth_eps = 0.1
    conf.mixup_warmup = 455

    # Optimizer parameters.
    conf.min_lr, conf.max_lr = 2.5e-6, 0.225
    conf.sched = "clr"
    conf.decay = 2.5e-7
    conf.momentum = 0.9

    # Epochs and precision.
    conf.start_epoch = 0
    conf.epochs = 100000
    conf._dtype = torch.float16
    conf.mode = "triangular2"

    return conf
예제 #5
0
import torch.nn as nn
import torch.nn.functional as F
import torch
from backbone_block import basic_cnn
from utils_frequent.init_net import xavier_init
from easydict import EasyDict as Edict
"""
inputs:
    输入的一个特征图 
可以修改的地方、可以加上选项 
bottomUp feature map 到 topDown feature map 的卷积形式
topDown 下采样的方式,目前是插值

"""

cfg = Edict()
cfg.featureMapNum = 3  # how many feature maps are fed in / pyramid depth
# Per-level channel counts of the bottom-up path: entry 0 is the input
# feature map, the remaining entries are the pyramid levels.
cfg.bottomUpChannelsNum = [58, 116, 232, 464]
cfg.topDownChannelsNum = 96
cfg.bottomUp2ChannelsNum = 96
cfg.outChannelsNum = [96, 96, 96]  # output channel count of each FPN level


class PAN(nn.Module):
    """Path Aggregation Network neck over a feature pyramid.

    NOTE(review): this definition appears truncated in the visible chunk;
    only the beginning of __init__ is shown.
    """

    def __init__(self, cfg):
        super(PAN, self).__init__()
        # Number of feature maps fed into the neck.
        self.featureMapNum = cfg.featureMapNum

        # Channel counts for the bottom-up and top-down paths.
        self.bottomUpChannelsNum = cfg.bottomUpChannelsNum
        self.topDownChannelsNum = cfg.topDownChannelsNum
예제 #6
0
from net.shuffleNetV2 import ShuffleNetV2
from net.PAN_simplest import nanodet_PAN
from net.header import Head
from torch import nn

from easydict import EasyDict as Edict
# Local model configuration.
cfgMe = Edict()
cfgMe.modelSize = '1.0x'  # ShuffleNetV2 width multiplier
# NOTE(review): 'LeakReLu' looks like a typo for 'LeakyReLU' (the spelling
# used when constructing the backbone below) -- confirm before relying on
# this value; it is not read anywhere in the visible code.
cfgMe.activation = 'LeakReLu'


class NanoNet(nn.Module):
    """NanoDet-style detector: ShuffleNetV2 backbone + PAN neck + heads.

    NOTE(review): this definition is truncated in the visible chunk -- the
    Head(...) call at the end continues past it.
    """

    def __init__(self, classNum, imgChannelNumber, regBoxNum):
        super(NanoNet, self).__init__()
        # Backbone: ShuffleNetV2 at 1.0x width with LeakyReLU activations.
        self.backbone = ShuffleNetV2(imgChannelNumber,
                                     model_size='1.0x',
                                     activation='LeakyReLU')
        # Neck: simplified PAN over the last three backbone stages.
        self.neck = nanodet_PAN(  #featureMapNum=3,
            #bottomUpChannelsNum =[116,232,464],
            #topDownChannelsNum = 96,
            #bottomUp2ChannelsNum = 96,
            #outChannelsNum = 96
            inputChanNum=[116, 232, 464],
            chanNum=96)
        self.clsNum = classNum
        self.regBoxNum = regBoxNum
        # One detection head per pyramid level.
        self.head = nn.ModuleList()
        for i in range(3):
            h = Head(
                reg_max=self.
                regBoxNum,  # default = 8 boxes for the distribution (Generalized Focal Loss format)
예제 #7
0
from easydict import EasyDict as Edict
import argparse

edict = Edict()

# Filename templates for input/ground-truth NetCDF files and cached .npy
# arrays. The % placeholders are filled by the loading code (they appear to
# be date/path and variable-name strings -- verify against the callers).
edict.data = Edict()
edict.data.LAPSFormat = "/home/aiw/mntData/data168/data/SR_LAPS1H/%s/%s/MSP1_PMSC_AIWSRPF_LAPS-1H-0p01_L88_CZ_%s00_00000-00000.nc"
edict.data.LAPSNPY = './dataset/InputData/%s.npy'
edict.data.GRAPESMESOFormat = "/home/aiw/mntData/data47/data/AIWSLL/GRAPESMESOORI/%s/%s/MSP2_PMSC_AIWSR%sF_GRAPESMESOORI-0P03-1H_L88_CZ_%s00_00000-01200.nc"
# NetCDF variable-name template; %s is presumably 'u' or 'v' -- confirm.
edict.data.GRAPESMESOVariable = '%s-component_of_wind_height_above_ground'
edict.data.GRAPESMESONPY = './dataset/TestData/%s.npy'

# Checkpoint directory template (formatted with a run/model identifier).
edict.checkpoint = Edict()
edict.checkpoint.cpkRoot = './checkpoint_%s'


def options():
    parser = argparse.ArgumentParser(description='PyTorch Super Res Example')
    parser.add_argument('--batchSize',
                        type=int,
                        default=32,
                        help='training batch size')
    parser.add_argument('--nEpochs',
                        type=int,
                        default=40,
                        help='number of epochs to train for')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        help='Learning Rate. Default=0.001')
    parser.add_argument('--seed',