def __init__(self, args):
    with tf.Graph().as_default():
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 0.2
        sess = tf.Session(config=config)
        #sess = tf.Session()
        with sess.as_default():
            self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(sess, None)
    self.threshold = args.threshold
    self.det_minsize = 50
    self.det_threshold = [0.4, 0.6, 0.6]
    self.det_factor = 0.9
    _vec = args.image_size.split(',')
    assert len(_vec) == 2
    self.image_size = (int(_vec[0]), int(_vec[1]))
    _vec = args.model.split(',')
    assert len(_vec) == 2
    prefix = _vec[0]
    epoch = int(_vec[1])
    print('loading', prefix, epoch)
    self.model = edict()
    self.model.ctx = mx.gpu(args.gpu)
    self.model.sym, self.model.arg_params, self.model.aux_params = mx.model.load_checkpoint(prefix, epoch)
    self.model.arg_params, self.model.aux_params = ch_dev(self.model.arg_params, self.model.aux_params, self.model.ctx)
    all_layers = self.model.sym.get_internals()
    self.model.sym = all_layers['fc1_output']
def get_basic_paths():
    paths = edict()
    paths.base = edict()
    paths.base.dr = '/mnt/HardDrive/common'
    paths.vatic = edict()
    paths.vatic.dr = '/home/nestsense/vatic'
    return paths
def __init__(self):
    mnistPath = osp.join(DATASET_PATH, 'mnist')
    self.pths_ = edict()
    self.pths_.train = edict()
    self.pths_.test = edict()
    self.pths_.train.ims = osp.join(mnistPath, 'train-images-idx3-ubyte')
    self.pths_.train.lb = osp.join(mnistPath, 'train-labels-idx1-ubyte')
    self.pths_.test.ims = osp.join(mnistPath, 't10k-images-idx3-ubyte')
    self.pths_.test.lb = osp.join(mnistPath, 't10k-labels-idx1-ubyte')
def get_mnist_paths():
    paths = edict()
    # Path to store experiment details
    paths.exp = edict()
    paths.exp.dr = './test_data/mnist/exp'
    # Paths to store snapshot details
    paths.exp.snapshot = edict()
    paths.exp.snapshot.dr = './test_data/mnist/snapshots'
    return paths
def __init__(self, prms=edict()):
    dArgs = edict()
    dArgs.folderName = 'nicks-house'
    dArgs.subFolderName = 'Angle1Lighting1'
    prms = cu.get_defaults(prms, dArgs, True)
    GetDataDir.__init__(self, prms)
    pths = get_datadirs()
    self.prms_.dirName = pths.vatic % (self.prms_.folderName, self.prms_.subFolderName)
def __init__(self, prms=edict()):
    dArgs = edict()
    dArgs.setName = 'val'
    ch.ChainObject.__init__(self, prms)
    self.mpiiDir_ = osp.join(pths.dsets, 'mpii')
    dFile = osp.join(self.mpiiDir_, 'annotations', '%s_data.pkl')
    dFile = dFile % self.prms['setName']
    self.data_ = pickle.load(open(dFile, 'r'))
    self.N_ = len(self.data_['kpts'])
    self.count_ = 0
def __init__(self, **lPrms):
    # The layer parameters - these can
    # be different for different layers
    for n in lPrms:
        if hasattr(self, n):
            setattr(self, n, lPrms[n])
        else:
            raise Exception("Attribute '%s' not found" % n)
    # The gradients w.r.t. the parameters and the bottom
    self.grad_ = edict()
    # Storing the weights and other stuff
    self.prms_ = edict()
def all(self):
    """
    Instantiate objects on the fly.

    We use edict() in order to recursively transform dicts into attributes.
    (ex.: object['properties']['districts'][0]['pk'] becomes
    object.properties.districts[0].pk)
    """
    objects = self._read_content()
    if isinstance(objects, (list, tuple)):
        return [self.klass(objects=self, **edict(o)) for o in objects]
    assert isinstance(objects, dict)
    return self.klass(objects=self, **edict(objects))
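# A minimal, self-contained sketch of the recursive conversion the docstring
# above describes; it uses only EasyDict itself, and the nested keys mirror
# the docstring's example rather than any real data model.
from easydict import EasyDict as edict

o = edict({'properties': {'districts': [{'pk': 7}]}})
# Nested dicts (including dicts inside lists) become attribute lookups.
assert o.properties.districts[0].pk == 7
assert o.properties.districts[0].pk == o['properties']['districts'][0]['pk']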
def caffe_model_paths():
    paths = get_basic_paths()
    paths.caffemodel = edict()
    paths.caffemodel.dr = osp.join(paths.base.dr, 'caffe_models')
    # Faster R-CNN models
    paths.caffemodel.fasterrcnn = edict()
    paths.caffemodel.fasterrcnn.dr = osp.join(paths.caffemodel.dr, 'faster_rcnn_models')
    # ImageNet models
    paths.caffemodel.imagenet = edict()
    paths.caffemodel.imagenet.dr = osp.join(paths.caffemodel.dr, 'imagenet_models')
    return paths
def setUp(self):
    self.trek = Trek()
    self.trek.properties = edict({
        'name': 'its name',
        'departure': 'départ',
        'arrival': 'made in a z small world',
        'ambiance': "it's like like like, you know. VERY good.",
        'advice': """do not take this way""",
        'cities': [edict({'name': 'triffouilli', 'code': '12345'})],
        'districts': [edict({'name': 'secteur'})],
        'pois': [edict({'name': 'jonquille', 'description': 'desc', 'type': 'fleur'})],
    })
    self.fulltext = self.trek.fulltext
def convert_to_deepcut(conf):
    conf = edict(conf.__dict__)
    conf.all_joints = []
    conf.all_joints_names = []
    for ndx in range(conf.n_classes):
        conf.all_joints.append([ndx])
        conf.all_joints_names.append('part_{}'.format(ndx))
    conf.dataset = os.path.join(conf.cachedir, conf.dlc_train_data_file)
    conf.global_scale = 1. / conf.dlc_rescale
    conf.num_joints = conf.n_classes
    conf.scale_jitter_lo = 0.9
    conf.scale_jitter_up = 1.1
    conf.net_type = 'resnet_50'
    conf.pos_dist_thresh = 17
    conf.max_input_size = 1000
    conf.intermediate_supervision = False
    conf.intermediate_supervision_layer = 12
    conf.location_refinement = True
    conf.locref_huber_loss = True
    conf.locref_loss_weight = 0.05
    conf.locref_stdev = 7.2801
    conf.mirror = False
    _merge_a_into_b(conf, cfg)
    return cfg
def get_pred_fn(cfg, model_file=None):
    cfg = edict(cfg.__dict__)
    cfg = config.convert_to_deepcut(cfg)
    if model_file is None:
        ckpt_file = os.path.join(cfg.cachedir, cfg.expname + '_' + name + '_ckpt')
        latest_ckpt = tf.train.get_checkpoint_state(cfg.cachedir, ckpt_file)
        init_weights = latest_ckpt.model_checkpoint_path
    else:
        init_weights = model_file
    tf.reset_default_graph()
    sess, inputs, outputs = predict.setup_pose_prediction(cfg, init_weights)

    def pred_fn(all_f):
        if cfg.imgDim == 1:
            cur_im = np.tile(all_f, [1, 1, 1, 3])
        else:
            cur_im = all_f
        cur_out = sess.run(outputs, feed_dict={inputs: cur_im})
        scmap, locref = predict.extract_cnn_output(cur_out, cfg)
        pose = predict.argmax_pose_predict(scmap, locref, cfg.stride)
        pose = pose[:, :, :2] * cfg.dlc_rescale
        return pose, scmap

    def close_fn():
        sess.close()

    return pred_fn, close_fn, model_file
def update_config(config_file):
    exp_config = None
    with open(config_file) as f:
        exp_config = edict(yaml.load(f))
        for k, v in exp_config.items():
            if k in config:
                if isinstance(v, dict):
                    if k == 'TRAIN':
                        if 'BBOX_WEIGHTS' in v:
                            v['BBOX_WEIGHTS'] = np.array(v['BBOX_WEIGHTS'])
                    elif k == 'network':
                        if 'PIXEL_MEANS' in v:
                            v['PIXEL_MEANS'] = np.array(v['PIXEL_MEANS'])
                    for vk, vv in v.items():
                        config[k][vk] = vv
                else:
                    if k == 'SCALES':
                        if type(v) != list:
                            config[k][0] = (tuple(v))
                        else:
                            config[k] = v
                    else:
                        config[k] = v
            else:
                raise ValueError("key must exist in config.py")
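# A hedged sketch of the kind of experiment YAML update_config above expects;
# the file path and the concrete values are illustrative, not from any real
# experiment, but the keys match the special cases handled in the function.
#
#   TRAIN:
#     BBOX_WEIGHTS: [1.0, 1.0, 1.0, 1.0]    # converted to np.array above
#   network:
#     PIXEL_MEANS: [103.06, 115.90, 123.15] # converted to np.array above
#   SCALES:
#   - [600, 1000]                           # a YAML list is assigned as-is
#
# update_config('experiments/cfgs/example.yaml')  # hypothetical path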
def getAllVehicleType(parkingIdentificationCode):
    request = __getAllVehicleTypeRequest(parkingIdentificationCode)
    dataEncrypted = encrypt.encrypted(object2json(request))
    response = sendRequest(URL_FIND + VEHICLE_TYPE_REQUEST, dataEncrypted)
    logging.info(response)
    data = json.loads(response)
    return json2object(edict(data))
def getAllParkingRates(parkingIdentificationCode):
    request = __getAllParkingRatesRequest(parkingIdentificationCode)
    dataEncrypted = encrypt.encrypted(object2json(request))
    response = sendRequest(URL_FIND + PARKING_RATES_REQUEST, dataEncrypted)
    logging.info(response)
    data = json.loads(response)
    return json2object(edict(data))
def get_dataset_common(input_dir, min_images=1):
    ret = []
    label = 0
    person_names = []
    for person_name in os.listdir(input_dir):
        if len(os.listdir(os.path.join(input_dir, person_name))) > 10:
            person_names.append(person_name)
    person_names = sorted(person_names)
    for person_name in person_names:
        _subdir = os.path.join(input_dir, person_name)
        if not os.path.isdir(_subdir):
            continue
        _ret = []
        for img in os.listdir(_subdir):
            fimage = edict()
            fimage.id = os.path.join(person_name, img)
            fimage.classname = str(label)
            fimage.image_path = os.path.join(_subdir, img)
            fimage.bbox = None
            fimage.landmark = None
            _ret.append(fimage)
        if len(_ret) >= min_images:
            ret += _ret
            label += 1
    return ret
def cfg_from_file(filename):
    """Load a config file and merge it into the default options."""
    import yaml
    with open(filename, 'r') as f:
        yaml_cfg = edict(yaml.load(f))
    _merge_a_into_b(yaml_cfg, __C)
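# A hedged usage sketch for cfg_from_file above, assuming Fast R-CNN-style
# defaults such as __C.TRAIN.LEARNING_RATE defined elsewhere in this file set;
# the YAML path and the override value are illustrative.
#
# Contents of a hypothetical experiments/cfgs/example.yml:
#   TRAIN:
#     LEARNING_RATE: 0.0005
#
# cfg_from_file('experiments/cfgs/example.yml')
# print(cfg.TRAIN.LEARNING_RATE)  # 0.0005, overriding the 0.001 default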
def get_imdb_other(name):
    imdb = edict()
    import scipy.io as sio
    tmp = sio.loadmat('./obret/data/TV_ANIMAL_image_ids.mat')['image_ids']
    imdb.image_index = [image_id[0][0] for image_id in tmp]
    return imdb
def get_experiment_paths(coco_path):
    paths = edict()
    # COCO paths
    paths.coco_path = coco_path
    paths.coco_annotations = '%s/annotations' % coco_path
    paths.coco_images = '%s/images' % coco_path
    paths.coco_proposals = '%s/proposals/MCG/mat' % coco_path
    # Google Refexp path
    paths.google_refexp = '%s/google_refexp' % coco_path
    # UNC Refexp path
    paths.unc_refexp = '%s/unc_refexp' % coco_path
    # Cache dir
    cache_dir = '%s/cache_dir' % coco_path
    paths.h5_data = '%s/h5_data' % cache_dir
    paths.models = '%s/models' % cache_dir
    paths.pickles = '%s/pickles' % cache_dir
    paths.precomputed_image_features = '%s/precomputed_image_features' % cache_dir
    paths.retrieval_results = '%s/comprehension_results' % cache_dir
    for p in [paths.h5_data, paths.models, paths.pickles,
              paths.precomputed_image_features, paths.retrieval_results]:
        if not os.path.exists(p):
            os.makedirs(p)
    return paths
def produce(self, ip):
    scores, bbox = im_detect(self.net_, ip)
    # Find the top class for each box
    bestClass = np.argmax(scores, axis=1)
    bestScore = np.max(scores, axis=1)
    allDet = edict()
    for cl in [self.prms_.targetClass]:
        clsIdx = self.cls_.index(cl)
        # Get all the boxes that belong to the desired class
        idx = bestClass == clsIdx
        clScore = bestScore[idx]
        clBox = bbox[idx, :]
        # Sort the boxes by the score
        sortIdx = np.argsort(-clScore)
        topK = min(len(sortIdx), self.prms_.topK)
        sortIdx = sortIdx[0:topK]
        # Get the desired output
        clScore = clScore[sortIdx]
        clBox = clBox[sortIdx]
        clBox = clBox[:, (clsIdx * 4):(clsIdx * 4 + 4)]
        # Stack detections and perform NMS
        dets = np.hstack((clBox, clScore[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, self.prms_.nmsThresh)
        dets = dets[keep, :]
        # Only keep detections with high confidence
        inds = np.where(dets[:, -1] >= self.prms_.confThresh)[0]
        allDet[cl] = copy.deepcopy(dets[inds])
    return allDet
def get_imdb_cm(name):
    with open('./static/CM_2009_08/CM_2009_08_id.txt', 'r') as f:
        ids_str = f.read()
    ids_arr = ids_str.split('\n')
    imdb = edict()
    imdb.image_index = ids_arr
    return imdb
def get_caffe_net_files(netName):
    paths = caffe_model_paths()
    oPrms = edict()
    # Caffe model file
    oPrms.netFile = ''
    # Deploy prototxt
    oPrms.deployFile = ''
    # Solver prototxt
    oPrms.solFile = ''
    # Train file
    oPrms.defFile = ''
    if netName == 'vgg16':
        oPrms.netFile = 'VGG16.caffemodel'
        oPrms.defFile = ''
        baseDr = paths.caffemodel.imagenet.dr
    elif netName == 'vgg16-pascal-rcnn':
        oPrms.netFile = 'VGG16_faster_rcnn_final.caffemodel'
        oPrms.deployFile = 'test.prototxt'
        oPrms.solFile = 'solver.prototxt'
        oPrms.defFile = 'train.prototxt'
        baseDr = osp.join(paths.caffemodel.fasterrcnn.dr, 'VGG16')
    elif netName == 'zf-pascal-rcnn':
        oPrms.netFile = 'ZF_faster_rcnn_final.caffemodel'
        oPrms.defFile = ''
        baseDr = paths.caffemodel.fasterrcnn.dr
    elif netName == 'vgg16-coco-rcnn':
        oPrms.netFile = 'coco_vgg16_faster_rcnn_final.caffemodel'
        oPrms.deployFile = 'test.prototxt'
        oPrms.solFile = 'solver.prototxt'
        oPrms.defFile = 'train.prototxt'
        baseDr = osp.join(paths.caffemodel.fasterrcnn.dr, 'coco')
    for k in oPrms.keys():
        oPrms[k] = osp.join(baseDr, oPrms[k])
    return oPrms
def get_single_setup(s, n):
    return edict({
        'path': join(ROOT, 'build/data/order/.tmp'),
        'data': critical_orders[s - 1],
        'num': n,
        'id': s
    })
def build_nd_dict(self, lengthscale, densityscale, gravityscale, tempscale):
    self.ndp = edict({})
    self.ndp.name = self.dp.name
    self.ndp.depths = [i / lengthscale for i in self.dp.depths]
    self.ndp.temps = [i / tempscale for i in self.dp.temps]
    self.ndp.widths = [i / lengthscale for i in self.dp.widths]
    self.ndp.claps = [(i * (tempscale / (densityscale * gravityscale * lengthscale)))
                      for i in self.dp.claps]
def getCostosDeProduccion(fechaDesde, fechaHasta):
    materiaPrima = getMateriaPrima(fechaDesde, fechaHasta)
    manoDeObra = getManoDeObra(fechaDesde, fechaHasta)
    costosIndirectos = getCostosIndirectos(fechaDesde, fechaHasta)
    depreciaciones = getDepreciaciones(fechaDesde, fechaHasta)
    return edict({
        'total': materiaPrima.total + manoDeObra.total + costosIndirectos.total + depreciaciones,
        'materiaPrima': materiaPrima,
        'manoDeObra': manoDeObra,
        'costosIndirectos': costosIndirectos,
        'depreciaciones': depreciaciones
    })
def test_grid(self):
    mb = MBTiles('geography-class')
    tile = mb.grid(3, 4, 2)
    h = hashlib.md5(tile).hexdigest()
    self.failUnlessEqual('8d9cf7d9d0bef7cc1f0a37b49bf4cec7', h)
    p = re.compile(r"grid\((.+)\);")
    self.failUnless(p.match(tile))
    utfgrid = p.match(tile).group(1)
    utfgrid = edict(simplejson.loads(utfgrid))
    self.failUnlessEqual(utfgrid.grid[20:30], [
        u' !!!!!!!!!!!######### & $$$$$ %%%%%%%%%%%%%%%%%%%%%%%',
        u' !!!!!!!!!########### $ %%%%%%%%%%%%%%%%%%%%%%%',
        u" !!!!!!!!!######## # '''' %%%%%%%%%%%%%%%%%%%%%%%%",
        u" !!!!!! ########### ' ''''''%%%%%%%%%%%%%%%%%%%%%%%%",
        u" !!!!!! ######### ' '''''%%%%%%%%%%%%%%%%%%%%%%%%%",
        u" !!!! ######## ''''''''%%%%%%%%%%%%%%%%%%%%%%%%%",
        u" !! ####### (('''%%%%%%%%%%%%%%%%%%%%%%%%",
        u" ) ####### # ( ((('%%%%%%%%%%%%%%%%%%%%%%%%%",
        u' ) ######## # (( (((((%%%%%%%%%%%%%%%%%%%%%%%%',
        u' ))) ###### ((((((((((%%%%%%%%%%%%%%%%%%%%%%%%'])
    c = ord('#') + 32
    if c >= 92:
        c = c + 1
    if c >= 34:
        c = c + 1
    self.failUnlessEqual(utfgrid.data[str(c)]['ADMIN'], 'Estonia')
    self.failUnlessEqual(utfgrid.data[str(c)]['POP_EST'], 1299371)
def get_dataset_ytf(input_dir):
    ret = []
    label = 0
    person_names = []
    for person_name in os.listdir(input_dir):
        person_names.append(person_name)
    person_names = sorted(person_names)
    for person_name in person_names:
        _subdir = os.path.join(input_dir, person_name)
        if not os.path.isdir(_subdir):
            continue
        for _subdir2 in os.listdir(_subdir):
            _subdir2 = os.path.join(_subdir, _subdir2)
            if not os.path.isdir(_subdir2):
                continue
            _ret = []
            for img in os.listdir(_subdir2):
                fimage = edict()
                fimage.id = os.path.join(_subdir2, img)
                fimage.classname = str(label)
                fimage.image_path = os.path.join(_subdir2, img)
                fimage.bbox = None
                fimage.landmark = None
                _ret.append(fimage)
            ret += _ret
        label += 1
    return ret
def get_default_solver_prms(dbFile=DEF_DB, **kwargs):
    '''
    Refer to caffe.proto for a description of the variables.
    '''
    dArgs = edict()
    dArgs.baseSolDefFile = None
    dArgs.iter_size = 1
    dArgs.max_iter = 250000
    dArgs.base_lr = 0.001
    dArgs.lr_policy = 'step'
    dArgs.stepsize = 20000
    dArgs.gamma = 0.5
    dArgs.weight_decay = 0.0005
    dArgs.clip_gradients = -1
    # Momentum
    dArgs.momentum = 0.9
    # Other
    dArgs.regularization_type = 'L2'
    dArgs.random_seed = -1
    # Testing info
    dArgs.test_iter = 100
    dArgs.test_interval = 1000
    dArgs.snapshot = 2000
    dArgs.display = 20
    # Update parameters
    dArgs = mpu.get_defaults(kwargs, dArgs, False)
    dArgs.expStr = 'solprms' + get_sql_id(
        dbFile, dArgs,
        ignoreKeys=['test_iter', 'test_interval', 'snapshot', 'display'])
    return dArgs
def prepareData(mainPath, stopIter=0, overfittingThreshold=0):
    __C = edict()
    if DATASET == datasetList[0]:
        __C.SAVE_FILE = 'aPaYResults.txt'
        __C.VISUAL_DATA = 'visual/aPaY/'
        __C.LEARNED_MODEL_PATH = 'models/' + applyLossType + '/aPaY/'
        __C.WORD_VECTORS = 'wordVectors/aPaY/'
        __C.LOG_FILE = 'aPaYLog.txt'
    elif DATASET == datasetList[1]:
        __C.SAVE_FILE = 'AwAResults.txt'
        __C.VISUAL_DATA = 'visual/AwA/'
        __C.WORD_VECTORS = 'wordVectors/AwA/'
        __C.LEARNED_MODEL_PATH = 'models/' + applyLossType + '/AwA/'
        __C.LOG_FILE = 'AwALog.txt'
    elif DATASET == datasetList[2]:
        __C.SAVE_FILE = 'CUBResults.txt'
        __C.VISUAL_DATA = 'visual/CUB/'
        __C.LEARNED_MODEL_PATH = 'models/' + applyLossType + '/CUB/'
        __C.WORD_VECTORS = 'wordVectors/CUB/'
        __C.LOG_FILE = 'CUBLog.txt'
    else:
        pass
    if applyCrossValidation == False:
        __C.SAVE_MODEL = True
    else:
        __C.SAVE_MODEL = False
    __C.NUM_EPOCH = 1000000
    __C.NUM_HIDDEN = [100]  # [100, 200, 300, 400, 500]
    __C.VERBOSE = True
    __C.PERTURBED_EXAMPLES = False
    __C.PERTURBED_EXAMPLE_CORRLEVEL = 5
    __C.MAX_BATCH_SIZE = 64
    __C.NUMBER_OF_FOLD = 2
    __C.PLOT_ACC_PER_N_ITER = 100
    __C.OVERFITTING_THRESHOLD = overfittingThreshold
    __C.CV_PATH = DATASET + '/cv_data/'
    __C.TMP_FILENAME = tmpFileName
    if stopIter == 0:
        __C.STOP_ITER = __C.NUM_EPOCH
    else:
        __C.STOP_ITER = stopIter
    __C.TRAIN_CLASS_PATH = mainPath + '/' + languageModel + '/trainClasses.mat'
    __C.TEST_CLASS_PATH = mainPath + '/' + languageModel + '/testClasses.mat'
    __C.ATTRIBUTE_VECTOR_PATH = mainPath + '/' + languageModel + '/attributeVectors.mat'
    __C.PREDICATE_MATRIX_PATH = mainPath + '/predicateMatrix.mat'
    __C.ATTR_CLASSIFIER_RESULTS_PATH = mainPath + '/' + networkModel + '/attClassifierResults.mat'
    __C.GROUND_TRUTH_LABELS = mainPath + '/groundTruthLabels.mat'
    __C.TRAIN_IMAGE_LABELS = mainPath + '/trainImageLabels.mat'
    __C.TRAIN_SCORES = mainPath + '/' + networkModel + '/trainScores.mat'
    return __C
def handle(*arguments, **kwargs):
    if len(arguments) > 1 and arguments[1]:
        raise arguments[1]
    callback(edict({
        "count": int(arguments[0]['n']),
        "updated_existing": arguments[0]['updatedExisting']
    }))
class Config:
    # -------- Directory Config -------- #
    USER = getpass.getuser()
    ROOT_DIR = os.environ['MSPN_HOME']
    OUTPUT_DIR = osp.join(ROOT_DIR, 'model_logs', USER,
                          osp.split(osp.split(osp.realpath(__file__))[0])[1])
    TEST_DIR = osp.join(OUTPUT_DIR, 'test_dir')
    TENSORBOARD_DIR = osp.join(OUTPUT_DIR, 'tb_dir')

    # -------- Data Config -------- #
    DATALOADER = edict()
    DATALOADER.NUM_WORKERS = 1
    DATALOADER.ASPECT_RATIO_GROUPING = False
    DATALOADER.SIZE_DIVISIBILITY = 0

    DATASET = edict()
    DATASET.NAME = 'MPII'
    dataset = load_dataset(DATASET.NAME)
    DATASET.KEYPOINT = dataset.KEYPOINT

    INPUT = edict()
    INPUT.NORMALIZE = True
    INPUT.MEANS = [0.406, 0.456, 0.485]  # bgr
    INPUT.STDS = [0.225, 0.224, 0.229]

    # edict will automatically convert tuple to list, so ..
    INPUT_SHAPE = dataset.INPUT_SHAPE
    OUTPUT_SHAPE = dataset.OUTPUT_SHAPE

    # -------- Model Config -------- #
    MODEL = edict()
    MODEL.BACKBONE = 'Res-50'
    MODEL.UPSAMPLE_CHANNEL_NUM = 256
    MODEL.STAGE_NUM = 2
    MODEL.OUTPUT_NUM = DATASET.KEYPOINT.NUM
    MODEL.DEVICE = 'cuda'
    MODEL.WEIGHT = osp.join(ROOT_DIR, 'lib/models/resnet-50_rename.pth')

    # -------- Training Config -------- #
    SOLVER = edict()
    SOLVER.BASE_LR = 5e-4
    SOLVER.CHECKPOINT_PERIOD = 1600
    SOLVER.GAMMA = 0.5
    SOLVER.IMS_PER_GPU = 32
    SOLVER.MAX_ITER = 28800
    SOLVER.MOMENTUM = 0.9
    SOLVER.OPTIMIZER = 'Adam'
    SOLVER.WARMUP_FACTOR = 0.1
    SOLVER.WARMUP_ITERS = 2400
    SOLVER.WARMUP_METHOD = 'linear'
    SOLVER.WEIGHT_DECAY = 1e-5
    SOLVER.WEIGHT_DECAY_BIAS = 0

    LOSS = edict()
    LOSS.OHKM = True
    LOSS.TOPK = 8
    LOSS.COARSE_TO_FINE = True

    RUN_EFFICIENT = True

    # -------- Test Config -------- #
    TEST = dataset.TEST
    TEST.IMS_PER_GPU = 32
def get_data(self):
    '''
    Retrieves data in a format that can be used in training by loading in batches.

    Returns
    -------
    obj
        Dictionary of data-related information.
    '''
    TEXT = Field(tokenize=self.tokenizer, lower=True)
    KEY = Field(sequential=False, use_vocab=False)
    # read processed data from csv file
    df = pd.read_csv(self.proc_dataroot)
    # split dataset into train and validation and save to separate files
    train, valid = train_test_split(df, test_size=0.2)
    train.to_csv(getcwd() + '/data/processed/navinstr_train.csv', index=False)
    valid.to_csv(getcwd() + '/data/processed/navinstr_valid.csv', index=False)
    datafields = [('text', TEXT), ('key', KEY)]
    train_set, valid_set = TabularDataset.splits(
        path=getcwd() + '/data/processed',
        train='navinstr_train.csv',
        validation='navinstr_valid.csv',
        format='csv',
        fields=datafields,
        skip_header=True)
    TEXT.build_vocab(train_set, valid_set)
    vocab_size = len(TEXT.vocab)
    # torchtext backprop-through-time iterator
    train_iter, valid_iter = BPTTIterator.splits(
        (train_set, valid_set),
        batch_size=self.config.batch_size,
        bptt_len=8,
        device=self.device,
        repeat=False,
        shuffle=True)
    # train_loader = Batch(dl=train_iter, x_var='text')
    # valid_loader = Batch(dl=valid_iter, x_var='text')
    train_loader = MultiColumnBatch(dl=train_iter, x_var='text', y_var='key')
    valid_loader = MultiColumnBatch(dl=valid_iter, x_var='text', y_var='key')
    data_dict = edict({
        'train_loader': train_loader,
        'valid_loader': valid_loader,
        'train_iter': train_iter,
        'vocab_size': vocab_size,
        'vocab': TEXT.vocab
    })
    return data_dict
#!/usr/bin/env python3
"""
Set global configuration
"""
from easydict import EasyDict as edict

__C = edict()
# Consumers can get config by: from config import cfg
cfg = __C

# SDK parameters
__C.SDCSDK = edict()
# SDK path
__C.SDCSDK.SDKPATH = 'selfDrive'
# __C.SDCSDK.SDKPATH='E:\githubCodeSpace\selfDrive'

# Resolution / image parameters
__C.FRAME = edict()
# Image width
__C.FRAME.WIDTH = 512
# Image height
__C.FRAME.HEIGHT = 256

# Bird's-eye view settings, camera extrinsics
__C.BIRDVIEW = edict()
# Inset length of the top edge in the head-on view
__C.BIRDVIEW.upInsideOffset = 35
# Extension length of the bottom edge in the head-on view
__C.BIRDVIEW.downOutsideOffset = 100
# Distance from the horizon to the bottom of the screen
def main(argv):
    # -----------------------------------------
    # parse arguments
    # -----------------------------------------
    opts, args = getopt(argv, '', ['config=', 'restore='])

    # defaults
    conf_name = None
    restore = None

    # read opts
    for opt, arg in opts:
        if opt in ('--config'):
            conf_name = arg
        if opt in ('--restore'):
            restore = int(arg)

    # required opt
    if conf_name is None:
        raise ValueError('Please provide a configuration file name, e.g., --config=<config_name>')

    # -----------------------------------------
    # basic setup
    # -----------------------------------------
    conf = init_config(conf_name)
    paths = init_training_paths(conf_name)

    init_torch(conf.rng_seed, conf.cuda_seed)
    init_log_file(paths.logs)

    vis = init_visdom(conf_name, conf.visdom_port)
    # vis = None

    # defaults
    start_iter = 0
    tracker = edict()
    iterator = None
    has_visdom = vis is not None

    dataset = Dataset(conf, paths.data, paths.output)
    generate_anchors(conf, dataset.imdb, paths.output)
    compute_bbox_stats(conf, dataset.imdb, paths.output)

    # -----------------------------------------
    # store config
    # -----------------------------------------

    # store configuration
    pickle_write(os.path.join(paths.output, 'conf.pkl'), conf)

    # show configuration
    pretty = pretty_print('conf', conf)
    logging.info(pretty)

    # -----------------------------------------
    # network and loss
    # -----------------------------------------

    # training network
    rpn_net, optimizer = init_training_model(conf, paths.output)

    # setup loss
    criterion_det = RPN_3D_loss(conf)
    # criterion_det = RPN_3D_multi_view_loss(conf)
    # criterion_det = RPN_3D_multi_video(conf)
    # criterion_det = DisentanglingLoss(conf)

    # custom pretrained network
    if 'pretrained' in conf:
        load_weights(rpn_net, conf.pretrained)

    # resume training
    if restore:
        start_iter = (restore - 1)
        resume_checkpoint(optimizer, rpn_net, paths.weights, restore)

    freeze_blacklist = None if 'freeze_blacklist' not in conf else conf.freeze_blacklist
    freeze_whitelist = None if 'freeze_whitelist' not in conf else conf.freeze_whitelist

    freeze_layers(rpn_net, freeze_blacklist, freeze_whitelist)

    optimizer.zero_grad()

    start_time = time()

    # -----------------------------------------
    # train
    # -----------------------------------------
    for iteration in range(start_iter, conf.max_iter):

        # next iteration
        if conf.flow:
            iterator, images, flows, imobjs = next_iteration(dataset.loader, iterator, conf)
        else:
            iterator, images, imobjs = next_iteration(dataset.loader, iterator, conf)

        # learning rate
        adjust_lr(conf, optimizer, iteration)

        # forward
        if conf.flow:
            cls, prob, bbox_2d, bbox_3d, feat_size = rpn_net([images, flows])
        else:
            cls, prob, bbox_2d, bbox_3d, feat_size = rpn_net(images[:, :3, ::])

        # loss
        det_loss, det_stats = criterion_det(cls, prob, bbox_2d, bbox_3d, imobjs, feat_size)

        total_loss = det_loss
        stats = det_stats

        # backprop
        if total_loss > 0:
            total_loss.backward()
            # batch skip, simulates larger batches by skipping gradient step
            if (not 'batch_skip' in conf) or ((iteration + 1) % conf.batch_skip) == 0:
                optimizer.step()
                optimizer.zero_grad()

        # keep track of stats
        compute_stats(tracker, stats)

        # -----------------------------------------
        # display
        # -----------------------------------------
        if (iteration + 1) % conf.display == 0 and iteration > start_iter:

            # log results
            log_stats(tracker, iteration, start_time, start_iter, conf.max_iter)

            # display results
            if has_visdom:
                display_stats(vis, tracker, iteration, start_time, start_iter,
                              conf.max_iter, conf_name, pretty)

            # reset tracker
            tracker = edict()

        # -----------------------------------------
        # test network
        # -----------------------------------------
        if (iteration + 1) % conf.snapshot_iter == 0 and iteration > start_iter:

            # store checkpoint
            save_checkpoint(optimizer, rpn_net, paths.weights, (iteration + 1))

            if conf.do_test:

                # eval mode
                rpn_net.eval()

                # necessary paths
                results_path = os.path.join(paths.results, 'results_{}'.format((iteration + 1)))

                # -----------------------------------------
                # test kitti
                # -----------------------------------------
                if conf.test_protocol.lower() == 'kitti':
                    # delete and re-make
                    results_path = os.path.join(results_path, 'data')
                    mkdir_if_missing(results_path, delete_if_exist=True)
                    test_kitti_3d(conf.dataset_test, rpn_net, conf, results_path, paths.data)
                else:
                    logging.warning('Testing protocol {} not understood.'.format(conf.test_protocol))

                # train mode
                rpn_net.train()

                freeze_layers(rpn_net, freeze_blacklist, freeze_whitelist)
from __future__ import division
from __future__ import print_function

import os.path as osp
import numpy as np
from easydict import EasyDict as edict

__C = edict()
cfg = __C

# Dataset name: flowers, birds
__C.DATASET_NAME = 'bookcover'
__C.CONFIG_NAME = ''
__C.DATA_DIR = ''
__C.GPU_ID = 0
__C.CUDA = False
__C.WORKERS = 6

__C.RNN_TYPE = 'LSTM'  # 'GRU'
__C.B_VALIDATION = False

__C.TREE = edict()
__C.TREE.BRANCH_NUM = 3
__C.TREE.BASE_SIZE = 64

# Training options
__C.TRAIN = edict()
__C.TRAIN.BATCH_SIZE = 64
__C.TRAIN.MAX_EPOCH = 600
__C.TRAIN.SNAPSHOT_INTERVAL = 2000
__C.TRAIN.DISCRIMINATOR_LR = 2e-4
def main(args):
    global image_shape, net, env, glargs
    print(args)
    glargs = args
    env = lmdb.open(
        args.input + '/imgs_lmdb',
        readonly=True,
        # max_readers=1,
        lock=False,
        # readahead=False,
        meminit=False)
    ctx = []
    cvd = os.environ['CUDA_VISIBLE_DEVICES'].strip()
    if len(cvd) > 0:
        for i in xrange(len(cvd.split(','))):
            ctx.append(mx.gpu(i))
    if len(ctx) == 0:
        ctx = [mx.cpu()]
        print('use cpu')
    else:
        print('gpu num:', len(ctx))
    image_shape = [int(x) for x in args.image_size.split(',')]
    if use_mxnet:
        net = edict()
        vec = args.model.split(',')
        assert len(vec) > 1
        prefix = vec[0]
        epoch = int(vec[1])
        print('loading', prefix, epoch)
        net.ctx = ctx
        net.sym, net.arg_params, net.aux_params = mx.model.load_checkpoint(prefix, epoch)
        # net.arg_params, net.aux_params = ch_dev(net.arg_params, net.aux_params, net.ctx)
        all_layers = net.sym.get_internals()
        net.sym = all_layers['fc1_output']
        net.model = mx.mod.Module(symbol=net.sym, context=net.ctx, label_names=None)
        net.model.bind(data_shapes=[('data', (args.batch_size, 3, image_shape[1], image_shape[2]))])
        net.model.set_params(net.arg_params, net.aux_params)
    else:
        # sys.path.insert(0, lz.home_path + 'prj/InsightFace_Pytorch/')
        from config import conf
        from Learner import FaceInfer
        conf.need_log = False
        conf.batch_size = args.batch_size
        conf.fp16 = True
        conf.ipabn = False
        conf.cvt_ipabn = False
        conf.use_chkpnt = False
        net = FaceInfer(conf, gpuid=range(conf.num_devs))
        net.load_state(
            resume_path=args.model,
            latest=False,
        )
        net.model.eval()
    features_all = None
    i = 0
    fstart = 0
    buffer = []
    for line in open(os.path.join(args.input, 'filelist.txt'), 'r'):
        if i % 1000 == 0:
            print("processing ", i, data_size, 1. * i / data_size)
        i += 1
        line = line.strip()
        image_path = os.path.join(args.input, line)
        buffer.append(image_path)
        if len(buffer) == args.batch_size:
            embedding = get_feature(buffer)
            buffer = []
            fend = fstart + embedding.shape[0]
            if features_all is None:
                features_all = np.zeros((data_size, emb_size), dtype=np.float32)
            # print('writing', fstart, fend)
            features_all[fstart:fend, :] = embedding
            fstart = fend
    if len(buffer) > 0:
        embedding = get_feature(buffer)
        fend = fstart + embedding.shape[0]
        print('writing', fstart, fend)
        features_all[fstart:fend, :] = embedding
    write_bin(args.output, features_all)
from easydict import EasyDict as edict
import os
import numpy as np

cfg = edict()

## trainer
trainer = edict(
    gpu=0,
    max_epochs=8,
    disp_iter=10,
    save_iter=8,
    test_iter=2,
    training_func="train_mono_depth",
    evaluate_func="evaluate_kitti_depth",
)
cfg.trainer = trainer

## path
path = edict()
path.raw_path = "/content/visualDet3D/visualDet3D/data/kitti_raw"
path.depth_path = "/content/visualDet3D/visualDet3D/data/kitti_obj/data_depth_annotated/train"
path.validation_path = "/content/visualDet3D/visualDet3D/data/kitti_obj/data_depth_selection/depth_selection/val_selection_cropped"
path.test_path = "/content/visualDet3D/visualDet3D/data/kitti_obj/data_depth_selection/depth_selection/test_depth_prediction_anonymous"
path.visualDet3D_path = "/content/visualDet3D/visualDet3D"  # the path should point to the inner subfolder
path.project_path = "/content/visualDet3D/workdirs"  # or another path for pickle files, checkpoints, tensorboard logs, and output files
if not os.path.isdir(path.project_path):
    os.mkdir(path.project_path)
from easydict import EasyDict as edict
import yaml

_C = edict()
config = _C

# ------------------------------------------------------------------------------------- #
# Common options
# ------------------------------------------------------------------------------------- #
_C.RNG_SEED = -1
_C.OUTPUT_PATH = ''
_C.MODULE = ''
_C.GPUS = ''
_C.LOG_FREQUENT = 50
_C.VAL_FREQUENT = 1
_C.CHECKPOINT_FREQUENT = 1
_C.MODEL_PREFIX = ''
_C.NUM_WORKERS_PER_GPU = 4
_C.SCALES = ()

# ------------------------------------------------------------------------------------- #
# Common dataset options
# ------------------------------------------------------------------------------------- #
_C.DATASET = edict()
_C.DATASET.DATASET = ''
_C.DATASET.ANSWER_VOCAB_FILE = ''
_C.DATASET.ANSWER_VOCAB_SIZE = 9
_C.DATASET.LABEL_INDEX_IN_BATCH = -1
_C.DATASET.APPEND_INDEX = False
_C.DATASET.TASK = 'Q2AR'
_C.DATASET.BASIC_ALIGN = False
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import os.path as osp
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict

__C = edict()
# Consumers can get config by:
#   from fast_rcnn_config import cfg
cfg = __C

#
# Training options
#
__C.TRAIN = edict()

# Initial learning rate
__C.TRAIN.LEARNING_RATE = 0.001

# Momentum
__C.TRAIN.MOMENTUM = 0.9

# Weight decay, for regularization
__C.TRAIN.WEIGHT_DECAY = 0.0005

# Factor for reducing the learning rate
__C.TRAIN.GAMMA = 0.1
def get_args(**kwargs) -> dict:
    """Get args from the command line and add them to the cfg dictionary.

    Returns:
        dict: dictionary with config params of the training and network
    """
    cfg = kwargs
    parser = argparse.ArgumentParser(
        description="Train the Model on images and target masks",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("-l", "--learning-rate", metavar="LR", type=float, nargs="?",
                        default=0.001, help="Learning rate", dest="learning_rate")
    parser.add_argument("-f", "--load", dest="load", type=str, default=None,
                        help="Path to a .pth file to load")
    parser.add_argument("-b", "--backbone", dest="backbone", type=str,
                        default=os.path.abspath(".") + "/checkpoints/yolov4.weights",
                        help="Load model from a .pth file")
    parser.add_argument("-g", "--gpu", metavar="G", type=str, default="-1",
                        help="GPU id", dest="gpu")
    parser.add_argument("-dir", "--data-dir", type=str,
                        default=os.path.abspath("..") + "/data/KITTI/splits",
                        help="dataset dir", dest="dataset_dir")
    parser.add_argument("-names", "--names-path", type=str,
                        default=os.path.abspath(".") + "/names/BEV.names",
                        help="names file location", dest="names_path")
    parser.add_argument("-classes", type=int, default=1, help="dataset classes")
    parser.add_argument("-optimizer", type=str, default="adam",
                        help="training optimizer", dest="TRAIN_OPTIMIZER")
    parser.add_argument("-iou-type", type=str, default="IoU",
                        help="iou type (IoU, gIoU, rIoU, rgIoU)", dest="iou_type")
    parser.add_argument("-keep-checkpoint-max", type=int, default=10,
                        help="maximum number of checkpoints to keep. If set 0, all checkpoints will be kept",
                        dest="keep_checkpoint_max")
    args = vars(parser.parse_args())
    cfg.update(args)
    return edict(cfg)
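# A hedged usage sketch for the pattern above: code-level defaults passed as
# keyword arguments seed cfg, parsed command-line flags then overwrite or
# extend them, and the merged result comes back as an attribute-accessible
# edict. The 'batch' and 'subdivisions' keys are illustrative extras, not
# flags defined by this parser.
#
# cfg = get_args(batch=2, subdivisions=1)
# print(cfg.learning_rate)   # from -l/--learning-rate (default 0.001)
# print(cfg.batch)           # passed through unchanged from the kwargs seed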
import numpy as np
import os
from easydict import EasyDict as edict

config = edict()

config.bn_mom = 0.9
config.workspace = 256
config.emb_size = 512
config.ckpt_embedding = True
config.net_se = 0
config.net_act = 'prelu'
config.net_unit = 3
config.net_input = 1
config.net_blocks = [1, 4, 6, 2]
config.net_output = 'E'
config.net_multiplier = 1.0
config.val_targets = ['lfw', 'cfp_fp', 'agedb_30']
config.ce_loss = True
config.fc7_lr_mult = 1.0
config.fc7_wd_mult = 1.0
config.fc7_no_bias = False
config.max_steps = 0
config.data_rand_mirror = True
config.data_cutoff = False
config.data_color = 0
config.data_images_filter = 0
config.count_flops = True
config.memonger = False  # does not work for now

# network settings
import numpy as np
from easydict import EasyDict as edict
from tensorpack import *
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *

cfg = edict()
cfg.name = 'english'

cfg.input_height = 20
cfg.input_width = None
cfg.input_channel = 1

cfg.cnn = edict()
cfg.cnn.padding = "SAME"
cfg.cnn.channels = [32, 32, 32, 32, 64, 64, 64]
cfg.cnn.kernel_heights = [3, 3, 3, 3, 3, 3, 3]
cfg.cnn.kernel_widths = [3, 3, 3, 3, 3, 3, 3]
cfg.cnn.with_bn = True

cfg.rnn = edict()
cfg.rnn.hidden_size = 660
cfg.rnn.hidden_layers_no = 3

cfg.weight_decay = 5e-4

cfg.dictionary = [
    " ", "\"", "$", "%", "&", "'", "(", ")", "*", "-",
    ".", "/", "0", "1", "2", "3", "4", "5", "6", "7",
    "8", "9", ":", "<", ">", "?", "[", "]", "a", "b",
BASE_PATH_CONFIG = os.path.join(AI_CODE_BASE_PATH, 'config')
APP_ROOT_DIR = os.path.join(AI_CODE_BASE_PATH, 'apps')
if APP_ROOT_DIR not in sys.path:
    sys.path.insert(0, APP_ROOT_DIR)

import _cfg_
from annon.dataset.Annon import ANNON
import apputil
from _log_ import logcfg

log = logging.getLogger(__name__)
logging.config.dictConfig(logcfg)

appcfg = _cfg_.load_appcfg(BASE_PATH_CONFIG)
appcfg = edict(appcfg)

from detectron2.structures import BoxMode
from detectron2.utils import visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog

# %matplotlib inline

# In[3]:

# MODELINFO_FILEPATH = '/codehub/cfg/model/release/vidteq-ods-7-mask_rcnn.yml'
HOST = "10.4.71.69"
AI_ANNON_DATA_HOME_LOCAL = "/aimldl-dat/data-gaze/AIML_Annotation/ods_job_230119"
cmd = "train"
dbname = "PXL-291119_180404"
import yaml
from easydict import EasyDict as edict

config = edict()
config.backbone = "resnet101"
config.cache_folder = "cache"
config.model_root = "../model_zoo"
config.data_root = "../data_root"
config.kvstore = "device"
config.tag = "default"
config.mean = [.485, .456, .406]
config.var = [.229, .224, .225]
config.num_class = 19
config.p1 = 0.6
config.p2 = 0.8

config.TRAIN = edict()
config.TRAIN.source_dataset = "gtav"
config.TRAIN.source_shorter_min = 720
config.TRAIN.source_shorter_max = 720
config.TRAIN.source_min_scale = 0.7
config.TRAIN.source_max_scale = 1.3
config.TRAIN.source_crop_size = [1280, 720]
config.TRAIN.source_random_flip = True
config.TRAIN.source_random_gaussian = False
config.TRAIN.target_dataset = "cityscapes"
config.TRAIN.target_shorter_min = 512
config.TRAIN.target_shorter_max = 512
config.TRAIN.target_min_scale = 0.7
def get_args(**kwargs):
    cfg = kwargs
    parser = argparse.ArgumentParser(
        description='Train the Model on images and target masks',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # parser.add_argument('-b', '--batch-size', metavar='B', type=int, nargs='?', default=2,
    #                     help='Batch size', dest='batchsize')
    parser.add_argument('-l', '--learning-rate', metavar='LR', type=float, nargs='?',
                        default=0.001, help='Learning rate', dest='learning_rate')
    parser.add_argument('-f', '--load', dest='load', type=str, default=None,
                        help='Load model from a .pth file')
    parser.add_argument('-g', '--gpu', metavar='G', type=str, default='-1',
                        help='GPU', dest='gpu')
    parser.add_argument('-dir', '--data-dir', type=str, default=None,
                        help='dataset dir', dest='dataset_dir')
    parser.add_argument('-pretrained', type=str, default=None,
                        help='pretrained yolov4.conv.137')
    parser.add_argument('-classes', type=int, default=9, help='dataset classes')
    parser.add_argument('-train_label_path', dest='train_label', type=str,
                        default='data/train.txt', help="train label path")
    parser.add_argument('-optimizer', type=str, default='adam',
                        help='training optimizer', dest='TRAIN_OPTIMIZER')
    parser.add_argument('-iou-type', type=str, default='iou',
                        help='iou type (iou, giou, diou, ciou)', dest='iou_type')
    parser.add_argument('-keep-checkpoint-max', type=int, default=10,
                        help='maximum number of checkpoints to keep. If set 0, all checkpoints will be kept',
                        dest='keep_checkpoint_max')
    args = vars(parser.parse_args())
    # for k in args.keys():
    #     cfg[k] = args.get(k)
    cfg.update(args)
    return edict(cfg)
def __init__(self):
    # self.ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname("__file__")))
    self.ROOT_DIR = osp.abspath(os.path.dirname("__file__"))
    self.EXP_NAME = osp.splitext(osp.basename(__file__))[0]  # use the config.py file name as the model name

    # ----------------- dataset ----------------------
    if flag_debug is True:
        self.txt_f = '/home/xjx/data/mask/seg_coco_LIP/layout/split_train_val/precise_coco_lip_train.txt'
        self.root_dir = '/home/xjx/data/mask/seg_coco_LIP'
        self.val_txt_f = '/home/xjx/data/mask/seg_coco_LIP/layout/split_train_val/precise_coco_lip_val.txt'
        self.val_root_dir = '/home/xjx/data/mask/seg_coco_LIP'
    else:
        self.txt_f = '/home/zhangming/SSD_DATA/Skin/compaq_skin_data/compaq_skin_data_train.txt'
        self.root_dir = '/home/zhangming/SSD_DATA/Skin/compaq_skin_data'
        self.val_txt_f = '/home/zhangming/SSD_DATA/Skin/compaq_skin_data/compaq_skin_data_val.txt'
        self.val_root_dir = '/home/zhangming/SSD_DATA/Skin/compaq_skin_data'

    self.DATA_WORKERS = 8
    self.DATA_RESCALE = int(512)
    self.DATA_RANDOMCROP = 0
    self.DATA_RANDOMROTATION = 0
    self.DATA_RANDOMSCALE = 1  # scale range 1/r ~ r
    self.DATA_RANDOM_H = 10
    self.DATA_RANDOM_S = 10
    self.DATA_RANDOM_V = 10
    self.DATA_RANDOMFLIP = 0.5
    self.DATA_gt_precise = 0  # edge refinement: shrink inward by 2 pixels
    self.edge_width = 0  # edge width

    # self.INIT_dataset = edict({
    #     "type": "RemoDataset",
    #     "args": {
    #         'txt_f': self.txt_f,
    #         'root_dir': self.root_dir,
    #         # 'DATA_RESCALE': 512,
    #         'DATA_RANDOMCROP': 0,
    #         'DATA_RANDOMROTATION': 0,
    #         'DATA_RANDOMSCALE': 2,  # scale range 1/r ~ r
    #         'DATA_RANDOM_H': 10,
    #         'DATA_RANDOM_S': 10,
    #         'DATA_RANDOM_V': 10,
    #         'DATA_RANDOMFLIP': 0.5,
    #         'DATA_gt_precise': 0,
    #         'edge_width': 0
    #     }
    # })
    #
    # self.INIT_dataset_val = copy.deepcopy(self.INIT_dataset)
    # self.INIT_dataset_val.args.update(edict({
    #     'val_txt_f': self.val_txt_f,
    #     'val_root_dir': self.val_root_dir,
    # }))

    # ----------------- model ------------------------
    self.MODEL_SAVE_DIR = osp.join(self.ROOT_DIR, "ckpt", 'model', self.EXP_NAME)
    self.LOG_DIR = osp.join(self.ROOT_DIR, 'ckpt', 'log', self.EXP_NAME)
    self.MODEL_NUM_CLASSES = 2  # number of classes
    self.INIT_model = edict({
        "type": "deeplabv3plus",
        "args": {
            'num_classes': self.MODEL_NUM_CLASSES,
            "MODEL_BACKBONE": 'res50_atrous',
            'MODEL_OUTPUT_STRIDE': 16,
            'MODEL_ASPP_OUTDIM': 256,
            'MODEL_SHORTCUT_DIM': 48,
            'MODEL_SHORTCUT_KERNEL': 1,
        }
    })

    # ----------------- loss -------------------------
    self.INIT_loss = edict({
        "type": "CE_loss",
        "args": {
            'ignore_index': 255
        }
    })

    # ----------------- optim ------------------------
    self.TRAIN_LR = 0.01  # learning rate
    self.INIT_optim = edict({
        "type": "SGD",
        "args": {
            # 'lr': self.TRAIN_LR,
            "momentum": 0.9,
            "weight_decay": 0.00004,
        }
    })
    self.INIT_params = edict({
        "type": "Param_change",
        "args": {
            "lr": self.TRAIN_LR
        }
    })

    # ----------------- adjust lr --------------------
    self.INIT_adjust_lr = edict({
        "type": "LRsc_poly",
        "args": {
            'power': 0.9
        }
    })

    self.TRAIN_BN_MOM = 0.0003  # sync-BN parameter
    if flag_debug is True:
        self.GPUS_ID = [0, 0]
        self.batch_size_per_gpu = 2  # batch size
    else:
        self.GPUS_ID = [2, 3]
        self.batch_size_per_gpu = 8  # batch size per GPU
    self.TRAIN_SAVE_CHECKPOINT = 800  # save a checkpoint every N iterations
    self.TRAIN_SHUFFLE = True
    self.display = 10
    self.TRAIN_MINEPOCH = 0
    self.TRAIN_EPOCHS = 96  # number of epochs
    self.TRAIN_TBLOG = True
    self.TRAIN_CKPT = None

    # =========== val ==================
    if flag_debug:
        self.val_batch_size_per_gpu = 2
    else:
        self.val_batch_size_per_gpu = 4
    self.VAL_CHECKPOINT = 200  # validate every N iterations
    self.VAL_SHUFFLE = False
    self.TEST_MULTISCALE = [1]  # multiscale
    self.TEST_FLIP = True
    self.TEST_CKPT = ''
    self.TEST_GPUS = 1
    self.TEST_BATCHES = 1  # batch size

    self.__check()
    self.__add_path(os.path.join(self.ROOT_DIR, 'lib'))
import os
import os.path as osp
import numpy as np
import copy
from easydict import EasyDict as edict

cfg = edict()
cfg.MODEL_NAME = None
# cfg.CONV_TYPE = 'conv'
# cfg.BN_TYPE = 'bn'
# cfg.RELU_TYPE = 'relu'

# resnet
cfg_resnet = copy.deepcopy(cfg)
cfg_resnet.MODEL_NAME = 'resnet'
cfg_resnet.RESNET = edict()
cfg_resnet.RESNET.MODEL_TAGS = None
cfg_resnet.RESNET.PRETRAINED_PTH = None
cfg_resnet.RESNET.BN_TYPE = 'bn'
cfg_resnet.RESNET.RELU_TYPE = 'relu'

# deeplab
cfg_deeplab = copy.deepcopy(cfg)
cfg_deeplab.MODEL_NAME = 'deeplab'
cfg_deeplab.DEEPLAB = edict()
cfg_deeplab.DEEPLAB.MODEL_TAGS = None
cfg_deeplab.DEEPLAB.PRETRAINED_PTH = None
cfg_deeplab.DEEPLAB.FREEZE_BACKBONE_BN = False
cfg_deeplab.DEEPLAB.BN_TYPE = 'bn'
cfg_deeplab.DEEPLAB.RELU_TYPE = 'relu'
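# The deepcopy-then-specialize pattern above keeps each model's config
# independent of the shared base cfg. A minimal sketch of extending it with
# one more backbone; the 'hrnet' name is hypothetical, not part of this file.
cfg_hrnet = copy.deepcopy(cfg)       # independent copy; edits don't leak into cfg
cfg_hrnet.MODEL_NAME = 'hrnet'
cfg_hrnet.HRNET = edict()
cfg_hrnet.HRNET.MODEL_TAGS = None
cfg_hrnet.HRNET.PRETRAINED_PTH = None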
for person_name in person_names:
    _subdir = os.path.join(input_dir)
    # print(_subdir)
    if not os.path.isdir(_subdir):
        continue
    cnt = 0
    for _subdir2 in os.listdir(_subdir):
        _subdir2 = os.path.join(_subdir, _subdir2)
        cnt = cnt + 1
        # if not os.path.isdir(_subdir2):
        #     continue
        _ret = []
        # print('got')
        for img in os.listdir(_subdir2):
            # print(label)
            fimage = edict()
            # print('here')
            fimage.id = os.path.join(_subdir2, img)
            fimage.classname = str(label)
            fimage.image_path = os.path.join(_subdir2, img)
            # print(fimage.image_path)
            fimage.bbox = None
            fimage.landmark = None
            # f.write(str(1) + '\t' + str(fimage.image_path) + '\t' + fimage.classname + '\n')
            _ret.append(fimage)
            # print(fimage)
        ret += _ret
        print(label)
        label += 1
print(ret)
# encoding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os.path as osp
import sys
import time
import numpy as np
from easydict import EasyDict as edict
import argparse

C = edict()
config = C
cfg = C

C.seed = 304

"""please configure ROOT_dir and user on first use"""
C.abs_dir = osp.realpath(".")
C.this_dir = C.abs_dir.split(osp.sep)[-1]
C.root_dir = C.abs_dir[:C.abs_dir.index('model')]
C.log_dir = osp.abspath(osp.join(C.root_dir, 'log', C.this_dir))
C.log_dir_link = osp.join(C.abs_dir, 'log')
C.snapshot_dir = osp.abspath(osp.join(C.log_dir, "snapshot"))

exp_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())
C.log_file = C.log_dir + '/log_' + exp_time + '.log'
C.link_log_file = C.log_file + '/log_last.log'
C.val_log_file = C.log_dir + '/val_' + exp_time + '.log'
C.link_val_log_file = C.log_dir + '/val_last.log'
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao ([email protected])
# ------------------------------------------------------------------------------

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import yaml
import numpy as np
from easydict import EasyDict as edict

config = edict()

config.OUTPUT_DIR = ''
config.LOG_DIR = ''
config.DATA_DIR = ''
config.GPUS = '0'
config.WORKERS = 0
config.PRINT_FREQ = 20

# pose_resnet related params
POSE_RESNET = edict()
POSE_RESNET.NUM_LAYERS = 18
POSE_RESNET.DECONV_WITH_BIAS = False
POSE_RESNET.NUM_DECONV_LAYERS = 3
POSE_RESNET.NUM_DECONV_FILTERS = [256, 256, 256]
POSE_RESNET.NUM_DECONV_KERNELS = [4, 4, 4]
"""Graph matching config system.""" import os from easydict import EasyDict as edict __C = edict() # Consumers can get config by: cfg = __C __C.combine_classes = False # VOC2011-Keypoint Dataset __C.VOC2011 = edict() __C.VOC2011.KPT_ANNO_DIR = "./data/downloaded/PascalVOC/annotations/" # keypoint annotation __C.VOC2011.ROOT_DIR = "./data/downloaded/PascalVOC/VOC2011/" # original VOC2011 dataset __C.VOC2011.SET_SPLIT = "./data/split/voc2011_pairs.npz" # set split path __C.VOC2011.CLASSES = [ "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
from cosine_scheduler import CosineLRWithRestarts
from tqdm import tqdm

IN_KERNEL = os.environ.get('KAGGLE_WORKING_DIR') is not None

if not IN_KERNEL:
    import torchsummary
    from pytorchcv.model_provider import get_model
    from hyperopt import hp, tpe, fmin
else:
    from model_provider import get_model

import albumentations as albu
from easydict import EasyDict as edict  # type: ignore

opt = edict()
opt.INPUT = '../input/imet-2019-fgvc6/' if IN_KERNEL else '../input/'

opt.MODEL = edict()
opt.MODEL.ARCH = 'seresnext101_32x4d'
# opt.MODEL.IMAGE_SIZE = 256
opt.MODEL.INPUT_SIZE = 352  # crop size
opt.MODEL.VERSION = os.path.splitext(os.path.basename(__file__))[0][6:]
opt.MODEL.DROPOUT = 0.5
opt.MODEL.NUM_CLASSES = 1103

opt.EXPERIMENT_DIR = f'../models/{opt.MODEL.VERSION}'

opt.TRAIN = edict()
opt.TRAIN.NUM_FOLDS = 5
opt.TRAIN.BATCH_SIZE = 16 * torch.cuda.device_count()
from easydict import EasyDict as edict
import numpy as np

__C = edict()
cfg = __C

# 0. basic config
__C.TAG = 'default'
__C.CLASSES = 'Multiclass'  # 'Car'
__C.INCLUDE_SIMILAR_TYPE = False

# config of augmentation
__C.AUG_DATA = True
__C.AUG_METHOD_LIST = ['rotation', 'scaling', 'flip']
__C.AUG_METHOD_PROB = [0.5, 0.5, 0.5]
__C.AUG_ROT_RANGE = 18

__C.GT_AUG_ENABLED = False
__C.GT_EXTRA_NUM = 15
__C.GT_AUG_RAND_NUM = False
__C.GT_AUG_APPLY_PROB = 0.75
__C.GT_AUG_HARD_RATIO = 0.6

__C.PC_REDUCE_BY_RANGE = True
__C.PC_AREA_SCOPE = np.array([[-40, 40],
                              [-1, 3],
                              [0, 70.4]])  # x, y, z scope in rect camera coords

__C.CLS_MEAN_SIZE = np.array([[1.52, 1.63, 3.88]], dtype=np.float32)
"""
This file specifies default config options for Fast R-CNN. You should not
change values in this file. Instead, you should write a config file (in yaml)
and use cfg_from_file(yaml_file) to load it and override the default options.

Most tools in $ROOT/tools take a --cfg option to specify an override file.
    - See tools/{train,test}_net.py for example code that uses cfg_from_file()
    - See experiments/cfgs/*.yml for example YAML config override files
"""

import os
import os.path as osp
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict

__C = edict()
# Consumers can get config by:
#   from fast_rcnn_config import cfg
cfg = __C

#
# Training options
#
__C.TRAIN = edict()

# Scales to use during training (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (600,)

# Max pixel size of the longest side of a scaled input image
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
network config setting, will be used in train.py
"""
from easydict import EasyDict as edict

alexnet_cfg = edict({
    'num_classes': 10,
    'learning_rate': 0.002,
    'momentum': 0.9,
    'epoch_size': 1,
    'batch_size': 32,
    'buffer_size': 1000,
    'image_height': 227,
    'image_width': 227,
    'save_checkpoint_steps': 1562,
    'keep_checkpoint_max': 10,
})
def coco(mc, train_graph, eval_graph):
    with tf.name_scope("COCO_input"):
        keys_to_features = imdb.get_keys_to_features()
        # return tf.parse_example(batch_records, keys_to_features)
        dataset_train_path = os.path.join(mc.DATA_PATH, "coco_train.record")
        dataset_eval_path = os.path.join(mc.DATA_PATH, "coco_val.record")
        # create a new dataset with preprocessed/filtered records
        if (mc.REDUCE_DATASET and not mc.ALREADY_PREPROCESSED):
            imdb.reduce_dataset_by_class(mc, keys_to_features, dataset_set="train")
            if (eval_graph):
                eval_mc = edict(mc.copy())
                # eval_mc.BATCH_SIZE = 1
                eval_mc.IS_TRAINING = False
                eval_mc.DATA_AUGMENTATION = False
                mc.EVAL_ITERS = imdb.reduce_dataset_by_class(eval_mc, keys_to_features, dataset_set="val")
                eval_mc.EVAL_ITERS = mc.EVAL_ITERS
            dataset_train_path = os.path.join(
                mc["BASE_DIR"],
                "preprocessed_" + mc.DATASET_NAME.lower() + "_train.record")
            dataset_eval_path = os.path.join(
                mc["BASE_DIR"],
                "preprocessed_" + mc.DATASET_NAME.lower() + "_val.record")
            print("EVAL ITERS :%d" % (mc.EVAL_ITERS))
        if (mc.REDUCE_DATASET and mc.PREPROCESSED_DATA_DIR):
            dataset_train_path = os.path.join(
                mc["PREPROCESSED_DATA_DIR"],
                "preprocessed_" + mc.DATASET_NAME.lower() + "_train.record")
            dataset_eval_path = os.path.join(
                mc["PREPROCESSED_DATA_DIR"],
                "preprocessed_" + mc.DATASET_NAME.lower() + "_val.record")
        # get anchor boxes before creating the input graph
        mc.ANCHOR_BOX, mc.ANCHORS = imdb.get_anchor_box_from_dataset(
            mc, dataset_train_path, keys_to_features)
        # prepare training dataset
        if train_graph:
            with train_graph.as_default():
                dataset_train = tf.contrib.data.make_batched_features_dataset(
                    dataset_train_path, mc.BATCH_SIZE, keys_to_features,
                    num_epochs=mc.TRAIN_EPOCHS,
                    reader_num_threads=8,
                    parser_num_threads=8,
                    shuffle_buffer_size=13000 if mc.IS_TRAINING else 512,
                    sloppy_ordering=True)
                it_train = dataset_train.make_one_shot_iterator()
                train_list = imdb.load_data(it_train.get_next(), mc, training=True,
                                            image_decoder=tf.image.decode_jpeg)
        else:
            train_list = None
        # prepare evaluation dataset
        if eval_graph:
            with eval_graph.as_default():
                eval_mc = edict(mc.copy())
                # eval_mc.BATCH_SIZE = 1
                eval_mc.IS_TRAINING = False
                eval_mc.DATA_AUGMENTATION = False
                dataset_eval = tf.contrib.data.make_batched_features_dataset(
                    dataset_eval_path, eval_mc.BATCH_SIZE, keys_to_features,
                    num_epochs=None,
                    reader_num_threads=8,
                    parser_num_threads=8,
                    shuffle=False,
                    drop_final_batch=True)
                it_eval = dataset_eval.make_one_shot_iterator()
                eval_list = imdb.load_data(it_eval.get_next(), eval_mc, training=False,
                                           image_decoder=tf.image.decode_png)
        else:
            eval_list = None
        return train_list, eval_list, mc
from __future__ import division
from __future__ import print_function

import os
import os.path as osp
import numpy as np
from easydict import EasyDict as edict

__C = edict()
cfg = __C

# Dataset
__C.DATASET_NAME = ''
__C.EMBEDDING_TYPE = 'cnn-rnn'
__C.CONFIG_NAME = ''
__C.GPU_ID = '0'
__C.CUDA = True
__C.WORKERS = 4

__C.NET_G = ''
__C.NET_D = ''
__C.STAGE1_G = ''
__C.PROCESSED_DATA_DIR = ''
__C.RAW_DATA_DIR = ''
__C.PROBLEMATIC_NRRD_PATH = os.path.join(
    __C.PROCESSED_DATA_DIR,
    'problematic_nrrds_shapenet_unverified_256_filtered_div_with_err_textures.p')

__C.OUTPUT_DIR = ''
__C.VIS_COUNT = 5
if itr % 500 == 0:
    print "Compression:{}, Accuracy:{}".format(
        1. / get_sparsity(solver.net),
        test_net(solver.net, _count=1, _start="conv1"))
if len(tmp_ind) > 0 and itr < prune_stop_iter:
    # run at window @6
    _tmp_c = np.array(len(crates_list) * [-1.])
    for t_name in tmp_ind:
        _tmp_c[layer_inds[t_name]] = crates[t_name]
    apply_prune(solver.net, _tmp_c)
#if len(tmp_ind)>1 and itr < prune_stop_iter:
if itr % 1000 == 0 and len(tmp_ind) > 1 and itr < prune_stop_iter:
    # run at window @3
    accuracy_ = test_net(solver.net, _count=1, _start="conv1")
    es = {}
    if es_method == 'ncs':
        __C = edict()
        __C.parameters = {
            'reset_xl_to_pop': False,
            'init_value': tmp_crates,
            'stepsize': ncs_stepsize,
            'bounds': [0.0, 20.],
            'ftarget': 0,
            'tmax': 1600,
            'popsize': 10,
            'best_k': 1
        }
        es = ncs.NCS(__C.parameters)
        print '***************NCS initialization***************'
        tmp_x_ = np.array(crates_list)
        tmp_input_x = tmp_crates
        for _ii in range(len(tmp_ind)):
from models import *
from utils import progress_bar, Visualizer, lr_schedule
from resnetdataset import *
from collections import OrderedDict
from easydict import EasyDict as edict

os.environ['CUDA_VISIBLE_DEVICES'] = '0'

cfg = edict({
    'version': '20201127',
    'model': 'resnet50',
    'data_root': 'G:/DataBase/02_ZS_HCC_pathological/03-code/03-model',  # root directory of the train/test dataset
    'data_version': 'size256_down2',
    'lr': 1e-3,
    'restore': 'G:/DataBase/02_ZS_HCC_pathological/03-code/03-model/checkpoint/20201127ckpt.pth'
})

device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_patch_acc = 0  # best test accuracy
best_patient_acc = 0
start_epoch = 0  # start from epoch 0 or last checkpoint epoch

# Data
print('==> Preparing data..')
# trainset = RESNETdataset(cfg.data_root, cfg.data_version, (224, 224), 'train', augmentation=False)