def test_is_timestamp(self):
    for valid in (123456789, time.time(), 123456789.54321):
        try:
            BaseModel().is_timestamp(valid)
        except:
            self.fail('Error: timestamp should be valid')
    invalid = ['Friday December 24 12:34:56 GMT 2015',
               'Sun Jan 32 23:00:00 GMT 1980',
               'Sat Apr 9 24:00:00 MST 2012',
               -12345, True, False, -123321123.321]
    for i in invalid:
        with self.assertRaises(AssertionError):
            BaseModel().is_timestamp(i)
def wrapper(self, *args, **kwargs):
    sign = self.get_argument('sign', None)
    if not sign:
        raise ValueError(404)
    appid = self.get_argument('appid', None)
    if not appid:
        raise ValueError(405)
    model_base = BaseModel()
    # Sort arguments by key and concatenate key/value pairs, excluding the
    # signature itself, to rebuild the string the client signed.
    arguments = sorted(self.request.arguments.iteritems(), key=lambda x: x[0])
    result_string = ''.join([k + v[0] for k, v in arguments if k != 'sign'])
    appsecret = model_base.getAppSercet(appid)
    if not appsecret:
        raise ValueError(405)

    def default(*args):
        raise ValueError(403)

    def md5Method(result_string, appsecret):
        # Secret is both prepended and appended before hashing.
        return hashlib.md5(appsecret + result_string + appsecret).hexdigest()

    switch = {
        'md5': md5Method,
    }
    mysign = switch.get(self.get_argument('sign_method', None),
                        default)(result_string, appsecret)
    logger.info("sign:%s" % mysign)
    if mysign != sign:
        raise ValueError(402)
    return func(self, *args, **kwargs)
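# `wrapper` closes over `func`, so it only makes sense inside a decorator.
# A minimal sketch of such an enclosing decorator follows; the name
# `check_sign` is an assumption, not taken from the source.
import functools


def check_sign(func):
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        # ... signature-verification body as above ...
        return func(self, *args, **kwargs)
    return wrapper

# Hypothetical usage on a Tornado-style handler method:
# class OrderHandler(BaseHandler):
#     @check_sign
#     def get(self):
#         ...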
def test_is_falsey(self):
    try:
        BaseModel().is_falsey(False)
        BaseModel().is_falsey(0)
        BaseModel().is_falsey(None)
    except:
        self.fail('Error: data should be falsey')
    for i in [True, 1, "string"]:
        with self.assertRaises(AssertionError):
            BaseModel().is_falsey(i)
def test_not_empty(self):
    try:
        BaseModel().is_not_empty('fdsa')
        BaseModel().is_not_empty([1, 2, 3])
    except:
        self.fail('Error: data should not be empty / have length > 0')
    with self.assertRaises(AssertionError):
        BaseModel().is_not_empty([])
    with self.assertRaises(AssertionError):
        BaseModel().is_not_empty('')
def test_is_truthy(self):
    try:
        BaseModel().is_truthy(True)
        BaseModel().is_truthy(1)
        BaseModel().is_truthy("string")
    except:
        self.fail('Error: data should be truthy')
    for i in [False, 0, None]:
        with self.assertRaises(AssertionError):
            BaseModel().is_truthy(i)
def __init__(self, num_input_channels, num_latent_dims, num_classes,
             arch_key, arch_depth, train_all):
    Module.__init__(self)
    BaseModel.__init__(self, num_input_channels, num_latent_dims,
                       arch_key, arch_depth)
    self.classifier = LatentClassifier(num_latent_dims, num_classes)
    # When not training end-to-end, freeze the encoder so only the
    # classifier head receives gradient updates.
    if not train_all:
        for param in self.encoder.parameters():
            param.requires_grad = False
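# A runnable toy sketch of the freeze-the-encoder pattern used above.
# TinyModel is a stand-in for illustration, not the project's real
# encoder/classifier.
import torch
import torch.nn as nn


class TinyModel(nn.Module):
    def __init__(self, train_all=False):
        super().__init__()
        self.encoder = nn.Linear(8, 4)
        self.classifier = nn.Linear(4, 2)
        if not train_all:
            for param in self.encoder.parameters():
                param.requires_grad = False


# Only the classifier's parameters still require gradients, so the
# optimizer effectively trains just the head.
model = TinyModel(train_all=False)
optimizer = torch.optim.Adam(p for p in model.parameters() if p.requires_grad)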
def test_is_int(self):
    try:
        BaseModel().is_int(3)
    except AssertionError:
        self.fail('Error: 3 is not recognized as a number from is_int')
    try:
        BaseModel().is_int(15.235232)
    except AssertionError:
        self.fail('Error: 15.235232 is not recognized as a number from is_int')
    for i in ['string', [1, 2, 3], False, True]:
        with self.assertRaises(AssertionError):
            BaseModel().is_int(i)
def test_is_none(self):
    try:
        BaseModel().is_none(None)
    except:
        self.fail('Error: None should pass is_none')
    for i in ['', 0, [], True]:
        with self.assertRaises(AssertionError):
            BaseModel().is_none(i)
def gen_model(opts):
    # Imports are deferred so only the selected model's dependencies load.
    if opts.model == 'base':
        from models.basemodel import BaseModel
        return BaseModel(opts)
    elif opts.model == 'base_pt':
        from models.basemodel_pt import BaseModel_pt
        return BaseModel_pt(opts)
    elif opts.model == 'base_small':
        from models.basemodel_small import BaseModel_small
        return BaseModel_small(opts)
    elif opts.model == 'lstm':
        from models.LSTMModel import LSTMModel
        return LSTMModel(opts)
    elif opts.model == 'lstm_pt':
        from models.LSTMModel_pt import LSTMModel_pt
        return LSTMModel_pt(opts)
    elif opts.model == 'lstm_interm':
        from models.LSTMModel_interm import LSTMModel_interm
        return LSTMModel_interm(opts)
    elif opts.model == 'attn':
        from models.attention import AttnModel
        return AttnModel(opts)
    elif opts.model == 'hourglass':
        from models.stackedhourglassmodel import stacked_hourglass
        return stacked_hourglass(opts)
    else:
        raise Exception('Model type not found: {}'.format(opts.model))
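# Minimal usage sketch; the --model flag name is inferred from opts.model,
# and the argparse setup is an assumption, not the project's actual CLI.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--model', default='base',
                    choices=['base', 'base_pt', 'base_small', 'lstm',
                             'lstm_pt', 'lstm_interm', 'attn', 'hourglass'])
opts = parser.parse_args()
model = gen_model(opts)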
def __init__(self, num_input_channels, num_latent_dims, arch_key, arch_depth):
    Module.__init__(self)
    BaseModel.__init__(self, num_input_channels, num_latent_dims,
                       arch_key, arch_depth)
    # VaeNet attributes
    self.arch_dec = _ARCH_DICT_DEC[arch_key]
    self.bottleneck = VAEBottleneck(num_latent_dims)
    # Resolve the decoder constructor by name, e.g. "dlenet9" or "dresnet18".
    if self.arch_dec == "dlenet":
        assert arch_depth == 9
        self.decoder = getattr(edlenet, self.arch_dec + str(arch_depth))(
            num_input_channels=num_input_channels,
            num_latent_dims=num_latent_dims)
    elif self.arch_dec == "dresnet":
        assert arch_depth in [18, 34, 50, 101, 152]
        self.decoder = getattr(edresnet, self.arch_dec + str(arch_depth))(
            num_input_channels=num_input_channels,
            num_latent_dims=num_latent_dims)
    else:
        raise NotImplementedError
def test_schema_or(self):
    is_truthy = BaseModel().is_truthy
    is_falsey = BaseModel().is_falsey
    is_int = BaseModel().is_int
    is_string = BaseModel().is_string
    true_or_false = BaseModel().schema_or(is_truthy, is_falsey)
    int_or_str = BaseModel().schema_or(is_int, is_string)
    try:
        true_or_false(False)
        true_or_false(True)
        true_or_false("True")
        true_or_false(0)
    except:
        self.fail('Error: schema_or(is_truthy, is_falsey) should succeed')
    try:
        int_or_str("salad")
        int_or_str(-99.345)
        int_or_str("True")
        int_or_str(0)
    except:
        self.fail('Error: schema_or(is_int, is_string) should succeed for this value')
    for i in [True, False, None, MockModel()]:
        with self.assertRaises(AssertionError):
            int_or_str(i)
def load_model(self, model_path, args):
    if args.model == 'NON_ADAPTIVE_A3C':
        self.model = BaseModel(args)
    elif args.model == 'GCN':
        self.model = GCN(args)
    else:
        self.model = SAVN(args)
    # Map tensors onto CPU first so checkpoints saved on GPU still load.
    saved_state = torch.load(model_path,
                             map_location=lambda storage, loc: storage)
    self.model.load_state_dict(saved_state)
    self.model_options = ModelOptions()
    self.model_options.params = get_params(self.model, args.gpu_id)
def test_is_in_list(self):
    try:
        BaseModel().is_in_list([1, 2, 3])(1)
        BaseModel().is_in_list([1, 2, 3])(2)
        BaseModel().is_in_list([1, 2, 3])(3)
    except:
        self.fail('Error: is_in_list should work with integers')
    try:
        BaseModel().is_in_list([1.2, 2.3, 3.4])(1.2)
        BaseModel().is_in_list([1.2, 2.3, 3.4])(2.3)
        BaseModel().is_in_list([1.2, 2.3, 3.4])(3.4)
    except:
        self.fail('Error: is_in_list should work with floats')
    try:
        BaseModel().is_in_list(['hello', 'world', 'KITTY'])('hello')
        BaseModel().is_in_list(['hello', 'world', 'KITTY'])('world')
        BaseModel().is_in_list(['hello', 'world', 'KITTY'])('KITTY')
    except:
        self.fail('Error: is_in_list should work with strings')
    test_list = BaseModel().is_in_list(['Hello', 'World', True, 456])
    for i in ['Goodbye', 'WORLD', False, 123]:
        with self.assertRaises(AssertionError):
            test_list(i)
def test_schema_list_check(self):
    b = BaseModel()
    check_or = b.schema_list_check(b.schema_or(b.is_falsey, b.is_string))
    check_int = b.schema_list_check(b.is_int)
    check_truthy = b.schema_list_check(b.is_truthy)
    or_data = ['falsey', 0, 'or', False, 'string']
    int_data = [-1, -34.55, 0, 4321, 1, 1004.567]
    truthy_data = ["Truthy", True, 1, "data"]
    try:
        check_or(or_data)
        check_int(int_data)
        check_truthy(truthy_data)
    except:
        self.fail('Error: schema_list_check should succeed')
    for i in [or_data, int_data]:
        with self.assertRaises(AssertionError):
            check_truthy(i)
    for i in [or_data, truthy_data]:
        with self.assertRaises(AssertionError):
            check_int(i)
    for i in [truthy_data, int_data]:
        with self.assertRaises(AssertionError):
            check_or(i)
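# The tests above pin down the combinators' semantics: schema_or(a, b)
# passes when either validator passes, and schema_list_check(v) applies v
# to every element of a list. A sketch consistent with that behavior, as
# an assumption rather than the project's actual implementation:
def schema_or(self, *validators):
    def check(value):
        for validate in validators:
            try:
                return validate(value)
            except AssertionError:
                continue
        raise AssertionError('no validator accepted %r' % (value,))
    return check


def schema_list_check(self, validator):
    def check(values):
        for value in values:
            validator(value)
    return check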
def test_is_in_range(self):
    try:
        test_range = BaseModel().is_in_range(0, 4)
        for value in (0, 1, 2, 3, 4):
            test_range(value)
    except:
        self.fail('Error: is_in_range should work with integers')
    try:
        test_range = BaseModel().is_in_range(0, 4.5)
        for value in (0.5, 1.9, 2.2456767, 3.9999, 4.4999999):
            test_range(value)
    except:
        self.fail('Error: is_in_range should work with floats')
    test_range = BaseModel().is_in_range(0.5, 4.5)
    with self.assertRaises(AssertionError):
        test_range(0.49)
    with self.assertRaises(AssertionError):
        test_range(4.50001)
def main():
    config = configparser.ConfigParser()
    config.read('config.ini')
    # getboolean parses 'true'/'false' strings from the ini file; a plain
    # bool() of the raw string would be True for any non-empty value.
    get_raw_data = config.getboolean('MAIN', 'get_raw_data')
    train = config.getboolean('MAIN', 'train')
    if get_raw_data:
        train_data = DataLoader().load_data((512, 512))
        preprocessor = Preprocess()
        preprocessor.preprocess(train_data)
    x_train, y_train = LoadPickle().load_pickle()
    if train:
        model = BaseModel()
        Train(x_train, y_train, model)
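# A matching config.ini might look like this (section and key names taken
# from the code above; the values are illustrative):
#
#   [MAIN]
#   get_raw_data = false
#   train = true
#
# configparser's getboolean accepts true/false, yes/no, on/off, and 1/0.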
def test_is_string(self):
    try:
        BaseModel().is_string('hello')
    except:
        self.fail('Error: hello should be a valid string')
    for i in [123, 15.2, True, False, [1, 2, 3],
              ['this', 'is', 'string', 'array']]:
        with self.assertRaises(AssertionError):
            BaseModel().is_string(i)
def test_is_list(self):
    try:
        BaseModel().is_list([1, 2, 3])
    except:
        self.fail('Error: [1, 2, 3] should be a valid list')
    try:
        BaseModel().is_list(['a'])
    except:
        self.fail('Error: this should be a valid list of one string entry')
    try:
        BaseModel().is_list(['hello', 1, 2.5, False, [1, 2, 3]])
    except:
        self.fail('Error: a list can consist of multiple data types')
    try:
        BaseModel().is_list(('hi', 1, True))
    except:
        self.fail('Error: tuples should be valid')
    for i in [1, 'hi', True]:
        with self.assertRaises(AssertionError):
            BaseModel().is_list(i)
def main():
    utils.print_config(args)
    if 'train' not in args.mode:
        args.keep_rate = 1.0
    args.use_pretrain = args.use_pretrain == 'True'
    args.use_aux_task = args.use_aux_task == 'True'
    if args.mode == 'lm_train':
        args.model = 'lm'
        args.data_path = "./data/wikitext/wikitext-103/processed_wiki_train.bin"
        args.use_pretrain = False
    # model_path default: "data/log/{}"
    args.model_path = os.path.join(args.model_path, args.exp_name).format(args.model)
    if not os.path.exists(args.model_path):
        if 'train' not in args.mode:
            print(args.model_path)
            raise ValueError
        os.makedirs(args.model_path)
    with open(os.path.join(args.model_path, 'config.json'), 'w', encoding='utf8') as f:
        json.dump(vars(args), f)
    print("Default models path: {}".format(args.model_path))
    print('code start / {} mode / {} models'.format(args.mode, args.model))
    utils.assign_specific_gpu(args.gpu_nums)
    vocab = utils.Vocab()
    vardicts = utils.get_pretrain_weights(
        args.true_pretrain_ckpt_path
    ) if args.use_pretrain and args.mode == 'train' else None
    if args.mode == 'decode':
        if args.model == 'mmi_bidi':
            args.beam_size = args.mmi_bsize
        args.batch_size = args.beam_size
    modelhps = deepcopy(args)
    if modelhps.mode == 'decode':
        modelhps.max_dec_len = 1
    if args.model == 'vanilla':
        model = BaseModel(vocab, modelhps)
    elif args.model == 'mmi_bidi':
        if args.mode == 'decode':
            # Bidirectional MMI decoding: a backward model rescores the
            # candidates the forward (vanilla) model generates, each in its
            # own graph and session.
            bw_graph = tf.Graph()
            with bw_graph.as_default():
                bw_model = BaseModel(vocab, args)
                bw_sess = tf.Session(graph=bw_graph, config=utils.gpu_config())
            with bw_sess.as_default():
                with bw_graph.as_default():
                    bidi_ckpt_path = utils.load_ckpt(bw_model.hps,
                                                     bw_model.saver, bw_sess)
            fw_graph = tf.Graph()
            with fw_graph.as_default():
                modelhps.model_path = modelhps.model_path.replace('mmi_bidi', 'vanilla')
                modelhps.model = 'vanilla'
                fw_model = BaseModel(vocab, modelhps)
                fw_sess = tf.Session(graph=fw_graph)
            with fw_sess.as_default():
                with fw_graph.as_default():
                    ckpt_path = utils.load_ckpt(fw_model.hps,
                                                fw_model.saver, fw_sess)
        else:
            model = BaseModel(vocab, modelhps)
    elif args.model == 'lm':
        model = LMModel(vocab, modelhps)
    elif args.model == 'embmin':
        model = DiverEmbMin(vocab, modelhps)
    else:
        raise ValueError
    print('models load end')
    if args.mode in ['train', 'lm_train']:
        train(model, vocab, vardicts)
    elif args.mode == 'decode':
        import time
        if args.model == 'mmi_bidi':
            batcher = Batcher(vocab,
                              bw_model.hps.data_path.replace('train_', 'test_'),
                              args)
            decoder = BeamsearchDecoder(fw_model, batcher, vocab,
                                        fw_sess=fw_sess, bw_model=bw_model,
                                        bw_sess=bw_sess,
                                        bidi_ckpt_path=bidi_ckpt_path)
        else:
            batcher = Batcher(vocab,
                              model.hps.data_path.replace('train_', 'test_'),
                              args)
            decoder = BeamsearchDecoder(model, batcher, vocab)
        decoder.decode()
    elif args.mode == 'eval':
        pass
def get_image_path(path):
    # Collect all .jpg files under `path` (function name recovered from the
    # call site at the bottom of this script).
    out = []
    for root, subdirs, files in os.walk(path):
        for name in files:
            if name[-4:] == '.jpg':
                out.append(os.path.join(root, name))
    return out


if __name__ == '__main__':
    torch.set_grad_enabled(False)
    cfg = None
    net = None
    if args.trained_model is not None:
        cfg = cfg_plate
        net = BaseModel(cfg=cfg, phase='test')
    else:
        print("Don't support network!")
        exit(0)
    net = load_model(net, args.trained_model, args.cpu)
    net.eval()
    print('Finished loading model!')
    print(net)
    from torchscope import scope
    scope(net, input_size=(3, 480, 850))
    cudnn.benchmark = True
    device = torch.device("cpu" if args.cpu else "cuda")
    net = net.to(device)
    image_paths = get_image_path(args.image_path)
def main_worker(args):
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    # Log in Tensorboard
    writer = SummaryWriter()

    # log init
    save_dir = os.path.join('logs',
                            'train' + '_' + datetime.now().strftime('%Y%m%d_%H%M%S'))
    if os.path.exists(save_dir):
        raise NameError('model dir exists!')
    os.makedirs(save_dir)
    logger = init_log(save_dir)

    train_dataset = labelFpsDataLoader(
        "/home/can/AI_Camera/Dataset/License_Plate/CCPD2019/ccpd_base",
        preproc=preproc(cfg_plate['image_size'], (104, 117, 123)))
    # valid_dataset = ValDataset(os.path.join("./data/widerface/val", "data/train/label.txt"))
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               collate_fn=detection_collate,
                                               pin_memory=True)
    # valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False,
    #                                            num_workers=args.workers, collate_fn=detection_collate, pin_memory=True)

    # Initialize model
    model = BaseModel(cfg=cfg_plate)
    checkpoint = []
    if args.resume is not None:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            params = checkpoint['parser']
            # args = params
            args.start_epoch = checkpoint['epoch'] + 1
            model.load_state_dict(checkpoint['state_dict'])
            del params
            del checkpoint

    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        model = model.cuda()
        print('Run with DataParallel ....')
        model = torch.nn.DataParallel(model).cuda()

    priorbox = PriorBox(cfg_plate)
    with torch.no_grad():
        priors = priorbox.forward()
        priors = priors.cuda()

    criterion = MultiBoxLoss(args.num_classes, 0.35, True, 0, True, 7, 0.35, False)
    # Define optimizer
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr,
                                  weight_decay=args.weight_decay)
    # Define learning rate scheduler
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           mode='min',
                                                           factor=0.1,
                                                           patience=5,
                                                           verbose=True)
    logger.info('Steps per epoch: {}'.format(len(train_loader)))

    # Start training per epoch
    recall, precision = 0, 0
    for epoch in range(args.start_epoch, args.epochs):
        train_loss = train(train_loader, model, priors, criterion, optimizer,
                           scheduler, epoch, logger, args)
        # if epoch % args.eval_freq == 0:
        #     recall, precision = evaluate(valid_loader, model)
        #
        #     logger.info('Recall: {:.4f} \t'
        #                 'Precision: {:.3f} \t'.format(recall, precision))

        # Log to Tensorboard
        lr = optimizer.param_groups[0]['lr']
        writer.add_scalar('model/train_loss', train_loss, epoch)
        writer.add_scalar('model/learning_rate', lr, epoch)
        # writer.add_scalar('model/precision', precision, epoch)
        # writer.add_scalar('model/recall', recall, epoch)

        # scheduler.step()
        scheduler.step(train_loss)

        state = {
            'epoch': epoch,
            'parser': args,
            'state_dict': get_state_dict(model)
        }
        torch.save(state,
                   os.path.join(args.save_folder, args.network,
                                "{}_{}.pth".format(args.network, epoch)))
def test_is_valid_email(self):
    try:
        BaseModel().is_valid_email('*****@*****.**')
    except:
        self.fail('Error: this should be a valid colorado school email')
    try:
        BaseModel().is_valid_email('*****@*****.**')
    except:
        self.fail('Error: [email protected] should be a valid email')
    try:
        BaseModel().is_valid_email('*****@*****.**')
    except:
        self.fail('Error: this has the correct syntax for a valid email')
    try:
        BaseModel().is_valid_email('*****@*****.**')
    except:
        self.fail('Error: this has the correct syntax for a valid email')
    with self.assertRaises(AssertionError):
        BaseModel().is_valid_email('[email protected]')
    with self.assertRaises(AssertionError):
        BaseModel().is_valid_email('invalidEmail@yahoo!.com')
    with self.assertRaises(AssertionError):
        BaseModel().is_valid_email('hello')
    for i in [1, 1.5, True, [1, 2, 3], ['a', 'b', 'c']]:
        with self.assertRaises(Exception):
            BaseModel().is_valid_email(i)
def test_strictSchema(self):
    self.assertEqual(BaseModel().strictSchema(), False)
    self.assertEqual(MockModel().strictSchema(), True)
def test_requiredFields(self):
    self.assertEqual(BaseModel().requiredFields(), [])
    self.assertEqual(MockModel().requiredFields(), ['a', 'b', 'c', 'x'])
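# For reference, a MockModel consistent with test_strictSchema and
# test_requiredFields could be as small as this sketch; only the two
# return values are pinned down by the assertions, everything else about
# the real MockModel is an assumption.
class MockModel(BaseModel):
    def strictSchema(self):
        return True

    def requiredFields(self):
        return ['a', 'b', 'c', 'x']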
if __name__ == "__main__":
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    checkpoint_path = "weights/CCPD/CCPD_150.pth"
    img_dir = [
        "/home/can/AI_Camera/Dataset/License_Plate/CCPD2019/ccpd_weather",
        "/home/can/AI_Camera/Dataset/License_Plate/CCPD2019/ccpd_blur",
        "/home/can/AI_Camera/Dataset/License_Plate/CCPD2019/ccpd_tilt",
        "/home/can/AI_Camera/Dataset/License_Plate/CCPD2019/ccpd_db",
        "/home/can/AI_Camera/Dataset/License_Plate/CCPD2019/ccpd_fn",
        "/home/can/AI_Camera/Dataset/License_Plate/CCPD2019/ccpd_rotate",
        # "/home/can/AI_Camera/Dataset/License_Plate/CCPD2019/ccpd_np",
        "/home/can/AI_Camera/Dataset/License_Plate/CCPD2019/ccpd_challenge"
    ]
    print("loading model")
    # Initialize model
    model = BaseModel(cfg=cfg_plate)
    # Map the checkpoint onto whichever device is actually available.
    checkpoint = torch.load(checkpoint_path, map_location=device)
    model.load_state_dict(checkpoint['state_dict'])
    del checkpoint
    model.eval()
    model.to(device)
    # Sweep detection thresholds from 0.5 to 0.9 across each CCPD split.
    for i in np.linspace(0.5, 0.9, 8):
        print("############################")
        print("threshold: " + str(i))
        for index, path in enumerate(img_dir):
            print("**************************")
            print(path)
            val_dataset = ChaLocDataLoader([path], imgSize=320)
            valid_loader = torch.utils.data.DataLoader(
                val_dataset,