def main(args):
    # Get the model parameters from the JSON file
    p = ['unit', 'shape', 'shuffle_rate', 'actfun1', 'actfun2']
    unit, shape, sr, af1, af2 = G.jsonData(args.param, p)
    af1 = G.actfun(af1)
    af2 = G.actfun(af2)
    ch, size = shape[:2]

    # Create the model
    model = L.Classifier(
        JC(n_unit=unit, n_out=ch, rate=sr, actfun1=af1, actfun2=af2)
    )

    # Get the path for load_npz and load the trained model
    load_path = F.checkModelType(args.model)
    try:
        chainer.serializers.load_npz(args.model, model, path=load_path)
    except Exception:
        import traceback
        traceback.print_exc()
        print(F.fileFuncLine())
        exit()

    # GPU setup
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()
    # else:
    #     model.to_intel64()

    # Generate the highly compressed images
    org_imgs = I.io.readN(args.jpeg, ch)
    ed_imgs = [encDecWrite(img, ch, args.quality, args.out_path, i)
               for i, img in enumerate(org_imgs)]
    imgs = []
    with chainer.using_config('train', False):
        # Run the model on each input image
        for i, ei in enumerate(ed_imgs):
            img = predict(
                model, I.cnv.splitSQ(ei, size),
                args.batch, ei.shape, sr, args.gpu
            )
            # Save the generated result
            name = F.getFilePath(
                args.out_path, 'comp-' + str(i * 10 + 1).zfill(3), '.jpg'
            )
            print('save:', name)
            cv2.imwrite(name, img)
            imgs.append(img)

    # Concatenate the original, compressed, and predicted images,
    # then save and display them
    c3i = [concat3Images([i, j, k], 50, 333, ch, 1)
           for i, j, k in zip(org_imgs, ed_imgs, imgs)]
    for i, img in enumerate(c3i):
        path = F.getFilePath(
            args.out_path, 'concat-' + str(i * 10).zfill(3), '.jpg'
        )
        cv2.imwrite(path, img)
        cv2.imshow(path, img)

    cv2.waitKey()
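# Illustrative sketch: the JPEG round trip that encDecWrite presumably
# performs, i.e. encode at the requested quality and decode the compression
# artifacts back in. The helper name and signature below are assumptions made
# for illustration, not the project's confirmed implementation.
import cv2
import numpy as np


def enc_dec_sketch(img: np.ndarray, quality: int) -> np.ndarray:
    # Encode to an in-memory JPEG buffer at the given quality (0-100)
    ok, buf = cv2.imencode('.jpg', img,
                           [int(cv2.IMWRITE_JPEG_QUALITY), quality])
    assert ok, 'JPEG encoding failed'
    # Decode back to a numpy image; the artifacts are now baked in
    return cv2.imdecode(buf, cv2.IMREAD_UNCHANGED)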
def main(args):
    # Get the snapshot and model-parameter paths
    snapshot_path, param = getSnapshotAndParam(args.snapshot_and_json)

    # Get the model parameters from the JSON file
    p = ['unit', 'shape', 'shuffle_rate', 'actfun1', 'actfun2']
    unit, shape, sr, af1, af2 = G.jsonData(param, p)
    af1 = G.actfun(af1)
    af2 = G.actfun(af2)
    ch, size = shape[:2]

    # Load and combine the images used for inference
    img = getImage(args.jpeg, ch, size, args.img_num, args.random_seed)

    # Create the model
    model = L.Classifier(
        JC(n_unit=unit, n_out=ch, rate=sr, actfun1=af1, actfun2=af2)
    )

    out_imgs = [img]
    for s in snapshot_path:
        print(s)
        # Get the path for load_npz
        load_path = F.checkModelType(s)
        # Load the trained model
        try:
            chainer.serializers.load_npz(s, model, path=load_path)
        except Exception:
            import traceback
            traceback.print_exc()
            print(F.fileFuncLine())
            exit()

        # GPU setup
        if args.gpu >= 0:
            chainer.cuda.get_device_from_id(args.gpu).use()
            model.to_gpu()
        else:
            model.to_intel64()

        # Run the model on the input image
        ed = encDecWrite(img, ch, args.quality)
        with chainer.using_config('train', False):
            out_imgs.append(
                predict(model, I.cnv.splitSQ(ed, size),
                        args.batch, ed.shape, sr, args.gpu)
            )

    # Stack the predicted images and adjust their size
    img = stackImages(out_imgs, args.img_rate)
    # Display the result
    cv2.imshow('predict some snapshots', img)
    cv2.waitKey()
    # Save the result
    cv2.imwrite(F.getFilePath(args.out_path, 'snapshots.jpg'), img)
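# Illustrative sketch: one plausible layout for getSnapshotAndParam, assuming
# its argument is a directory holding the run's *.snapshot files plus a single
# *.json parameter file. This is an assumption for illustration, not the
# project's confirmed behavior.
import os
from glob import glob


def get_snapshot_and_param_sketch(folder: str):
    # Sort so the snapshots are visited in training order
    snapshots = sorted(glob(os.path.join(folder, '*.snapshot')))
    params = glob(os.path.join(folder, '*.json'))
    assert snapshots and params, 'snapshot or json file not found'
    return snapshots, params[0]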
def main(args):
    # Get the model parameters from the JSON file
    n_out, n_unit, actfun = GET.jsonData(args.param,
                                         ['n_out', 'n_unit', 'actfun'])

    # Create the model
    model = L.Classifier(
        CNT(n_out, n_unit, GET.actfun(actfun), base=L.ResNet50Layers(None)))

    # Get the path for load_npz and load the trained model
    load_path = F.checkModelType(args.model)
    try:
        chainer.serializers.load_npz(args.model, model, path=load_path)
    except Exception:
        import traceback
        traceback.print_exc()
        print(F.fileFuncLine())
        exit()

    # GPU setup
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()
    # else:
    #     model.to_intel64()

    # Generate the input image
    if args.human_num < 0:
        h_num = np.random.randint(0, 4)
    else:
        h_num = args.human_num

    if args.image == '':
        x = create(args.other_path, args.human_path, args.background_path,
                   args.obj_size, args.img_size, args.obj_num, h_num, 1)[0]
        print(x.shape)
    elif IMG.isImgPath(args.image):
        x = cv2.cvtColor(cv2.imread(args.image, IMG.getCh(0)),
                         cv2.COLOR_RGB2BGR)
    else:
        print('input image path is not found:', args.image)
        exit()

    t = img2resnet(np.array(x))

    # Run the model
    with chainer.using_config('train', False):
        st = time.time()
        y = model.predictor(t)
        num = y[0].data.argmax()
        print('exec time: {0:.2f}[s]'.format(time.time() - st))
        print('result:', num)

    # Save the result
    name = F.getFilePath(args.out_path, 'predict-' + str(num).zfill(2), '.jpg')
    print('save:', name)
    cv2.imwrite(name, x)
    cv2.imshow(name, x)
    cv2.waitKey()
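# Illustrative sketch: img2resnet is not shown in this section. Chainer ships
# a prepare() helper that performs the preprocessing ResNet50Layers expects
# (resize to 224x224, channel reordering, ImageNet mean subtraction); whether
# the project actually uses it is an assumption.
import numpy as np
from chainer.links.model.vision.resnet import prepare


def img2resnet_sketch(img: np.ndarray) -> np.ndarray:
    x = prepare(img)      # float32 CHW array ready for ResNet50Layers
    return x[np.newaxis]  # add the leading batch dimension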
def main(args):
    # Get a timestamp so that output files get unique names
    exec_time = GET.datetimeSHA()

    # Load dataset
    train, test, n_out = getDataset(args.in_path)

    # Choose the model
    actfun = GET.actfun(args.actfun)
    model = L.Classifier(CNT(n_out, args.n_unit, actfun, args.dropout))
    if args.gpu_id >= 0:
        # Make a specified GPU current
        chainer.backends.cuda.get_device_from_id(args.gpu_id).use()
        model.to_gpu()  # Copy the model to the GPU
        chainer.global_config.autotune = True
    # else:
    #     model.to_intel64()

    # Setup an optimizer
    optimizer = GET.optimizer(args.optimizer).setup(model)
    # Scale the learning rate of the pretrained base layers
    for func_name in model.predictor.base._children:
        for param in model.predictor.base[func_name].params():
            param.update_rule.hyperparam.alpha *= args.alpha

    # Setup iterator
    train_iter = MultiprocessIterator(train, args.batchsize)
    test_iter = MultiprocessIterator(test, args.batchsize,
                                     repeat=False, shuffle=False)
    # train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    # test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
    #                                              repeat=False, shuffle=False)

    # Set up a trainer
    updater = training.StandardUpdater(train_iter, optimizer,
                                       device=args.gpu_id)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'),
                               out=args.out_path)

    # Evaluate the model with the test dataset for each epoch
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu_id))

    # Dump a computational graph from 'loss' variable at the first iteration
    # The "main" refers to the target link of the "main" optimizer.
    trainer.extend(
        extensions.dump_graph('main/loss', out_name=exec_time + '_graph.dot'))

    # Take a snapshot for each specified epoch frequency
    frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
    trainer.extend(
        extensions.snapshot(filename=exec_time + '_{.updater.epoch}.snapshot'),
        trigger=(frequency, 'epoch'))

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport(log_name=exec_time + '.log'))
    # trainer.extend(extensions.observe_lr())

    # Save two plot images to the result dir
    if args.plot and extensions.PlotReport.available():
        trainer.extend(
            PlotReportLog(['main/loss', 'validation/main/loss'],
                          'epoch', file_name='loss.png'))
        trainer.extend(
            extensions.PlotReport(
                ['main/accuracy', 'validation/main/accuracy'],
                'epoch', file_name='acc.png'))
        # trainer.extend(
        #     PlotReportLog(['lr'],
        #                   'epoch', file_name='lr.png', val_pos=(-80, -60))
        # )

    # Print selected entries of the log to stdout
    # Here "main" refers to the target link of the "main" optimizer again, and
    # "validation" refers to the default name of the Evaluator extension.
    # Entries other than 'epoch' are reported by the Classifier link, called by
    # either the updater or the evaluator.
    trainer.extend(
        extensions.PrintReport([
            'epoch',
            'main/loss', 'validation/main/loss',
            'main/accuracy', 'validation/main/accuracy',
            # 'lr',
            'elapsed_time'
        ]))

    # Print a progress bar to stdout
    trainer.extend(extensions.ProgressBar())

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Set pruning
    # http://tosaka2.hatenablog.com/entry/2017/11/17/194051
    masks = pruning.create_model_mask(model, args.pruning, args.gpu_id)
    trainer.extend(pruning.pruned(model, masks))

    # Store the parameters needed by predict.py to reconstruct the model
    model_param = F.args2dict(args)
    model_param['shape'] = train[0][0].shape
    model_param['n_out'] = n_out
    if args.only_check is False:
        # Save the model parameters as JSON so predict.py can read them
        with open(F.getFilePath(args.out_path, exec_time, '.json'), 'w') as f:
            json.dump(model_param, f, indent=4, sort_keys=True)

    # Run the training
    trainer.run()

    # Save the final model; a snapshot could be used instead,
    # but snapshot files are much larger
    chainer.serializers.save_npz(
        F.getFilePath(args.out_path, exec_time, '.model'), model)
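# Illustrative sketch: how predict.py could rebuild this model from the two
# artifacts saved above, assuming the JSON carries the keys written by
# args2dict ('n_unit', 'actfun', 'dropout') plus 'n_out'. The helper name is
# hypothetical.
import json

import chainer
import chainer.links as L


def load_trained_sketch(json_path: str, model_path: str):
    with open(json_path) as f:
        p = json.load(f)
    # Rebuild the network with the saved hyperparameters...
    model = L.Classifier(
        CNT(p['n_out'], p['n_unit'], GET.actfun(p['actfun']), p['dropout']))
    # ...then restore the trained weights
    chainer.serializers.load_npz(model_path, model)
    return model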
def test_actfun(self):
    self.assertEqual(GET.actfun('relu').__name__, 'relu')
    self.assertEqual(GET.actfun('elu').__name__, 'elu')
    self.assertEqual(GET.actfun('c_relu').__name__, 'clipped_relu')
    self.assertEqual(GET.actfun('l_relu').__name__, 'leaky_relu')
    self.assertEqual(GET.actfun('sigmoid').__name__, 'sigmoid')
    self.assertEqual(GET.actfun('h_sigmoid').__name__, 'hard_sigmoid')
    self.assertEqual(GET.actfun('tanh').__name__, 'tanh')
    self.assertEqual(GET.actfun('s_plus').__name__, 'softplus')
    self.assertEqual(GET.actfun('none').__name__, 'F_None')
    # Unknown or empty names fall back to relu
    self.assertEqual(GET.actfun('test').__name__, 'relu')
    self.assertEqual(GET.actfun('').__name__, 'relu')
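# Illustrative sketch: an implementation of GET.actfun consistent with the
# test above, written as a name-to-function lookup that falls back to relu
# for unknown or empty keys. F_None is assumed to be an identity passthrough.
import chainer.functions as F


def F_None(x):
    # Identity activation, so 'none' still returns a callable
    return x


def actfun_sketch(name: str):
    table = {
        'relu': F.relu,
        'elu': F.elu,
        'c_relu': F.clipped_relu,
        'l_relu': F.leaky_relu,
        'sigmoid': F.sigmoid,
        'h_sigmoid': F.hard_sigmoid,
        'tanh': F.tanh,
        's_plus': F.softplus,
        'none': F_None,
    }
    # Unknown names (including '') fall back to relu, as the test expects
    return table.get(name, F.relu)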
def main(args):
    # Get the model parameters from the JSON file
    n_out, n_unit, actfun = GET.jsonData(args.param,
                                         ['n_out', 'n_unit', 'actfun'])

    # Create the model
    model = L.Classifier(
        CNT(n_out, n_unit, GET.actfun(actfun), base=L.ResNet50Layers(None)))

    # Get the path for load_npz and load the trained model
    load_path = FNC.checkModelType(args.model)
    try:
        chainer.serializers.load_npz(args.model, model, path=load_path)
    except Exception:
        import traceback
        traceback.print_exc()
        print(FNC.fileFuncLine())
        exit()

    # GPU setup
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()
        xp = cupy
    else:
        xp = np
        # model.to_intel64()

    # Generate the evaluation images
    x = []
    t = []
    for i in range(n_out):
        x.extend(
            create(args.other_path, args.human_path, args.background_path,
                   args.obj_size, args.img_size, args.obj_num, i,
                   args.img_num))
        t.extend([i] * args.img_num)

    x = imgs2resnet(np.array(x), xp)
    t = xp.array(t, dtype=np.int8)
    print(x.shape, t.shape)

    # Run the model
    with chainer.using_config('train', False):
        st = time.time()
        y = model.predictor(x)
        print('exec time: {0:.2f}[s]'.format(time.time() - st))

    # Evaluate precision, recall and the F score
    # precision: of the cases predicted as a given count, the fraction whose
    #            true count matches; predicting a different count lowers it
    # recall:    of the cases whose true count is a given value, the fraction
    #            predicted correctly; predicting the wrong count lowers it
    # F score:   2 / ((1 / recall) + (1 / precision))
    print('t:', t)
    print('y:', y.data.argmax(axis=1))
    p, r, f, _ = F.classification_summary(y, t)
    precision = p.data.tolist()
    recall = r.data.tolist()
    F_score = f.data.tolist()
    print('num|precision|recall|F')
    for i, (prec, rec, f_val) in enumerate(zip(precision, recall, F_score)):
        print('{0:3}| {1:4.3f}| {2:4.3f}| {3:4.3f}'.format(
            i, prec, rec, f_val))
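# Illustrative sketch: a toy run of F.classification_summary to make the
# metrics above concrete, using 4 samples and 2 classes with one mistake on
# class 0. All values below follow from the arrays shown.
import numpy as np
import chainer.functions as F

# Logits for 4 samples; argmax gives predictions [0, 1, 1, 1]
y = np.array([[2.0, 0.1],
              [0.2, 1.5],
              [0.3, 1.2],
              [0.1, 2.2]], dtype=np.float32)
t = np.array([0, 0, 1, 1], dtype=np.int32)  # true labels

p, r, f, support = F.classification_summary(y, t)
# class 0: precision 1/1 = 1.0, recall 1/2 = 0.5, F = 2/((1/0.5)+(1/1.0)) = 0.667
# class 1: precision 2/3 = 0.667, recall 2/2 = 1.0, F = 0.8
print(p.data, r.data, f.data, support.data)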
def main(args):
    # Get a timestamp so that output files get unique names
    exec_time = GET.datetimeSHA()

    # Set up a neural network to train
    # Classifier reports softmax cross entropy loss and accuracy at every
    # iteration, which will be used by the PrintReport extension below.

    # Get the activation functions
    actfun1 = GET.actfun(args.actfun1)
    actfun2 = GET.actfun(args.actfun2)

    # Choose the network
    if args.network == 0:
        from Lib.network import JC_DDUU as JC
    else:
        from Lib.network2 import JC_UDUD as JC

    model = L.Classifier(
        JC(n_unit=args.unit, layer=args.layer_num, rate=args.shuffle_rate,
           actfun1=actfun1, actfun2=actfun2, dropout=args.dropout,
           view=args.only_check),
        lossfun=GET.lossfun(args.lossfun)
    )
    # Accuracy is not used here, so disable it.
    # To use it, a custom accuracy function would probably have to be written.
    model.compute_accuracy = False

    # Setup an optimizer
    optimizer = GET.optimizer(args.optimizer).setup(model)

    # Load dataset
    train, test, _ = GET.imgData(args.in_path)
    train = ResizeImgDataset(train, args.shuffle_rate)
    test = ResizeImgDataset(test, args.shuffle_rate)
    # Store the parameters needed by predict.py to reconstruct the model
    model_param = F.args2dict(args)
    model_param['shape'] = train[0][0].shape

    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
                                                 repeat=False, shuffle=False)

    # Set up a trainer
    updater = training.StandardUpdater(
        train_iter, optimizer, device=args.gpu_id
    )
    trainer = training.Trainer(
        updater, (args.epoch, 'epoch'), out=args.out_path
    )

    # Evaluate the model with the test dataset for each epoch
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu_id))

    # Dump a computational graph from 'loss' variable at the first iteration
    # The "main" refers to the target link of the "main" optimizer.
    trainer.extend(
        extensions.dump_graph('main/loss', out_name=exec_time + '_graph.dot')
    )

    # Take a snapshot for each specified epoch frequency
    frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
    trainer.extend(
        extensions.snapshot(filename=exec_time + '_{.updater.epoch}.snapshot'),
        trigger=(frequency, 'epoch')
    )

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport(log_name=exec_time + '.log'))
    # trainer.extend(extensions.observe_lr())

    # Save two plot images to the result dir
    if args.plot and extensions.PlotReport.available():
        trainer.extend(
            PlotReportLog(['main/loss', 'validation/main/loss'],
                          'epoch', file_name='loss.png')
        )
        # trainer.extend(
        #     PlotReportLog(['lr'],
        #                   'epoch', file_name='lr.png', val_pos=(-80, -60))
        # )

    # Print selected entries of the log to stdout
    # Here "main" refers to the target link of the "main" optimizer again, and
    # "validation" refers to the default name of the Evaluator extension.
    # Entries other than 'epoch' are reported by the Classifier link, called by
    # either the updater or the evaluator.
    trainer.extend(extensions.PrintReport([
        'epoch',
        'main/loss', 'validation/main/loss',
        # 'lr',
        'elapsed_time'
    ]))

    # Print a progress bar to stdout
    trainer.extend(extensions.ProgressBar())

    # Resume from a snapshot
    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    # Set pruning
    # http://tosaka2.hatenablog.com/entry/2017/11/17/194051
    masks = pruning.create_model_mask(model, args.pruning, args.gpu_id)
    trainer.extend(pruning.pruned(model, masks))

    # Make a specified GPU current
    if args.gpu_id >= 0:
        chainer.backends.cuda.get_device_from_id(args.gpu_id).use()
        # Copy the model to the GPU
        model.to_gpu()
        chainer.global_config.autotune = True
    else:
        model.to_intel64()

    # Save the model parameters as JSON so predict.py can read them
    if args.only_check is False:
        F.dict2json(args.out_path, exec_time + '_train', model_param)

    # Run the training
    trainer.run()

    # Save the final model; a snapshot could be used instead,
    # but snapshot files are much larger
    chainer.serializers.save_npz(
        F.getFilePath(args.out_path, exec_time, '.model'), model
    )
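# Illustrative sketch: the shuffle_rate passed to the JC networks above
# suggests upscaling via sub-pixel (pixel shuffle) convolution, which Chainer
# exposes as F.depth2space. A minimal demonstration of the shape
# transformation, under that assumption:
import numpy as np
import chainer.functions as F

rate = 2
# (batch, channels * rate^2, H, W) -> (batch, channels, H * rate, W * rate)
x = np.zeros((1, 3 * rate ** 2, 8, 8), dtype=np.float32)
y = F.depth2space(x, rate)
print(y.shape)  # (1, 3, 16, 16)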