def load_images(IMG_DIR):
    dir_names = glob.glob('{}/*'.format(IMG_DIR))
    file_names = [glob.glob('{}/*.jpg'.format(dir_name)) for dir_name in dir_names]
    file_names = list(chain.from_iterable(file_names))
    labels = [os.path.basename(os.path.dirname(file_name)) for file_name in file_names]
    dir_names = [os.path.basename(dir_name) for dir_name in dir_names]
    labels = [dir_names.index(label) for label in labels]

    d = datasets.LabeledImageDataset(list(zip(file_names, labels)))

    def resize(img):
        width, height = 224, 224
        img = Image.fromarray(img.transpose(1, 2, 0))
        img = img.resize((width, height), Image.BICUBIC)
        return np.asarray(img).transpose(2, 0, 1)

    def transform(inputs):
        img, label = inputs
        img = img[:3, ...]
        img = resize(img.astype(np.uint8))
        img = img.astype(np.float32)
        img = img / 255
        return img, label

    transformed_d = datasets.TransformDataset(d, transform)
    return transformed_d
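# A minimal usage sketch of load_images (hedged): the 'images/' directory,
# batch size, and iterator setup below are illustrative assumptions, not part
# of the original snippet. It assumes one sub-directory per class containing
# *.jpg files, which is the layout load_images expects.
from chainer import iterators

train = load_images('images')
img, label = train[0]  # CHW float32 array scaled to [0, 1] and an int class index
train_iter = iterators.SerialIterator(train, batch_size=32)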
def test_LabeledImageDataset(self):
    file_path = os.path.join(TEST_INPUT_DIR_PATH, TEXT_FILE_NAME)
    root = TEST_INPUT_DIR_PATH
    dataset = datasets.LabeledImageDataset(file_path, root)
    self.assertTrue(len(dataset) == 4)
    answer_labels = [0, 1, 2, 3]
    for answer_label, (image, label) in zip(answer_labels, dataset):
        self.assertTrue(answer_label == label)
        self.assertTrue(image.shape == (3, 256, 256))
def __init__(self, path, root, mean, crop_size, random=True, is_scaled=True):
    self.base = datasets.LabeledImageDataset(path, root)
    self.mean = mean.astype('f')
    self.crop_size = crop_size
    self.random = random
    self.is_scaled = is_scaled
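# A hedged sketch of a matching get_example for this kind of preprocessed
# dataset, modeled on the standard Chainer ImageNet example (random or center
# crop, mean subtraction, optional scaling to [0, 1]). The body below is an
# assumption, not the original implementation; it presumes numpy is imported
# as np and that self.mean has the same spatial size as the loaded images.
def get_example(self, i):
    crop_size = self.crop_size
    image, label = self.base[i]
    _, h, w = image.shape

    if self.random:
        # Random crop for training.
        top = np.random.randint(0, h - crop_size + 1)
        left = np.random.randint(0, w - crop_size + 1)
    else:
        # Center crop for evaluation.
        top = (h - crop_size) // 2
        left = (w - crop_size) // 2
    bottom, right = top + crop_size, left + crop_size

    image = image[:, top:bottom, left:right]
    image -= self.mean[:, top:bottom, left:right]
    if self.is_scaled:
        image *= (1.0 / 255.0)
    return image, label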
def main():
    gpu_id = 0
    batchsize = 10
    report_keys = ["loss_dis", "loss_gen"]

    train_dataset = datasets.LabeledImageDataset(str(argv[1]))
    train_iter = iterators.SerialIterator(train_dataset, batchsize)

    generator = Generator()
    discriminator = Discriminator()

    opts = {}
    opts["opt_gen"] = setup_adam_optimizer(generator)
    opts["opt_dis"] = setup_adam_optimizer(discriminator)

    models = [generator, discriminator]
    chainer.cuda.get_device_from_id(gpu_id).use()
    print("use gpu {}".format(gpu_id))
    for m in models:
        m.to_gpu()

    updater_args = {
        "iterator": {'main': train_iter},
        "device": gpu_id,
        "optimizer": opts,
        "models": models
    }

    output = 'result'
    display_interval = 20
    evaluation_interval = 1000
    max_iter = 10000

    updater = Updater(**updater_args)
    trainer = training.Trainer(updater, (max_iter, 'iteration'), out=output)
    trainer.extend(
        extensions.LogReport(keys=report_keys,
                             trigger=(display_interval, 'iteration')))
    trainer.extend(extensions.PrintReport(report_keys),
                   trigger=(display_interval, 'iteration'))
    trainer.extend(sample_generate(generator, output),
                   trigger=(evaluation_interval, 'iteration'),
                   priority=extension.PRIORITY_WRITER)
    trainer.run()
def main():
    infer_net = Filter()
    gpu_id = 0
    infer_net.to_gpu(gpu_id)

    test_data = datasets.LabeledImageDataset(argv[1])
    x = test_data[200][0]
    print(test_data[200][0])
    x = x[None, ...]
    x = infer_net.xp.asarray(x)

    with chainer.using_config('train', False), chainer.using_config(
            'enable_backprop', False):
        y = infer_net(x)

    y = to_cpu(y.array)
    #print(y)
    y = y[0]
    y = np.array(np.clip(y * 127.5 + 127.5, 0.0, 255.0), dtype=np.uint8)
    _, H, W = y.shape
    y = y.transpose(1, 2, 0)  # CHW -> HWC so PIL can build an image from it
    im = Image.fromarray(y)
    im.show()
def train(settings: dict, output_path: PosixPath):
    """Main."""
    gpu_num = len(settings["gpu_devices"])

    # # make dataset
    # # # read meta info.
    train_df = pd.read_csv(
        config.PROC_DATA /
        "train_add-{}fold-index.csv".format(settings["n_folds"]))
    # # # make label arr
    train_labels_arr = train_df[config.COMP_NAMES].values.astype("i")

    # # # make train set
    if settings["val_fold"] != -1:
        train_dataset = datasets.LabeledImageDataset(
            pairs=list(
                zip((train_df[train_df["fold"] != settings["val_fold"]]
                     ["image_id"] + ".png").tolist(),
                    train_labels_arr[train_df["fold"] != settings["val_fold"],
                                     ...])),
            root=config.TRAIN_IMAGES_DIR.as_posix())
    else:
        train_dataset = datasets.LabeledImageDataset(
            pairs=list(
                zip((train_df["image_id"] + ".png").tolist(),
                    train_labels_arr)),
            root=config.TRAIN_IMAGES_DIR.as_posix())

    train_dataset = datasets.TransformDataset(
        train_dataset,
        nn_training.ImageTransformer(settings["training_transforms"]))

    if gpu_num > 1:
        # # if using multi-gpu, split train set into gpu_num.
        train_sub_dataset_list = []
        total_size = len(train_dataset)
        subset_size = (total_size + gpu_num - 1) // gpu_num
        np.random.seed(1086)
        random_order = np.random.permutation(len(train_dataset))
        for i in range(gpu_num):
            start_idx = min(i * subset_size, total_size - subset_size)
            end_idx = min((i + 1) * subset_size, total_size)
            print(i, start_idx, end_idx)
            train_sub_dataset_list.append(
                datasets.SubDataset(train_dataset,
                                    start=start_idx,
                                    finish=end_idx,
                                    order=random_order))
        train_dataset = train_sub_dataset_list
        for i, subset in enumerate(train_dataset):
            print("subset{}: {}".format(i, len(subset)))

    # # # validation set
    if settings["val_fold"] != -1:
        val_dataset = datasets.LabeledImageDataset(
            pairs=list(
                zip((train_df[train_df["fold"] == settings["val_fold"]]
                     ["image_id"] + ".png").tolist(),
                    train_labels_arr[train_df["fold"] == settings["val_fold"],
                                     ...])),
            root=config.TRAIN_IMAGES_DIR.as_posix())
    else:
        # # if train models using all train data, calc loss for all data at the evaluation step.
        val_dataset = datasets.LabeledImageDataset(
            pairs=list(
                zip((train_df["image_id"] + ".png").tolist(),
                    train_labels_arr)),
            root=config.TRAIN_IMAGES_DIR.as_posix())

    val_dataset = datasets.TransformDataset(
        val_dataset,
        nn_training.ImageTransformer(settings["inference_transforms"]))
    print("[make dataset] train: {}, val: {}".format(len(train_dataset),
                                                     len(val_dataset)))

    # # initialize model.
    model = nn_training.ImageClassificationModel(
        extractor=getattr(
            backborn_chains,
            settings["backborn_class"])(**settings["backborn_kwargs"]),
        global_pooling=None if settings["pooling_class"] is None else getattr(
            global_pooling_chains, settings["pooling_class"])(
                **settings["pooling_kwargs"]),
        classifier=getattr(classifer_chains,
                           settings["head_class"])(**settings["head_kwargs"]))
    model.name = settings["model_name"]

    # # set training wrapper.
    train_model = nn_training.CustomClassifier(
        predictor=model,
        lossfun=getattr(
            nn_training,
            settings["loss_function"][0])(**settings["loss_function"][1]),
        evalfun_dict={
            "SCE_{}".format(i): getattr(nn_training, name)(**param)
            for i, (name, param) in enumerate(settings["eval_functions"])
        })
    settings["eval_func_names"] = [
        "SCE_{}".format(i) for i in range(len(settings["eval_functions"]))
    ]
    gc.collect()

    # # training.
    # # # create trainer.
    utils.set_random_seed(settings["seed"])
    trainer = nn_training.create_trainer(settings, output_path.as_posix(),
                                         train_model, train_dataset,
                                         val_dataset)
    trainer.run()

    # # # save model of last epoch.
    model = trainer.updater.get_optimizer('main').target.predictor
    serializers.save_npz(output_path / "model_snapshot_last_epoch.npz", model)
    del trainer
    del train_model
    gc.collect()

    # # inference validation data with the model of the last epoch.
    _, val_iter, _ = nn_training.create_iterator(settings, None, val_dataset,
                                                 None)
    val_pred, val_label = nn_training.inference_test_data(
        model, val_iter, gpu_device=settings["gpu_devices"][0])
    np.save(output_path / "val_pred_arr_fold{}".format(settings["val_fold"]),
            val_pred)

    # # calc validation score
    score_list = [[] for i in range(2)]
    for i in range(len(config.N_CLASSES)):
        y_pred_subset = val_pred[:, config.COMP_INDEXS[i]:config.
                                 COMP_INDEXS[i + 1]].argmax(axis=1)
        y_true_subset = val_label[:, i]
        score_list[0].append(
            recall_score(y_true_subset,
                         y_pred_subset,
                         average='macro',
                         zero_division=0))
        score_list[1].append(
            recall_score(y_true_subset,
                         y_pred_subset,
                         average='macro',
                         zero_division=1))
    score_list[0].append(np.average(score_list[0], weights=[2, 1, 1]))
    score_list[1].append(np.average(score_list[1], weights=[2, 1, 1]))
    score_df = pd.DataFrame(score_list, columns=config.COMP_NAMES + ["score"])
    print(score_df)
    score_df.to_csv(output_path / "score.csv", index=False)
def test_invalid_column(self):
    root = os.path.join(os.path.dirname(__file__), 'image_dataset')
    path = os.path.join(root, 'img.lst')
    with self.assertRaises(ValueError):
        datasets.LabeledImageDataset(path)
def setUp(self):
    root = os.path.join(os.path.dirname(__file__), 'image_dataset')
    path = os.path.join(root, 'labeled_img.lst')
    self.dataset = datasets.LabeledImageDataset(
        path, root=root, dtype=self.dtype, label_dtype=self.label_dtype)
def get_datasets():
    labels = pd.read_csv(LabelsPath, index_col=0)
    # label: 1 -> 102
    ds = datasets.LabeledImageDataset(
        list(zip(labels["image"], labels["label"] - 1)),
        PreProcessedFlowerImagesDirectory)
    return datasets.split_dataset_random(ds,
                                         int(len(ds) * 0.8),
                                         seed=SplitDatasetSeed)
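# A hedged usage sketch for get_datasets: the batch size and iterator setup
# below are illustrative assumptions, not part of the original snippet.
from chainer import iterators

train, valid = get_datasets()  # 80/20 random split, fixed by SplitDatasetSeed
train_iter = iterators.SerialIterator(train, batch_size=32)
valid_iter = iterators.SerialIterator(valid, batch_size=32,
                                      repeat=False, shuffle=False)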
def inference_by_snapshot_ensemble(trained_path: PosixPath,
                                   output_path: PosixPath,
                                   gpu_device: int = -1,
                                   batch_size: int = 64):
    """Inference function for kernel."""
    # # read settings from training outputs directory.
    with open((trained_path / "settings.yml").as_posix(), "r") as fr:
        settings = yaml.safe_load(fr)

    # # make dataset
    # # # test set
    with utils.timer("make test dataset"):
        test_df = pd.read_csv(config.PROC_DATA / "test_reshaped.csv")
        sample_sub = pd.read_csv(config.RAW_DATA / "sample_submission.csv")
        # # # # make chainer dataset
        test_dataset = datasets.LabeledImageDataset(
            pairs=list(
                zip((test_df["image_id"] + ".png").tolist(),
                    ([-1] * len(test_df)))),
            root=config.TEST_IMAGES_DIR.as_posix())
        # # # # set transform
        test_dataset = datasets.TransformDataset(
            test_dataset,
            nn_training.ImageTransformer(settings["inference_transforms"]))

    # # # prepare model paths
    model_path_list = []
    model_weight = []
    for epoch_of_model in range(settings["epoch_per_cycle"],
                                settings["max_epoch"] + 1,
                                settings["epoch_per_cycle"]):
        model_path = trained_path / "model_snapshot_{}.npz".format(
            epoch_of_model)
        if os.path.isfile(model_path):
            model_path_list.append(model_path)
            model_weight.append(1)

    if len(model_path_list) == 0:
        model_path_list.append(trained_path / "model_snapshot_last_epoch.npz")
        model_weight.append(1)
    print("[using models]")
    print(model_path_list)

    # # # prepare preds numpy.ndarray of shape: (n_model, n_test, n_class)
    test_preds_arr = np.zeros(
        (len(model_path_list), len(test_df), sum(config.N_CLASSES)),
        dtype="f")

    # # inference
    with utils.timer("inference test set"):
        for idx, model_path in enumerate(model_path_list):
            # # # create iterator.
            test_iter = nn_training.create_iterator(settings, None, None,
                                                    test_dataset)[-1]
            # # # init and load model
            model = init_model(settings)
            serializers.load_npz(model_path, model)
            # # # move model to gpu
            model.to_gpu(gpu_device)
            # # # inference
            test_preds_arr[idx] = nn_training.inference_test_data(
                model, test_iter, gpu_device=gpu_device)[0]
            del test_iter
            del model
            gc.collect()
        del test_dataset

    np.save(
        output_path /
        "test_all_preds_arr_fold{}".format(settings["val_fold"]),
        test_preds_arr)

    # # ensemble (weighted averaging)
    with utils.timer("snapshot ensemble"):
        # # # convert logits to probs (softmax over the class axis of each component)
        for i in range(len(config.N_CLASSES)):
            test_preds_arr[..., config.COMP_INDEXS[i]:config.COMP_INDEXS[i + 1]] = \
                functions.softmax(
                    test_preds_arr[..., config.COMP_INDEXS[i]:config.COMP_INDEXS[i + 1]],
                    axis=2).data
        test_pred = np.average(test_preds_arr, axis=0, weights=model_weight)
        np.save(
            output_path / "test_pred_arr_fold{}".format(settings["val_fold"]),
            test_pred)

    with utils.timer("make submission"):
        # # convert prob to pred id
        for i, c_name in enumerate(config.COMP_NAMES):
            test_pred_subset = test_pred[:, config.COMP_INDEXS[i]:config.
                                         COMP_INDEXS[i + 1]].argmax(axis=1)
            test_df[c_name] = test_pred_subset
            del test_pred_subset
        del test_pred
        gc.collect()

        # # # reshape test_df to submission format.
        melt_df = pd.melt(test_df,
                          id_vars="image_id",
                          value_vars=config.COMP_NAMES,
                          value_name="target")
        melt_df["row_id"] = melt_df["image_id"] + "_" + melt_df["variable"]
        submission_df = pd.merge(sample_sub[["row_id"]],
                                 melt_df[["row_id", "target"]],
                                 on="row_id",
                                 how="left")
        submission_df.to_csv(output_path / "submission.csv", index=False)
                 sep=' ')  # space-delimited
df.head()

# Count the number of image files.

# In[4]:

len(df)

# ### Preparing the dataset
# Calling datasets.LabeledImageDataset(path to the CSV above) loads the image data and the labels in a form that Chainer can easily handle.

# In[5]:

from chainer import datasets
dataset = datasets.LabeledImageDataset(image_files)
dataset

# Let's check what kind of data the dataset holds. First, check the number of records in the dataset.

# In[6]:

len(dataset)

# We can see that the number of records in the dataset equals the number of image files. Now pass [0] to extract the first record.

# In[7]:

dataset[0]

# In[8]:
def inference(trained_path: PosixPath,
              output_path: PosixPath,
              epoch_of_model: int = -1,
              gpu_device: int = -1,
              batch_size: int = 64,
              inference_valid: bool = False):
    """Inference function for kernel."""
    # # read settings from training outputs directory.
    with open((trained_path / "settings.yml").as_posix(), "r") as fr:
        settings = yaml.safe_load(fr)

    # # make dataset
    # # # read meta info.
    with utils.timer("make val dataset"):
        val_dataset = test_dataset = None
        if inference_valid:
            train_df = pd.read_csv(
                config.PROC_DATA /
                "train_add-{}fold-index.csv".format(settings["n_folds"]))
            # # # # make label arr
            train_labels_arr = train_df[config.COMP_NAMES].values.astype("i")
            # # # # make chainer dataset
            val_dataset = datasets.LabeledImageDataset(
                pairs=list(
                    zip((train_df[train_df["fold"] == settings["val_fold"]]
                         ["image_id"] + ".png").tolist(),
                        train_labels_arr[
                            train_df["fold"] == settings["val_fold"], ...])),
                root=config.TRAIN_IMAGES_DIR.as_posix())
            # # # # set transform
            val_dataset = datasets.TransformDataset(
                val_dataset,
                nn_training.ImageTransformer(
                    settings["inference_transforms"]))

    # # # test set
    with utils.timer("make test dataset"):
        test_df = pd.read_csv(config.PROC_DATA / "test_reshaped.csv")
        sample_sub = pd.read_csv(config.RAW_DATA / "sample_submission.csv")
        # # # # make chainer dataset
        test_dataset = datasets.LabeledImageDataset(
            pairs=list(
                zip((test_df["image_id"] + ".png").tolist(),
                    ([-1] * len(test_df)))),
            root=config.TEST_IMAGES_DIR.as_posix())
        # # # # set transform
        test_dataset = datasets.TransformDataset(
            test_dataset,
            nn_training.ImageTransformer(settings["inference_transforms"]))

    with utils.timer("init and load model"):
        # # initialize model.
        settings["backborn_kwargs"]["pretrained_model_path"] = None
        model = nn_training.ImageClassificationModel(
            extractor=getattr(
                backborn_chains,
                settings["backborn_class"])(**settings["backborn_kwargs"]),
            global_pooling=None if settings["pooling_class"] is None else
            getattr(global_pooling_chains, settings["pooling_class"])(
                **settings["pooling_kwargs"]),
            classifier=getattr(
                classifer_chains,
                settings["head_class"])(**settings["head_kwargs"]))
        # # load model.
        model_path = trained_path / "model_snapshot_{}.npz".format(
            epoch_of_model)
        print(model_path)
        if not (epoch_of_model != -1 and os.path.isfile(model_path)):
            model_path = trained_path / "model_snapshot_last_epoch.npz"
        print("use model: {}".format(model_path))
        serializers.load_npz(model_path, model)
        if gpu_device != -1:
            model.to_gpu(gpu_device)
        gc.collect()

    settings["batch_size"] = batch_size
    _, val_iter, test_iter = nn_training.create_iterator(
        settings, None, val_dataset, test_dataset)

    if inference_valid:
        with utils.timer("inference validation set"):
            val_pred, val_label = nn_training.inference_test_data(
                model, val_iter, gpu_device=gpu_device)
            np.save(
                output_path /
                "val_pred_arr_fold{}".format(settings["val_fold"]), val_pred)
            # # calc score
            score_list = [[] for i in range(2)]
            for i in range(len(config.N_CLASSES)):
                y_pred_subset = val_pred[:, config.COMP_INDEXS[i]:config.
                                         COMP_INDEXS[i + 1]].argmax(axis=1)
                y_true_subset = val_label[:, i]
                score_list[0].append(
                    recall_score(y_true_subset,
                                 y_pred_subset,
                                 average='macro',
                                 zero_division=0))
                score_list[1].append(
                    recall_score(y_true_subset,
                                 y_pred_subset,
                                 average='macro',
                                 zero_division=1))
            del val_dataset
            del val_iter
            del val_pred
            del val_label
            del y_pred_subset
            del y_true_subset
            gc.collect()

            score_list[0].append(np.average(score_list[0], weights=[2, 1, 1]))
            score_list[1].append(np.average(score_list[1], weights=[2, 1, 1]))
            score_df = pd.DataFrame(score_list,
                                    columns=config.COMP_NAMES + ["score"])
            print("[score for validation set]")
            print(score_df)
            score_df.to_csv(output_path / "score.csv", index=False)

    with utils.timer("inference test set"):
        test_pred, test_label = nn_training.inference_test_data(
            model, test_iter, gpu_device=gpu_device)
        del test_label
        np.save(
            output_path / "test_pred_arr_fold{}".format(settings["val_fold"]),
            test_pred)

    with utils.timer("make submission"):
        # # # arg max for each component.
        for i, c_name in enumerate(config.COMP_NAMES):
            test_pred_subset = test_pred[:, config.COMP_INDEXS[i]:config.
                                         COMP_INDEXS[i + 1]].argmax(axis=1)
            test_df[c_name] = test_pred_subset
        del test_pred
        gc.collect()

        # # # reshape test_df to submission format.
        melt_df = pd.melt(test_df,
                          id_vars="image_id",
                          value_vars=config.COMP_NAMES,
                          value_name="target")
        melt_df["row_id"] = melt_df["image_id"] + "_" + melt_df["variable"]
        submission_df = pd.merge(sample_sub[["row_id"]],
                                 melt_df[["row_id", "target"]],
                                 on="row_id",
                                 how="left")
        submission_df.to_csv(output_path / "submission.csv", index=False)
def main():
    # Delete all files under the output directory.
    remove_dir_and_file(u'result')

    # Path to the file listing image file names and their labels.
    image_files = os.path.join(u'dataset', '03_duplicate_pict_anser.csv')

    # datasets.LabeledImageDataset loads this nicely as a dataset.
    dataset = datasets.LabeledImageDataset(image_files)
    #print (u'dataset')
    #print dataset[0]
    #print (u'---')

    # The data part has to be in the range 0-1, so divide by 255.
    dataset = chainer.datasets.TransformDataset(dataset, transform)

    # Use 80% as training data and 20% as test data.
    split_at = int(len(dataset) * 0.8)
    train, test = datasets.split_dataset(dataset, split_at)

    # Specify batching, whether to shuffle the data, and so on.
    train_iter = iterators.SerialIterator(train, batchsize, shuffle=True)
    test_iter = iterators.SerialIterator(test,
                                         batchsize,
                                         repeat=False,
                                         shuffle=True)

    # Define the model; also specify here whether to use the GPU.
    # model = MLP()
    # model.to_gpu(gpu_id)

    # Wrap the model in Classifier so that loss computation is part of the model.
    model = MLP()
    model = L.Classifier(model)
    model.to_gpu(gpu_id)

    # Choose the optimization method.
    optimizer = optimizers.SGD()
    optimizer.setup(model)

    # Pass the Iterator and Optimizer to the Updater.
    updater = training.StandardUpdater(train_iter, optimizer, device=gpu_id)

    # Pass the Updater to the Trainer.
    trainer = training.Trainer(updater, (max_epoch, 'epoch'), out='result')

    # Define how logs are reported, etc.
    trainer.extend(extensions.LogReport())
    trainer.extend(
        extensions.snapshot(filename='snapshot_epoch-{.updater.epoch}'))
    trainer.extend(
        extensions.snapshot_object(model.predictor,
                                   filename='model_epoch-{.updater.epoch}'))
    trainer.extend(extensions.Evaluator(test_iter, model, device=gpu_id))
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'main/accuracy', 'validation/main/loss',
            'validation/main/accuracy', 'elapsed_time'
        ]))
    trainer.extend(
        extensions.PlotReport(['main/loss', 'validation/main/loss'],
                              x_key='epoch',
                              file_name='loss.png'))
    trainer.extend(
        extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'],
                              x_key='epoch',
                              file_name='accuracy.png'))
    trainer.extend(extensions.dump_graph('main/loss'))

    # Start training!
    trainer.run()

    # Save the training result.
    model.to_cpu()  # make the model usable on the CPU
    serializers.save_npz(os.path.join(u'result', u'sakamotsu.model'), model)
def __init__(self, img_list, root):
    self.base = datasets.LabeledImageDataset(img_list, root)
def on_status(self, status):
    status.created_at += datetime.timedelta(hours=9)

    # When a reply has arrived
    if str(status.in_reply_to_screen_name) == bot_user_name:
        # Text of the reply message
        tweet_text = "@" + str(status.user.screen_name) + " "
        # Fetch the mentions timeline
        time_line = api.mentions_timeline()
        # Content of the newest message on the timeline
        print("リプライが届きました...\n[@" + status.user.screen_name + "]\n" +
              time_line[0].text + "\n")
        # Prefix for the file names
        date_name = re.split(' ', str(datetime.datetime.today()))[0] + '_'

        # 1. save the reply image -> 2. crop the face and save it as cat.jpg
        # -> 3. classify it with chainer

        # 1. save the reply image
        try:
            j = 0
            reply_images = []
            for img in time_line[0].extended_entities['media']:
                # print(img['media_url'])
                reply_image = urllib.request.urlopen(img['media_url'])
                # Decide the file name and store it in the list
                image_name = date_name + str(
                    time_line[0].id) + '-' + str(j) + '.jpg'
                reply_images.append(image_name)
                # Read and save the image
                image_file = open(image_name, 'wb')
                image_file.write(reply_image.read())
                image_file.close()
                reply_image.close()
                print('画像 ' + image_name + ' を保存しました')
                j = j + 1
        except:
            # Exception handling
            if j == 0:
                tweet_text += "Error:画像がありませんฅ(´・ω・`)ฅにゃーん"
            else:
                tweet_text += "Error:画像の保存に失敗しましたฅ(´・ω・`)ฅにゃーん"
            api.update_status(status=tweet_text,
                              in_reply_to_status_id=status.id)
            print(tweet_text)
            return True

        # 2. crop the face and save it as cat.jpg
        try:
            image = cv2.imread(reply_images[0])
            image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            cascade = cv2.CascadeClassifier("cat_cascade.xml")
            face_images = cascade.detectMultiScale(image_gray,
                                                   scaleFactor=1.1,
                                                   minNeighbors=1,
                                                   minSize=(1, 1))
            face_image_len = 0
            if len(face_images) > 0:
                for (x, y, w, h) in face_images:
                    face_image = image[y:y + h, x:x + w]
                    if face_image_len < w:
                        # Keep the largest detected face so far
                        face_image_len = w
                        cv2.imwrite("cat_face.jpg", face_image)
                        face_image = cv2.resize(face_image, (64, 64))
                        cv2.imwrite("cat_face_min.jpg", face_image)
            else:
                tweet_text += "Error:猫の顔が検出できませんでした...ฅ(´・ω・`)ฅにゃーん"
                api.update_status(status=tweet_text,
                                  in_reply_to_status_id=status.id)
                print(tweet_text)
                return True
        except:
            tweet_text += "Error:猫の顔の検出に失敗しました...ฅ(´・ω・`)ฅにゃーん"
            api.update_status(status=tweet_text,
                              in_reply_to_status_id=status.id)
            print(tweet_text)
            return True

        # 3. classify with chainer
        try:
            data = [('cat_face_min.jpg', 3), ('cat_face_min.jpg', 3)]
            d = datasets.LabeledImageDataset(data)

            def transform(data):
                img, label = data
                img = img / 255.
                return img, label

            d = datasets.TransformDataset(d, transform)
            train, test = datasets.split_dataset(d, 1)
            x, t = test[0]
            x = x[None, ...]
            y = self.model(x)
            y = y.data

            cats = [
                "スフィンクス", "アビシニアン", "ベンガル", "バーマン", "ボンベイ",
                "ブリティッシュショートヘア", "エジプシャンマウ", "メインクーン", "ペルシャ",
                "ラグドール", "ロシアンブルー", "シャム"
            ]
            cats_images = [
                "Sphynx.jpg", "Abyssinian.jpg", "Bengal.jpg", "Birman.jpg",
                "Bombay.jpg", "British_Shorthair.jpg", "Egyptian_Mau.jpg",
                "Maine_Coon.jpg", "Persian.jpg", "Ragdoll.jpg",
                "Russian_Blue.jpg", "Siamese.jpg"
            ]
            tweet_text += "この猫は... " + cats[y.argmax(
                axis=1)[0]] + " ですฅ(´・ω・`)ฅにゃーん"
            media_images = [
                "cat_face.jpg",
                "./cat_images/" + cats_images[y.argmax(axis=1)[0]]
            ]
            media_ids = [
                api.media_upload(i).media_id_string for i in media_images
            ]
            api.update_status(status=tweet_text,
                              in_reply_to_status_id=status.id,
                              media_ids=media_ids)
            print(tweet_text)
            return True
        except:
            tweet_text += "Error:猫の顔の判定に失敗しました...ฅ(´・ω・`)ฅにゃーん"
            api.update_status(status=tweet_text,
                              in_reply_to_status_id=status.id)
            print(tweet_text)
            return True

    return True
model = Model()
cuda.cudnn_enabled = True
cuda.check_cuda_available()
xp = cuda.cupy
model.to_gpu()
#serializers.load_hdf5(args.model, model)

train_pair_path = 'data/Annotations/train_label.txt'
image_path = 'data/Imageset/live2_renamed_images'
save_model_path = 'models/nr_jay_live2.model'
file_path = './patches_weight.txt'

# get the train dataset: (image_array, label), in the order of the labels
train = datasets.LabeledImageDataset(train_pair_path,
                                     image_path,
                                     dtype=np.float32,
                                     label_dtype=np.int32)
#val = datasets.LabeledImageDataset(val_pair_path, image_path, dtype=np.float32, label_dtype=np.int32)
train_img_num = len(train)
#val_img_num = len(val)
#print img_num

patches_per_img = 256  # extract all the images into 32x32-pixel patches

f_label = open(train_pair_path, 'r')
f_patch = open(file_path, 'r')
list_label = [line.split()[0] for line in f_label.readlines()]  # get names of the training images
def __init__(self, path, root, mean, crop_size, random=True):
    self.base = datasets.LabeledImageDataset(path, root)
    self.mean = mean.astype(chainer.get_dtype())
    self.crop_size = crop_size
    self.random = random