def _main():
    """Script entry point: load the statistics CSV, fix up its log paths,
    and write the (possibly updated) table to the requested output.
    """
    options = cmdargv.parse_argv()
    stats = pd.read_csv(options.statistics)
    # Presumably rewrites a log-path column relative to the statistics
    # file's location — TODO confirm against _remap_logpath's definition.
    _remap_logpath(stats, options.statistics)
    with _open_output(options.output) as out:
        out.write(stats.to_csv())
import os
import sys
import time

import numpy as np
import pandas as pd
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers.core import Dense
from keras.layers.recurrent import SimpleRNN
from keras.models import Sequential, load_model
from sklearn import metrics
from sklearn.model_selection import train_test_split

import cmdargv
import common
import encoder as enc

# Fraction of the training data held out for validation.
VALIDATION_SPLIT = 0.2

# NOTE(review): ANN_NAME is not defined in this chunk — presumably declared
# elsewhere in the file; confirm before running this module in isolation.
options = cmdargv.parse_argv(sys.argv, ANN_NAME)

# read file
print('===== read file =====')
df = pd.read_csv(options.dataset)
print(df.info())
common.dropp_columns_regex(df, options.exclude)

# dealing with: NaN, ∞, -∞
print('===== cleanup =====')
dropped_columns = common.cleanup(df)
print('dropped_columns: {}'.format(dropped_columns))

# encode
print('===== encode =====')
from keras.datasets import fashion_mnist
from keras.layers import Dense, Flatten
from keras.losses import SparseCategoricalCrossentropy
from keras.models import Sequential
from sklearn import metrics

import cmdargv
import save_result
import tf_tricks
from CustomLogger import CustomLogger

# Training hyper-parameters.
batch_size = 128
epochs = 12

# read commandline arguments
options = cmdargv.parse_argv()

# TensorFlow wizardry
if options.allow_growth:
    tf_tricks.allow_growth()
if options.fp16:
    tf_tricks.mixed_precision()

# NOTE(review): `time` is not imported in this visible chunk — presumably
# imported earlier in the file; confirm before running in isolation.
start_time = time.time()  # -------------------------------------------------┐

# Fashion-MNIST ships as uint8 images; scale pixels from [0, 255] to [0, 1].
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
train_images = train_images / 255.0
test_images = test_images / 255.0

preprocess_time = time.time() - start_time  # --------------------------------┘

start_time = time.time()  # -------------------------------------------------┐