Example #1
def train_model(model, X_train, y_train, name, config):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, 1, 4), Input data for train.
        y_train: ndarray(number, ), result data for train.
        (7776, 4, 1)
        (7776,)
        name: String, name of model.
        config: Dict, parameter for train.
    """
    opt = keras.optimizers.Adam(learning_rate=0.001)
    # early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
    model.compile(loss="mse", optimizer=opt, metrics=['mape'])
    hist = model.fit(X_train,
                     y_train,
                     batch_size=config["batch"],
                     epochs=config["epochs"],
                     validation_split=0.05)

    model.save('model/model_out/' + name + '.h5')
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/model_loss/' + name + '_loss.csv',
              encoding='utf-8',
              index=False)
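A minimal usage sketch for train_model; the config keys match the lookups in the body above, but the concrete values and the lstm/X_train/y_train names here are hypothetical:

    # hypothetical call; assumes a ready Keras model and data shaped as in the docstring
    config = {"batch": 256, "epochs": 600}
    train_model(lstm, X_train, y_train, "lstm", config)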
Example #2
def main():
	# load MNIST images
	images, labels = dataset.load_train_images()

	# config
	config = model.config

	# settings
	max_epoch = 10000
	num_trains_per_epoch = 5000
	num_validation_data = 10000
	batchsize = 128

	# seed
	np.random.seed(args.seed)
	if args.gpu_device != -1:
		cuda.cupy.random.seed(args.seed)

	# save validation accuracy per epoch
	csv_results = []

	# create semi-supervised split
	training_images, training_labels, validation_images, validation_labels = dataset.split_data(images, labels, num_validation_data, seed=args.seed)
	training_labels = np.random.randint(0, config.num_classes, training_labels.size).astype(np.int32)
	validation_labels = np.random.randint(0, config.num_classes, validation_labels.size).astype(np.int32)

	# training
	progress = Progress()
	for epoch in xrange(1, max_epoch):
		progress.start_epoch(epoch, max_epoch)
		sum_loss = 0

		for t in xrange(num_trains_per_epoch):
			# sample from data distribution
			image_batch, label_batch = dataset.sample_data(training_images, training_labels, batchsize, binarize=False)
			image_batch = np.reshape(image_batch, (-1, 1, 28, 28))
			distribution = model.discriminate(image_batch, apply_softmax=False)
			loss = F.softmax_cross_entropy(distribution, model.to_variable(label_batch))
			sum_loss += float(loss.data)

			model.backprop(loss)

			if t % 10 == 0:
				progress.show(t, num_trains_per_epoch, {})

		model.save(args.model_dir)
		train_accuracy = compute_accuracy(training_images, training_labels)
		validation_accuracy = compute_accuracy(validation_images, validation_labels)
		
		progress.show(num_trains_per_epoch, num_trains_per_epoch, {
			"loss": sum_loss / num_trains_per_epoch,
			"accuracy (validation)": validation_accuracy,
			"accuracy (train)": train_accuracy,
		})

		# write accuracy to csv
		csv_results.append([epoch, train_accuracy, validation_accuracy, progress.get_total_time()])
		data = pd.DataFrame(csv_results)
		data.columns = ["epoch", "train_accuracy", "validation_accuracy", "min"]
		data.to_csv("{}/result.csv".format(args.model_dir))
Example #3
def train_model(model, X_train, y_train, name, config):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, lags), Input data for train.
        y_train: ndarray(number, ), result data for train.
        name: String, name of model.
        config: Dict, parameter for train.
    """
    mlflow.set_tracking_uri("http://127.0.0.1:5000")
    tracking_uri = mlflow.get_tracking_uri()
    print("Current tracking uri: {}".format(tracking_uri))

    tags = {"usuario": "Anonymous"}

    mlflow.set_experiment("traffic_flow-saes")
    with mlflow.start_run() as run:
        mlflow.set_tags(tags)
        mlflow.keras.autolog()

        model.compile(loss="mse", optimizer="rmsprop", metrics=['mape'])
        #early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
        hist = model.fit(X_train,
                         y_train,
                         batch_size=config["batch"],
                         epochs=config["epochs"],
                         validation_split=0.05)

        model.save('model/' + name + '.h5')
        df = pd.DataFrame.from_dict(hist.history)
        df.to_csv('model/' + name + ' loss.csv', encoding='utf-8', index=False)
        mlflow.log_param("Run_id", run.info.run_id)
Example #4
def train(fname, out_fname):
    """ Train and save CNN model on ShipsNet dataset

    Args:
        fname (str): Path to ShipsNet JSON dataset
        out_fname (str): Path to output Tensorflow model file (.tfl)
    """

    # Load shipsnet data
    f = open(fname)
    shipsnet = json.load(f)
    f.close()

    # Preprocess image data and labels for input
    X = np.array(shipsnet['data']) / 255.
    X = X.reshape([-1, 3, 80, 80]).transpose([0, 2, 3, 1])
    Y = np.array(shipsnet['labels'])
    Y = to_categorical(Y, 2)

    # Train the model
    model.fit(X,
              Y,
              n_epoch=50,
              shuffle=True,
              validation_set=.2,
              show_metric=True,
              batch_size=128,
              run_id='shipsnet')

    # Save trained model
    model.save(out_fname)
Example #5
def train_model(model, X_train, y_train, name, config):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, lags), Input data for train.
        y_train: ndarray(number, ), result data for train.
        name: String, name of model.
        config: Dict, parameter for train.
        
    """
    # Define the Keras TensorBoard callback.
    logdir = os.path.join(
        "logs",
        "fit",
        name,
        'lstm_4_4',
        datetime.now().strftime("%Y%m%d-%H%M"),
    )
    tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)

    model.compile(loss="mse", optimizer="adam", metrics=['mape'])
    early = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')
    hist = model.fit(
        X_train, y_train,
        batch_size=config["batch"],
        epochs=config["epochs"],
        validation_split=0.05,
        callbacks=[tensorboard_callback, early])
    print(name)
    model.save('model/' + name + '4_layers_4'  + '.h5')
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/' + name  +' loss.csv', encoding='utf-8', index=False)
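Once training has written logs under logs/fit as above, the runs can be inspected by pointing TensorBoard at that directory:

    tensorboard --logdir logs/fit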
Example #6
 def on_epoch_end(self, epoch, logs=None):
     # Called after each epoch; if the current loss is lower, save the current model.
     # Training could also be stopped once logs['loss'] falls below a chosen threshold.
     loss_value.append(logs['loss'])
     print(loss_value)
     if logs['loss'] <= self.lowest:
         self.lowest = logs['loss']
         model.save(BEST_MODEL_PATH)
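on_epoch_end here is a method of a custom callback; a minimal sketch of the surrounding pieces, assuming the standard keras.callbacks.Callback pattern (the class name is made up, and BEST_MODEL_PATH, model and loss_value come from the enclosing script):

    loss_value = []

    class SaveBestModel(keras.callbacks.Callback):
        # hypothetical wrapper; self.lowest tracks the best loss seen so far
        def __init__(self):
            super().__init__()
            self.lowest = float('inf')

        # ... on_epoch_end as shown above ...

    # registered when training starts, e.g.:
    # model.fit(x_train, y_train, epochs=10, callbacks=[SaveBestModel()])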
Example #7
def train_model(model, X_train, y_train, name, config):
    model.compile(loss='mse', optimizer='rmsprop', metrics=['mape'])
    hist = model.fit(X_train, y_train,
                     batch_size=config['batch'],
                     epochs=config['epochs'],
                     validation_split=0.05)
    model.save('model/' + name + '.h5')
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/' + name + '_loss.csv', encoding='utf-8', index=False)
Example #8
 def on_epoch_end(self, epoch, logs=None):
     # Called after each epoch finishes.
     # If the current loss is lower, save the current model weights.
     if logs['loss'] <= self.lowest:
         self.lowest = logs['loss']
         model.save(settings.BEST_MODEL_PATH)
     # Randomly generate a few classical-style poems to check how training is going.
     print()
     for i in range(settings.SHOW_NUM):
         print(utils.generate_random_poetry(tokenizer, model))
Example #9
def main():
    parser = _setup_parser()
    args = parser.parse_args()
    data = Data(args.batch_size)

    print("Model Summary : ")
    print(model)
    
    train(args, data)
    model.save(args.save_path)
Example #10
def _train_test():
    tag_folder = '../data/2015/training/event_tags/'
    data_folder = '../data/2015/training/stanford_parse/'
    data = get_data(tag_folder, data_folder)

    if not combined:
        train_data_context_x, train_data_context_pos_deprel, train_data_lemma_x, train_data_pos_deprel, train_data_children_pos_deprel, train_data_y = _get_data(
            data)
    else:
        train_x1, train_y1 = _get_joint(data)

    tag_folder = '../data/2015/eval/event_tags/'
    data_folder = '../data/2015/eval/stanford_parse/'
    data = get_data(tag_folder, data_folder)

    if not combined:
        test_data_context_x, test_data_context_pos_deprel, test_data_lemma_x, test_data_pos_deprel, test_data_children_pos_deprel, test_data_y = _get_data(
            data)
    else:
        train_x2, train_y2 = _get_joint(data)

    tag_folder = '../data/2016/event_tags/'
    data_folder = '../data/2016/stanford_parse/'
    data = get_data(tag_folder, data_folder)
    train_x3, train_y3 = _get_joint(data)

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    if not combined:
        model.fit([
            np.array(train_data_context_x + test_data_context_x),
            np.array(train_data_context_pos_deprel +
                     test_data_context_pos_deprel),
            np.array(train_data_lemma_x + test_data_lemma_x),
            np.array(train_data_pos_deprel + test_data_pos_deprel),
            np.array(train_data_children_pos_deprel +
                     test_data_children_pos_deprel),
        ],
                  np.array(train_data_y + test_data_y),
                  batch_size=1500,
                  nb_epoch=15,
                  verbose=1,
                  shuffle=True)
    else:
        model.fit(np.array(train_x1 + train_x2 + train_x3),
                  np.array(train_y1 + train_y2 + train_y3),
                  batch_size=1000,
                  nb_epoch=15,
                  verbose=1,
                  shuffle=True)

    model.save('realis_models/model_6.h5')
    """
Example #11
def save_model(model, history):
    if not gfile.Exists(MODEL_DIR):
        gfile.MakeDirs(MODEL_DIR)

    model.save(MODEL_FILE)

    if gfile.Exists(HISTORY_DIR) == False:
        gfile.MakeDirs(HISTORY_DIR)

    with open(HISTORY_FILE, 'wb') as f:
        pickle.dump(history.history, f)
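A hedged counterpart for loading back what save_model writes (assuming the standard tf.keras load_model API and the same MODEL_FILE / HISTORY_FILE constants):

    from tensorflow.keras.models import load_model
    import pickle

    model = load_model(MODEL_FILE)      # restores architecture, weights and optimizer state
    with open(HISTORY_FILE, 'rb') as f:
        history = pickle.load(f)        # plain dict of per-epoch metrics, e.g. history['loss']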
Example #12
def train_model(model, X_train, y_train, name, config):
    model.compile(loss="mse", optimizer="rmsprop", metrics=['mape'])
    hist = model.fit(X_train,
                     y_train,
                     batch_size=config["batch"],
                     epochs=config["epochs"],
                     validation_split=0.05)
    temp = 'scaler'
    model.save('model/' + name + temp + '.h5')
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/' + name + temp + ' loss.csv',
              encoding='utf-8',
              index=False)
Example #13
def train_model(model, X_train, y_train, name, config):

    model.compile(loss="mse", optimizer="rmsprop", metrics=['mape'])
    # early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
    hist = model.fit(X_train,
                     y_train,
                     batch_size=config["batch"],
                     epochs=config["epochs"],
                     validation_split=0.05)

    model.save('model/' + name + '.h5')
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/' + name + ' loss.csv', encoding='utf-8', index=False)
Example #14
def save_checkpoint(name, model):
    os.makedirs(f'{logdir}/{name}', exist_ok=True)
    loader = DisjointLoader(dataset_tr, batch_size=batch_size, epochs=1)
    all_pred_types = []
    all_actual_types = []
    print('>>> saving checkpoint <<<')
    for batch in loader:
        nodes, adj, edges = batch[0]
        actions, targets, mask = forward(model, *batch, training=False)
        pred_types, actual_types = log_prediction(nodes, targets, actions, mask)
        print('pred_types:', pred_types)
        print('actual_types:', actual_types)

        all_pred_types.extend(pred_types)
        all_actual_types.extend(actual_types)

    unique, counts = np.unique(all_actual_types, return_counts=True)
    label_dist = dict(zip(unique, counts))

    # confusion matrix
    import pandas as pd
    import seaborn as sn
    from matplotlib import pyplot as plt

    all_possible_types = [ i + 1 for i in range(max(*all_actual_types, *all_pred_types)) ]
    actual_df = pd.Categorical(all_actual_types, categories=all_possible_types)
    predicted_df = pd.Categorical(all_pred_types, categories=[*all_possible_types, 'Totals'])
    cm = pd.crosstab(actual_df, predicted_df, rownames=['Actual'], colnames=['Predicted'])

    for idx in all_actual_types:
        if idx not in all_pred_types:
            cm[idx] = 0

    totals = [ sum(row) for (_, row) in cm.iterrows() ]
    cm['Totals'] = totals
    sorted_cols = sorted([ c for c in cm.columns if type(c) is int ])
    sorted_cols.append('Totals')
    cm = cm.reindex(sorted_cols, axis=1)

    sn.heatmap(cm, annot=True)
    plt.title(f'confusion matrix ({name})')
    plt.savefig(f'{logdir}/{name}/confusion_matrix.png')
    plt.clf()

    # save the model(s)
    model.save(f'{logdir}/{name}/model')
Example #15
def train_allDense_model(model, X_train, y_train, name, config, lag):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, lags), Input data for train.
        y_train: ndarray(number, ), result data for train.
        name: String, name of model.
        config: Dict, parameter for train.
    """

    model.compile(loss="mse", optimizer="rmsprop", metrics=['mape'])
    hist = model.fit(X_train,
                     y_train,
                     batch_size=config["batch"],
                     epochs=config["epochs"])

    model.save('model/' + name + '-' + str(lag) + '.h5')
Example #16
def train_model(model, X_train, y_train, name, config, lag):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, lags), Input data for train.
        y_train: ndarray(number, ), result data for train.
        name: String, name of model.
        config: Dict, parameter for train.
    """

    model.compile(loss="mse", optimizer="rmsprop", metrics=['mape'])
    # early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
    hist = model.fit(X_train,
                     y_train,
                     batch_size=config["batch"],
                     epochs=config["epochs"],
                     validation_split=0.05)

    model.save('model/' + name + '-' + str(lag) + '.h5')
Example #17
def train(fname, out_fname):
    """ 
    All data was stored in a json file.
    """
    # Load dataset
    f = open(fname)
    planesnet = json.load(f)
    f.close()

    # Preprocess image data and labels for input
    X = np.array(planesnet['data']) / 255.
    X = X.reshape([-1,3,20,20]).transpose([0,2,3,1])
    Y = np.array(planesnet['labels'])
    Y = to_categorical(Y, 2)

    # Train the model
    model.fit(X, Y, n_epoch=50, shuffle=True, validation_set=.2,
              show_metric=True, batch_size=128, run_id='planesnet')

    # Save trained model
    model.save(out_fname)
Example #18
def train_model(model, X_train, y_train, name, config):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, lags), Input data for train.
        y_train: ndarray(number, ), result data for train.
        name: String, name of model.
        config: Dict, parameter for train.
    """

    model.compile(loss="mse", optimizer="rmsprop", metrics=['rmse'])
    # early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
    hist = model.fit(X_train,
                     y_train,
                     batch_size=config["batch"],
                     epochs=config["epochs"],
                     validation_split=0.05)

    model.save('model/' + name + '.h5')
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/' + name + ' loss.csv', encoding='utf-8', index=False)
Example #19
            # keras.callbacks.TensorBoard(write_images=True),
            keras.callbacks.ModelCheckpoint('models/model_latest.h5'),
            # keras.callbacks.ModelCheckpoint('models/model_saved{epoch:08d}.h5', save_freq=5),
            ModelCheckpoint(
                "models/model_best.h5",
                monitor='loss',
                verbose=0,
                save_best_only=True,
                mode='auto',
                # save_freq=10
            ),
            tf.keras.callbacks.EarlyStopping(monitor='loss',
                                             patience=10,
                                             restore_best_weights=True),
            # tf.keras.callbacks.ReduceLROnPlateau(
            #     monitor="loss",
            #     factor=0.1,
            #     patience=4,
            #     verbose=1,
            #     mode="auto",
            #     min_delta=1E-7,
            #     cooldown=0,
            #     # min_lr=0.0000001,
            # )
        ],
    )

    model.save('models/model_saved.h5')
    print('Done')
    print('Time:', util.time_delta(time() - start_time))
Example #20
print('Will train with {} and test with {} samples'.format(
    len(train_inputs[0]), len(test_inputs[0])))

avg_winners = np.mean(train_output, axis=0)


def custom_loss(y_true, y_pred):
    normalized_error = (y_pred - y_true) / avg_winners
    return tf.reduce_mean(tf.math.square(normalized_error), axis=1)


model.compile(optimizer='adam', loss=[None, custom_loss])
model.fit(train_inputs,
          train_output,
          validation_data=(test_inputs, test_output),
          epochs=1000,
          callbacks=[
              tf.keras.callbacks.EarlyStopping('loss', patience=5),
              tf.keras.callbacks.TensorBoard(log_dir='logs/' +
                                             time.strftime('%Y%m%d%H%M%S'),
                                             histogram_freq=1)
          ])

model.save('results/model.h5', include_optimizer=False)
normal_probs, lucky_probs = model.get_layer('gather_probs_layer').get_probs()
normal_probs = pd.Series(normal_probs, index=np.arange(1, 50))
lucky_probs = pd.Series(lucky_probs, index=np.arange(1, 11))
normal_probs.to_csv('results/normal_probs.csv', header=False)
lucky_probs.to_csv('results/lucky_probs.csv', header=False)
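Since the model above is saved with include_optimizer=False and contains a custom gather_probs_layer, reloading it later is a bit different from the other examples; a minimal sketch, assuming tf.keras:

    from tensorflow.keras.models import load_model

    # compile=False because no optimizer/loss state was stored; the project's custom
    # layer class would also need to be passed via custom_objects for this to load
    model = load_model('results/model.h5', compile=False)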
Example #21
history = model.fit_generator(
      train_generator,
      steps_per_epoch=200,
      epochs=30,
      validation_data=validation_generator,
      validation_steps=50)

model.save('cats_and_dogs_small_1.h5')

######### Plot the graphs #############

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
Example #22
import keras
from keras.callbacks import CSVLogger

import path
from model import model
import batch
import logger

path.run.make()
run = path.run.loadCurrent()

model.compile(loss='categorical_crossentropy',
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])

model.fit_generator(batch.trainIterator,
                    validation_data=batch.validationIterator,
                    steps_per_epoch=batch.trainIterator.n / batch.size,
                    epochs=8,
                    callbacks=[
                        keras.callbacks.CSVLogger(run.log,
                                                  separator=',',
                                                  append=False)
                    ])

model.save(run.model)
logger.addModelDiagram(run)
logger.addModelSummary(run)
logger.addAccuracyPlot(run)
Example #23
batch_size = 32
epochs = 3

if __name__ == '__main__':
    # Get data
    (X_1, y_1) = get_data_from_list(glob('images/positive/*'), 1)
    (X_2, y_2) = get_data_from_list(glob('images/negative/**/*.*'), 0)
    X = np.append(X_1, X_2, axis=0)
    y = np.append(y_1, y_2, axis=0)

    # important: split beforehand, since model.fit won't shuffle the data into a random validation set
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.3)

    # Init model structure
    model = model((200, 300, 3), 1)
    model.compile(loss="binary_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])

    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(X_val, y_val),
              callbacks=[WandbCallback(log_batch_frequency=10)])
    model.save('models/base_model_v13.h5')

    # Note:
    # Keras averages the train acc/loss over all batches in an epoch,
    # so the end-of-epoch val acc/loss can look better than the reported train acc/loss.
Example #24
from time import time

import keras as k

import util
from dataset import data_training
from model import model


start_time = time()
data = data_training()

print('Training model...')
model.fit(data[0], data[1],
	batch_size=512, epochs=50,
	callbacks=[k.callbacks.TensorBoard(write_images=True)])
model.save('data/model.h5')
print('Done')
print('Time:', util.time_delta(time() - start_time))
Example #25
from keras.optimizers import SGD

import batch
from model import model

model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=1e-3),
              metrics=['accuracy'])

model.fit_generator(batch.trainingBatchIterator,
                    steps_per_epoch=batch.sampleSize / batch.size,
                    epochs=2)

model.save('fine_tune.h5')
Example #26
le.fit(y)
y = le.transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

with open("X_test.npy", "wb+") as f:
    np.save(f, X_test)

with open("y_test.npy", "wb+") as f:
    np.save(f, y_test)

logdir = "logs/scalars/" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)

early_callback = keras.callbacks.EarlyStopping(monitor='val_loss',
                                               min_delta=1e-5,
                                               patience=5)
checkpoint_callback = keras.callbacks.ModelCheckpoint(
    filepath="best_so_far.h5", save_best_only=True)

model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy", "categorical_accuracy", f1])
model.fit(
    X_train,
    y_train,
    validation_split=0.2,
    batch_size=500,
    epochs=1000,
    callbacks=[tensorboard_callback, early_callback, checkpoint_callback])
model.save("model.h5")
Example #27
        image = ndimage.imread('train_folder/face_{}_{}.jpg'.format(
            str(i + 1).zfill(3),
            str(j).zfill(5)))
        np.append(x_test, image)
        y_test[index] = i
        index += 1

x_train_array = np.load('x_train.npy')
y_train_array = np.load('y_train.npy')

print('Fit model')

if not os.path.exists('my_model-two.h5'):
    model.fit(x_train_array,
              y_train_array,
              epochs=10,
              batch_size=128,
              verbose=1)
    model.save('my_model-two.h5')
else:
    model = load_model('my_model-two.h5')

print('Evaluate')
x_test_array = np.expand_dims(np.asarray(x_test), axis=3)
y_test_array = to_categorical(y_test, num_classes=7)

# score = model.evaluate(x_test_array, y_test_array, batch_size=128)
print(x_test_array[0].shape)
score = model.predict(x_test_array)
print(score)
Example #28
            train_ = train(r)
            p.append(train_[1].tolist())
            q.append(train_[2].tolist())

            # a_t = []
            # for ii in train_[0]:
            #     for iii in ii:
            #         a_t.extend(iii)
            # a.append(a_t)

            a_t = []
            for ii in train_[0].tolist():
                a_t.extend(ii)
            a.append(a_t)
            an.append(train_[3].tolist())
        p = np.array(p)
        q = np.array(q)
        a = np.array(a)
        an = np.array(an)
        print(p.shape, q.shape, a.shape, an.shape)
        yield ([p, q, a], [a, an])


model = model()
model.fit_generator(dgen(),
                    steps_per_epoch=100,
                    epochs=30,
                    validation_data=dgen(),
                    validation_steps=20)
model.save('oporc_1.h5')
Example #29
"""
Train and export machine learning model using MNIST dataset
"""

from model import model

# Data loading and preprocessing
import tflearn.datasets.mnist as mnist

X, Y, testX, testY = mnist.load_data(one_hot=True)
X = X.reshape([-1, 28, 28, 1])
testX = testX.reshape([-1, 28, 28, 1])

# Train the model
model.fit({'input': X}, {'target': Y},
          n_epoch=20,
          shuffle=True,
          validation_set=({
              'input': testX
          }, {
              'target': testY
          }),
          show_metric=True,
          batch_size=128,
          run_id='convnet_mnist')

# Save trained model
model.save("models/model.tfl")
Example #30
import argparse

import tensorflow as tf

from model import model
from data import data

def parse():
    parser =  argparse.ArgumentParser()
    parser.add_argument('--action', required=True)
    parser.add_argument('--datatype')
    parser.add_argument('--load', action='store_true')
    args = parser.parse_args()
    return args

args = parse()

if args.action == 'data':
    if args.datatype != 'gigaword' and args.datatype != 'reuters' and args.datatype != 'cnn':
        print('Invalid data type.')
    else:
        data = data(args)
        data.prepare_data(args)
else:
    sess = tf.Session()
    model = model(sess, args)
    if(args.action == 'pretrain'):
        model.pretrain()
    elif(args.action == 'train'):
        model.train()
    elif(args.action == 'test'):
        model.test()
    elif(args.action == 'save'):
        model.save()