Example #1
def build_model():
    model = Sequential()
    model.add(Conv2D(64, (5, 5), (1, 1), "SAME", activation="relu", input_shape=(306, 408, 3)))
    model.add(MaxPool2D((3, 3), (2, 2), 'same'))
    model.add(Conv2D(64, (5, 5), (1, 1), "SAME", activation="relu"))
    model.add(MaxPool2D((3, 3), (2, 2), 'same'))
    model.add(Conv2D(64, (5, 5), padding="SAME", activation='relu'))
    model.add(MaxPool2D((3, 3), (2, 2), 'same'))
    model.add(Conv2D(16, (5, 5), padding="SAME", activation='relu'))
    model.add(MaxPool2D((3, 3), (2, 2), 'same'))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(8, activation='relu'))
    optimizer = Adadelta()
    model.compile(optimizer, loss=mean_squared_error)
    model.summary()
    train_X, train_y = GET_DATA.get_batches_data()
    cost_values = []
    for step in range(1000):
        cost = model.train_on_batch(train_X, train_y)
        cost_values.append(cost)
        if step % 10 == 0:
            print("step %d , cost value is %.3f" % (step, cost))
    model.save("./model1.h5")
    plt.plot(cost_values)
    plt.show()
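This snippet is an excerpt and omits its imports; a minimal preamble it appears to assume could look like the sketch below (GET_DATA is a stand-in for whatever data module the original script used, not a real package):

# Hypothetical preamble for Example #1; GET_DATA is assumed to be defined elsewhere.
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, Flatten, Dense
from tensorflow.keras.optimizers import Adadelta
from tensorflow.keras.losses import mean_squared_error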
Example #2
class PPOValueBrain:
    def __init__(
        self,
        learning_rate: float = 0.0001,
        hidden_layers_count: int = 0,
        neurons_per_hidden_layer: int = 0,
    ):
        self.model = Sequential()

        for i in range(hidden_layers_count):
            self.model.add(Dense(neurons_per_hidden_layer, activation=tanh))

        self.model.add(Dense(1, activation=linear, use_bias=True))
        self.model.compile(loss=mse, optimizer=Adam(lr=learning_rate))

    def predict(self, state: np.ndarray) -> np.ndarray:
        return self.model.predict(np.array((state,)))[0]

    def train(self, states: np.ndarray, targets: np.ndarray):
        self.model.train_on_batch(states, targets)

    def save_model(self, filename: str):
        self.model.save(f"{filename}_critic.h5")

    def load_model(self, filename: str):
        self.model = load_model(filename)
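For illustration, the critic might be driven like this (state size and paths are invented; the class's own Keras imports are assumed to be in scope):

# Hypothetical usage of PPOValueBrain with a toy 8-dimensional state.
import numpy as np

brain = PPOValueBrain(learning_rate=3e-4, hidden_layers_count=2, neurons_per_hidden_layer=64)
state = np.random.rand(8).astype(np.float32)
value = brain.predict(state)                        # scalar state-value estimate
brain.train(np.array([state]), np.array([[1.0]]))   # one-sample batch update
brain.save_model("checkpoints/ppo")                 # writes checkpoints/ppo_critic.h5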
Example #3
def learn_model(x_train, y_train, x_test, y_test, take_components, save_path=None, do_pca=False):
    # pca select main features
    if do_pca:
        pca = PCA(n_components=take_components)
        print("Compute pca relevant features with " + str(take_components) + " percent of variance")
        previous_dims = len(x_train[0])
        x_train = pca.fit_transform(x_train)
        x_test = pca.transform(x_test)
        print(str(len(x_train[0])) + " dims are used from initially " + str(previous_dims))

    # expand dims
    x_train = np.expand_dims(x_train, axis=2)
    x_test = np.expand_dims(x_test, axis=2)

    # change label to categorical
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    # build model
    model = Sequential()
    model.add(Conv1D(256, 8, padding='same', input_shape=(x_train.shape[1], 1)))
    model.add(Activation('relu'))
    model.add(Conv1D(256, 8, padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.6))
    model.add(Conv1D(128, 8, padding='same'))
    model.add(Activation('relu'))
    model.add(Conv1D(128, 8, padding='same'))
    model.add(Activation('relu'))
    model.add(Conv1D(128, 8, padding='same'))
    model.add(Activation('relu'))
    model.add(Conv1D(128, 8, padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.6))
    model.add(Conv1D(64, 8, padding='same'))
    model.add(Activation('relu'))
    model.add(Conv1D(64, 8, padding='same'))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.6))
    model.add(Dense(2))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.SGD(lr=0.001, momentum=0.9, decay=0.0),
                  metrics=['acc'])

    # fit network
    model.fit(x_train, y_train, batch_size=16, epochs=33)

    # evaluate model
    _, accuracy = model.evaluate(x_test, y_test)

    # save model
    if save_path is not None:
        model.save(save_path)

    return accuracy
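A toy invocation of learn_model, assuming binary labels and the Keras/sklearn imports the function uses (shapes and sizes here are invented):

# Hypothetical call with random data; real features and labels would come from a dataset.
import numpy as np

x_tr, y_tr = np.random.rand(100, 40), np.random.randint(0, 2, 100)
x_te, y_te = np.random.rand(20, 40), np.random.randint(0, 2, 20)
acc = learn_model(x_tr, y_tr, x_te, y_te, take_components=0.95, do_pca=True)
print("test accuracy:", acc)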
Example #4
def train_model_2BLSTM_variableSequenceLength(path, subjectID, modelType, MLtechnique, features, labels, dw, batch_size, patience, LSTMunits=30):
	"""
	FUNCTION NAME: train_model_2BLSTM_variableSequenceLength

	This function trains a bidirectional LSTM model with 2 hidden layers
	when input sequences have different lengths from one sample to the next.
	First, the input sequences are zero-padded into a tensor of uniform
	length; once the data is ready, the bidirectional LSTM is trained.

	INPUT:
	------
		-> path:			full path where to store the trained model
		-> subjectID:		integer indicating the ID of the subject being analyzed
		-> modelType:		type of model to train
		-> MLtechnique:		technique to use to train the model
		-> features:		matrix of features to train the model
		-> labels:			matrix of labels to train the model
		-> dw:				factor used when downsampling the available data
		-> batch_size:		value for batch_size parameter
		-> patience:		value for patience parameter
		-> LSTMunits:		number of units of the LSTM
		
	OUTPUT:
	------- 

	"""

	epochs = 200
	verbose = 1

	if (dw == 1):
		modelName = path + 'Model_Subject' + str(subjectID) + '_' + MLtechnique + '_LSTMunits' + str(LSTMunits) + '_BatchSize' + str(batch_size) + '_Patience' + str(patience) + '_' + modelType
	else:
		modelName = path + 'Model_Subject' + str(subjectID) + '_DW' + str(dw) + '_' + MLtechnique + '_LSTMunits' + str(LSTMunits) + '_BatchSize' + str(batch_size) + '_Patience' + str(patience) + '_' + modelType

	# Convert data matrices to tensors
	T_features, T_labels = DE.dataMatrices2tensors(features, labels, modelType)

	# Define the Bidirectional LSTM
	model = Sequential([
				Masking(mask_value = 0., input_shape=(None,DE.get_3DtensorDimensions(T_features)[2])),
				Bidirectional(LSTM(LSTMunits, activation='tanh', return_sequences=True)),
				Bidirectional(LSTM(int(LSTMunits/2), activation='tanh', return_sequences=True)),
				TimeDistributed(Dense(1, activation='linear'))
				])

	model.compile(optimizer=Adam(),loss=loss_CCC)

	earlyStop = EarlyStopping(monitor='loss', patience=patience)
	callbacks_list = [earlyStop]

	# Train the model
	model.fit(T_features, T_labels, batch_size=batch_size, epochs=epochs, verbose=verbose, callbacks = callbacks_list, validation_split = 0)

	print('-> Saving model ..')
	# Save model
	model.save(modelName + '.h5')
	print('<- Model saved')
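The zero-padding step performed by DE.dataMatrices2tensors can be sketched with Keras' own utility (an illustrative stand-in, not the original implementation):

# Pad variable-length sequences with zeros so the Masking(mask_value=0.) layer
# can skip the padded timesteps. Shapes here are invented.
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences

seqs = [np.ones((5, 3)), np.ones((8, 3))]    # two samples, 3 features each
tensor = pad_sequences(seqs, padding='post', dtype='float32', value=0.0)
print(tensor.shape)                           # (2, 8, 3)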
Example #5
class DNN(BaseTrainer):
    @Decorator.log(True)
    def load_data(self, **dict):
        print(dict)
        self.traindata = pd.read_csv(dict['train'], header=None)
        self.testdata = pd.read_csv(dict['test'], header=None)
        X = self.traindata.iloc[:, 1:42]
        Y = self.traindata.iloc[:, 0]
        C = self.testdata.iloc[:, 0]
        T = self.testdata.iloc[:, 1:42]
        trainX = np.array(X)
        testT = np.array(T)
        trainX = trainX.astype(float)
        testT = testT.astype(float)
        scaler = Normalizer().fit(trainX)
        trainX = scaler.transform(trainX)
        scaler = Normalizer().fit(testT)
        testT = scaler.transform(testT)

        self.y_train = np.array(Y)
        self.y_test = np.array(C)

        self.X_train = np.array(trainX)
        self.X_test = np.array(testT)

    def train(self):
        batch_size = 64
        nb_epoch = 100
        if self.has_train:
            nb_epoch = nb_epoch - self.epoch
            print('new epoch', nb_epoch)
            self.model.fit(self.X_train,
                           self.y_train,
                           batch_size=batch_size,
                           epochs=nb_epoch,
                           callbacks=[self.checkpointer, self.csv_logger])
        else:
            # 1. define the network
            self.model = Sequential()
            self.model.add(Dense(1024, input_dim=41, activation='relu'))
            self.model.add(Dropout(0.01))
            self.model.add(Dense(1))
            self.model.add(Activation('sigmoid'))
            self.model.compile(loss='binary_crossentropy',
                               optimizer='adam',
                               metrics=['accuracy'])

            self.model.fit(self.X_train,
                           self.y_train,
                           batch_size=batch_size,
                           epochs=nb_epoch,
                           callbacks=[self.checkpointer, self.csv_logger])
            self.model.save("./dnn1layer_model.hdf5")
        score, acc = self.model.evaluate(self.X_test, self.y_test)
        print('Test score:', score)
        print('Test accuracy', acc)
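The checkpointer and csv_logger callbacks are created elsewhere in the original class; a plausible setup (paths invented) would be:

# Hypothetical callback setup assumed by DNN.train().
from keras.callbacks import ModelCheckpoint, CSVLogger

checkpointer = ModelCheckpoint(filepath='./checkpoints/dnn-{epoch:02d}.hdf5', verbose=1)
csv_logger = CSVLogger('./logs/dnn_training.csv', append=True)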
Example #6
def train_net(steps, epochs):
    # Get images
    X = []
    for filename in os.listdir('./color_images/Train/'):
        X.append(img_to_array(load_img('./color_images/Train/' + filename)))
        # print(filename)
    X = np.array(X, dtype=float)
    # Set up training and test data
    split = int(0.95 * len(X))
    Xtrain = X[:split]
    Xtrain = 1.0 / 255 * Xtrain
    # Design the neural network
    model = Sequential()
    model.add(InputLayer(input_shape=(None, None, 1)))
    model.add(Conv2D(8, (3, 3), input_shape=(None, None, 1), activation='relu', padding='same', strides=2))
    model.add(Conv2D(8, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same', strides=2))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same', strides=2))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(2, (3, 3), activation='tanh', padding='same'))
    # Finish model
    model.compile(optimizer='rmsprop', loss='mse')
    # Image transformer
    datagen = ImageDataGenerator(
        shear_range=0.2,
        zoom_range=0.2,
        rotation_range=20,
        horizontal_flip=True)
    # Generate training data
    batch_size = 50

    def image_a_b_gen(batch_size):
        for batch in datagen.flow(Xtrain, batch_size=batch_size):
            lab_batch = rgb2lab(batch)
            X_batch = lab_batch[:, :, :, 0]
            Y_batch = lab_batch[:, :, :, 1:] / 128
            yield (X_batch.reshape(X_batch.shape + (1,)), Y_batch)

    # Train model
    tensorboard = TensorBoard(log_dir='/output')  # attach the callback so it actually logs
    model.fit_generator(image_a_b_gen(batch_size), steps_per_epoch=steps, epochs=epochs, callbacks=[tensorboard])
    # Test images
    Xtest = rgb2lab(1.0 / 255 * X[split:])[:, :, :, 0]
    Xtest = Xtest.reshape(Xtest.shape + (1,))
    Ytest = rgb2lab(1.0 / 255 * X[split:])[:, :, :, 1:]
    Ytest = Ytest / 128
    print(model.evaluate(Xtest, Ytest, batch_size=batch_size))
    model.save('./result/network.h5')
    del model
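Turning a prediction back into an RGB image means undoing the /128 scaling and recombining with the L channel; a sketch (not from the original script):

# Illustrative post-processing for one predicted image.
import numpy as np
from skimage.color import lab2rgb

def reconstruct(l_channel, ab_pred):
    """l_channel: (H, W, 1) Lab lightness; ab_pred: (H, W, 2) network output in [-1, 1]."""
    lab = np.zeros(l_channel.shape[:2] + (3,))
    lab[:, :, 0] = l_channel[:, :, 0]
    lab[:, :, 1:] = ab_pred * 128  # undo the /128 scaling applied during training
    return lab2rgb(lab)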
Example #7
def test_masking_fixed_length(get_random_data):
    nb_samples = 2
    timesteps = 10
    embedding_dim = 4
    output_dim = 5
    embedding_num = 12

    crf_loss_instance = ConditionalRandomFieldLoss()

    x, y = get_random_data(nb_samples,
                           timesteps,
                           x_high=embedding_num,
                           y_high=output_dim)
    # right padding; left padding is not supported due to the tf.contrib.crf
    x[0, -4:] = 0

    # test with masking, fix length
    model = Sequential()
    model.add(
        Embedding(embedding_num,
                  embedding_dim,
                  input_length=timesteps,
                  mask_zero=True))
    model.add(CRF(output_dim, name="crf_layer"))
    model.compile(optimizer='adam', loss={"crf_layer": crf_loss_instance})

    model.fit(x, y, epochs=1, batch_size=1)
    model.fit(x, y, epochs=1, batch_size=2)
    model.fit(x, y, epochs=1, batch_size=3)
    model.fit(x, y, epochs=1)

    # check mask
    y_pred = model.predict(x)
    assert (y_pred[0, -4:] == 0).all()  # right padding
    # left padding not working currently due to the tf.contrib.crf.*
    # assert (y_pred[1, :5] == 0).all()

    # test saving and loading model
    MODEL_PERSISTENCE_PATH = './test_saving_crf_model.h5'
    model.save(MODEL_PERSISTENCE_PATH)
    load_model(MODEL_PERSISTENCE_PATH, custom_objects={'CRF': CRF})

    try:
        os.remove(MODEL_PERSISTENCE_PATH)
    except OSError:
        pass
Example #8
class BaseModel(Model):
    def __init__(self, hyperparameters: Dict[str, Any]):
        super(BaseModel, self).__init__()
        self.hyperparameters = hyperparameters
        self.model = Sequential()

    def predict_name(self, code_block: str):
        raise NotImplementedError

    @staticmethod
    def from_file(path: str):
        """
        :arg path: directory path containing the config, model and weights.
        :return: a model populated from the file path.
        """
        return load_model('{}/model.h5'.format(path))

    def save(self, filepath, overwrite=True, include_optimizer=True) -> None:
        self.model.save_weights(filepath)
        model_type = type(self).__name__
        model_config_to_save = {
            "model_type": model_type,
            "hyperparameters": self.hyperparameters,
        }

        # Save hyperparameters
        with open('{path}/{name}/model_config.json'.format(
                path=filepath, name=model_type), 'w') as fp:
            json.dump(model_config_to_save, fp)

        # Save the model architecture
        with open('{path}/{name}/model.json'.format(
                path=filepath, name=model_type), 'w') as model_json:
            model_json.write(self.model.to_json())

        # Save the weights
        self.model.save_weights('{path}/{name}/model_weights.h5'.format(
            path=filepath, name=model_type))

        # Save the model completely
        self.model.save('{path}/{name}/model.h5'.format(path=filepath,
                                                        name=model_type))
Example #9
class DQNBrain:
    def __init__(
        self,
        output_dim: int,
        learning_rate: float = 0.0001,
        hidden_layers_count: int = 0,
        neurons_per_hidden_layer: int = 0,
        activation: str = "tanh",
        using_convolution: bool = False,
    ):
        self.model = Sequential()

        if using_convolution:
            self.model.add(Conv2D(64, kernel_size=3, activation=activation))
            self.model.add(Conv2D(32, kernel_size=3, activation=activation))
            self.model.add(Flatten())
            self.model.add(Dense(neurons_per_hidden_layer, activation=activation))
        else:
            for _ in range(hidden_layers_count):
                self.model.add(Dense(neurons_per_hidden_layer, activation=activation))

        self.model.add(Dense(output_dim, activation=linear, use_bias=False))
        self.model.compile(loss=mse, optimizer=Adam(lr=learning_rate))

    def predict(self, state: np.ndarray) -> np.ndarray:
        return self.model.predict(np.array((state,)))[0]

    def train(self, state: np.ndarray, chosen_action_mask: np.ndarray, target: float):
        target_vec = chosen_action_mask * target + (
            1 - chosen_action_mask
        ) * self.predict(state)
        self.model.train_on_batch(np.array((state,)), np.array((target_vec,)))

    def save_model(self, filename: str):
        self.model.save(f"{filename}.h5")

    def load_model(self, filename: str):
        self.model = load_model(filename)
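The train method blends the TD target into only the chosen action's slot of the predicted Q-vector; with made-up numbers:

# Toy illustration of the target-vector construction in DQNBrain.train().
import numpy as np

q_pred = np.array([0.2, 0.5, 0.1])   # stand-in for self.predict(state)
mask = np.array([0.0, 1.0, 0.0])     # one-hot mask of the chosen action
target = 0.9                         # TD target for that action
target_vec = mask * target + (1 - mask) * q_pred
print(target_vec)                    # [0.2 0.9 0.1]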
Example #10
def test_masking_fixed_length(get_random_data):
    nb_samples = 2
    timesteps = 10
    embedding_dim = 4
    output_dim = 5
    embedding_num = 12

    crf_loss_instance = ConditionalRandomFieldLoss()

    x, y = get_random_data(nb_samples,
                           timesteps,
                           x_high=embedding_num,
                           y_high=output_dim)

    # test with no masking, fix length
    model = Sequential()
    model.add(Embedding(embedding_num, embedding_dim, input_length=timesteps))
    model.add(CRF(output_dim, name="crf_layer"))
    model.compile(optimizer='adam', loss={"crf_layer": crf_loss_instance})

    model.fit(x, y, epochs=1, batch_size=1)
    model.fit(x, y, epochs=1, batch_size=2)
    model.fit(x, y, epochs=1, batch_size=3)
    model.fit(x, y, epochs=1)

    # test saving and loading model
    MODEL_PERSISTENCE_PATH = './test_saving_crf_model.h5'
    model.save(MODEL_PERSISTENCE_PATH)
    load_model(MODEL_PERSISTENCE_PATH,
               custom_objects={
                   'CRF': CRF,
                   'crf_loss': crf_loss
               })

    try:
        os.remove(MODEL_PERSISTENCE_PATH)
    except OSError:
        pass
Example #11
def evaluate_model(trainX, trainy, testX, testy):
    verbose, epochs, batch_size = 0, 10, 32
    n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[
        2], trainy.shape[1]
    # model = Sequential()
    # model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps, n_features)))
    # model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
    # model.add(Dropout(0.5))
    # model.add(MaxPooling1D(pool_size=2))
    # model.add(Flatten())
    # model.add(Dense(100, activation='relu'))
    # model.add(Dense(n_outputs, activation='softmax'))
    # model.summary()
    model = Sequential()
    model.add(LSTM(100, input_shape=(n_timesteps, n_features)))
    model.add(Dropout(0.5))
    model.add(Dense(100, activation='relu'))
    model.add(Dense(n_outputs, activation='softmax'))
    # model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # fit network
    model.fit(trainX,
              trainy,
              epochs=epochs,
              batch_size=batch_size,
              verbose=verbose)
    # evaluate model
    # _, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0)
    # return accuracy
    # save model
    model.save('./models/model_test.h5')
    keras_model = load_model('./models/model_test.h5')
    # onnx_model = onnxmltools.convert_keras(keras_model)
    # onnxmltools.utils.save_model(onnx_model, './models/model_test.onnx')
    tf.keras.utils.plot_model(
        model,
        to_file="CNN.png",
        show_shapes=False,
        show_dtype=False,
        show_layer_names=True,
        rankdir="TB",
        expand_nested=False,
        dpi=96,
    )

    # load model
    model = load_model('./models/model_test.h5')

    y_predict = model.predict(testX, batch_size=batch_size, verbose=verbose)
    # print('y_predict:', y_predict)
    y_predict = np.argmax(y_predict, axis=1)
    testy = np.argmax(testy, axis=1)
    y_true = np.reshape(testy, [-1])
    y_pred = np.reshape(y_predict, [-1])

    # evaluation
    accuracy = accuracy_score(y_true, y_pred)
    precision = precision_score(y_true, y_pred, average='macro')
    recall = recall_score(y_true, y_pred, average='macro')
    f1score = f1_score(y_true, y_pred, average='macro')
    return [accuracy, precision, recall, f1score]
Example #12
model.add(layers.LSTM(100, recurrent_activation='sigmoid'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam', metrics=['accuracy'])
model.summary()

filename = os.path.join(current_dir, 'data', 'complain_model.h5')
is_training = False
if is_training:
    model.fit(X_train, Y_train, validation_data=(
        X_test, Y_test), epochs=20, batch_size=64)

    # Evaluate the model
    scores = model.evaluate(X_test, Y_test, verbose=0)
    print("Evaluation Accuracy: %.2f%%" % (scores[1]*100))
    model.save(filename, save_format='tf')
else:
    model.load_weights(filename)

t1 = time.time()

lstm_upstream = tf.keras.Model(
    inputs=model.input, outputs=model.get_layer('max_pooling1d').output)
lstm_input = lstm_upstream.predict(X_test, batch_size=8)
# print(lstm_input.shape)

num_records = lstm_input.shape[0]
quantized_lstm_input = quanti_convert_float_to_int16(
    lstm_input.reshape(num_records * 25*32), in_pos).reshape((num_records, 25*32))
lstm_output = np.zeros((num_records, 25*100), dtype=np.int16)
Example #13
class Predictioner:
    numpy.random.seed(1234)
    set_seed(1234)

    def __init__(self):
        self.model = Sequential()
        self.setup_default_model()
        self.compile_model()

    def save_model(self, path):
        self.model.save(path)

    def load_model(self, path):
        self.model = keras.models.load_model(path)

    def update_input(self, train_x, train_y):
        self.push_train_sets(train_x, train_y)
        self.y_scaler = MinMaxScaler()
        self.x_scaler = MinMaxScaler()
        self.reshape_train_sets()
        self.adjust_scalers()

    def push_train_sets(self, train_x, train_y):
        self.train_x = train_x
        self.train_y = train_y

    def reshape_train_sets(self):
        self.train_x = reshaper(self.train_x, self.train_x.shape[1])
        self.train_y = reshaper(self.train_y, 1)

    def adjust_scalers(self):
        self.train_x = self.x_scaler.fit_transform(self.train_x)
        self.train_y = self.y_scaler.fit_transform(self.train_y)

    def setup_default_model(self):
        self.model.add(Dense(30))
        self.model.add(Dense(90, activation='relu'))
        self.model.add(Dense(45, activation='relu'))
        self.model.add(Dense(20, activation='relu'))
        self.model.add(Dense(10, activation='relu'))
        self.model.add(Dense(1))

    def compile_model(self):
        self.model.compile(
            optimizer=keras.optimizers.Adam(),
            loss=keras.losses.mean_squared_error,
            metrics=[
                keras.metrics.mean_squared_error,
                keras.metrics.mean_squared_logarithmic_error,
                keras.metrics.mean_absolute_percentage_error,
                keras.metrics.mean_absolute_error,
            ]
        )

    def fit_model(self, verbose=0):
        self.model.fit(
            self.train_x,  # [:int(len(self.train_x) * 0.66)],
            self.train_y,  # [:int(len(self.train_y) * 0.66)],
            epochs=300,
            batch_size=10,
            verbose=verbose,
            # validation_data=(self.train_y[int(len(self.train_x) * 0.66):],
            #                 self.train_x[int(len(self.train_x) * 0.66):])
        )

    def evaluate(self, x_test, y_test):
        return self.model.evaluate(x_test, y_test, batch_size=12, verbose=1)

    def predict(self, prediction_interval_x):
        prediction_interval_x = self.x_scaler.transform(prediction_interval_x)
        predicted_y = self.model.predict(prediction_interval_x)

        self.x_plot = self.x_scaler.inverse_transform(self.train_x)
        self.y_plot = self.y_scaler.inverse_transform(self.train_y)
        self.x_pred_plot = self.x_scaler.inverse_transform(prediction_interval_x)
        self.y_pred_plot = self.y_scaler.inverse_transform(predicted_y)
        return self.y_pred_plot

    def visualize(self):
        pyplot.scatter(self.x_pred_plot, self.y_pred_plot, label='Predicted')
        pyplot.scatter(self.x_plot, self.y_plot, label='Actual')
        pyplot.title('Input (x) versus Output (y)')
        pyplot.xlabel('Input Variable (x)')
        pyplot.ylabel('Output Variable (y)')
        pyplot.legend()
        pyplot.show()
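The reshaper helper used above is not part of the snippet; a plausible stand-in that coerces an array to (n_samples, n_features), which is what MinMaxScaler expects, might be:

# Hypothetical stand-in for the reshaper helper; the original may differ.
import numpy as np

def reshaper(arr, n_features):
    return np.asarray(arr, dtype=float).reshape(-1, n_features)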
Example #14
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
# number of neurons equal to number of intents to predict output intent with softmax
model.add(Dense(len(train_y[0]), activation='softmax'))

tf.keras.utils.plot_model(model, to_file="img.png", show_shapes=True)


sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

hist = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
model.save('models/chatbot_model.h5')  # note: model.save does not store the training history
print("model created")

# pyplot.subplot(211)
# pyplot.title('Loss')
# pyplot.plot(hist.history['loss'], label='train')
# pyplot.plot(hist.history['val_loss'], label='test')
# pyplot.legend()
# # plot accuracy during training
# pyplot.subplot(212)
# pyplot.title('Accuracy')
# pyplot.plot(hist.history['accuracy'], label='train')
# pyplot.plot(hist.history['val_accuracy'], label='test')
# pyplot.legend()
# pyplot.show()
Example #15
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,

    class_mode='binary')

model.fit(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)

model.save('potato.h5')
model.save_weights('potato_weights.h5')
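This excerpt relies on a compiled model and several globals defined earlier in the script; a plausible preamble (all values invented) is:

# Hypothetical globals assumed by the generators and fit call above.
img_width, img_height = 150, 150
train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
nb_train_samples = 2000
nb_validation_samples = 800
epochs = 50
batch_size = 16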
Example #16
class TrainTop:
    """
    Trains the top layer. The class variables include a parametrized model created using SuperNet.
    Call the train function of the class to start training and the test function to evaluate the
    model. The test function also writes the data to the buckets file.
    :param epochs: The number of epochs
    :param batch_size: Model training batch size
    :param loss: Defines the criterion, i.e. the loss function. Default is "mse"
    :param lr: The learning rate for the model
    :param verbose: Whether to print the logs
    :param identifier: Used to identify the type of dataset
    :param n1: Number of units in the first hidden layer
    :param n2: Number of units in the second hidden layer
    :param bias: Bias used or not for Model: bool
    :param validation_split: Amount of validation used to avoid overfitting
    :param optimizer: Defines the optimizer used
    """

    def __init__(self, identifier, epochs, batch_size, filename, lr=0.01, loss="mse", n1=32, n2=0,
                 bias=True, optimizer='RMSprop', validation_split=0.1, verbose=True):
        self.epochs = epochs
        self.batch_size = batch_size
        self.loss = loss
        self.verbose = verbose
        self.optimizer = optimizer
        self.identifier = identifier
        self.n1 = n1
        self.n2 = n2
        self.bias = bias
        self.lr = lr
        self.validation_split = validation_split
        self.model = None
        self.keys, self.values = None, None
        self.filename = filename

    def train(self):

        self.model = Sequential()
        self.model.add(Dense(self.n1, activation=tf.nn.relu, use_bias=self.bias, input_shape=(1,)))
        if self.n2 != 0:
            self.model.add(Dense(self.n2, activation=tf.nn.relu, use_bias=self.bias))
        self.model.add(Dense(1))
        self.model.compile(optimizer=self.optimizer, loss=self.loss, metrics=['mse'])

        self.keys, self.values = import_data(self.filename)
        self.model.fit(self.keys, self.values, epochs=self.epochs, batch_size=self.batch_size, verbose=self.verbose,
                       validation_split=self.validation_split)

        if not os.path.exists("models_tf/{}".format(self.identifier)):
            os.makedirs("models_tf/{}".format(self.identifier))
        self.model.save("models_tf/{}/super_layer.h5".format(self.identifier))

        converter = tf.lite.TFLiteConverter.from_keras_model(self.model)  # TF 2.0
        # converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]

        # The following code further quantizes the weights; this causes a massive drop in performance
        converter.optimizations = [tf.lite.Optimize.DEFAULT]

        sample = tf.cast(self.keys, tf.float32)
        sample = tf.data.Dataset.from_tensor_slices((sample)).batch(1)
        def representative_data_gen():
            for input_value in sample.take(1300000):
                yield [input_value]
        converter.representative_dataset = representative_data_gen

        tflite_model = converter.convert()

        open("models_tf/{}/super_layer.tflite".format(self.identifier), "wb").write(tflite_model)

    def getWeights(self, tflite=False, read_model=False):
        """
        :param read_model: Whether to read the model saved by the train function
        :param tflite: Read from quantized tflite file
        """

        if self.keys is None:
            self.keys, self.values = import_data(self.filename)
        predictions = None

        if read_model:
            if tflite:
                self.model = tf.lite.Interpreter(model_path="models_tf/{}/super_layer.tflite".format(self.identifier))
                self.model.allocate_tensors()
                # Get input and output tensors.
                details = self.model.get_tensor(0)
                print(details)
                details = self.model.get_tensor(1)
                print(details)
                details = self.model.get_tensor(2)
                print(details)
                details = self.model.get_tensor(3)
                print(details)
                # details = self.model.get_tensor(4)
                # print(details)
                # details = self.model.get_tensor(4)
                # print(details)
                # details = self.model.get_tensor(5)
                # print(details)
                # details = self.model.get_tensor(6)
                # print(details)
                input_details = self.model.get_input_details()
                output_details = self.model.get_output_details()

            else:
                if self.model is None:
                    self.model = tf.keras.models.load_model("models_tf/{}/super_layer.h5".format(self.identifier))
                for layer in self.model.layers:
                    print(layer.get_weights())
        else:
            for layer in self.model.layers:
                print(layer.get_weights())

    def test(self, tflite=False, read_model=False, write_buckets=True, total_buckets=100):
        """
        :param read_model: Whether to read the model saved by the train function
        :param total_buckets: Divide the data between the buckets to train the model for next layer
        :param write_buckets: Write the buckets out to disk. Writes the training data for next layer to the buckets directory.
        :param tflite: Read from quantized tflite file
        """
        if self.keys is None:
            self.keys, self.values = import_data(self.filename)
        predictions = None

        if read_model:
            if tflite:
                self.model = tf.lite.Interpreter(model_path="models_tf/{}/super_layer.tflite".format(self.identifier))
                self.model.allocate_tensors()
                # Get input and output tensors.
                input_details = self.model.get_input_details()
                output_details = self.model.get_output_details()
                self.keys = np.reshape(self.keys, (-1,1,1)).astype(np.float32)
                predictions = []
                for i in range(self.keys.shape[0]):
                    self.model.set_tensor(input_details[0]['index'], self.keys[i])
                    self.model.invoke()
                    predictions.append(self.model.get_tensor(output_details[0]['index'])[0])
                predictions = np.asarray(predictions)
            else:
                self.model = tf.keras.models.load_model("models_tf/{}/super_layer.h5".format(self.identifier))
                predictions = self.model.predict(self.keys)
        else:
            predictions = self.model.predict(self.keys)

        if self.verbose:
            print("\n\nEvaluation:\n\n")


        big_bucket = dict()
        self.keys = np.reshape(self.keys, (-1,1))
        predictions = np.concatenate((self.keys, self.values, predictions), axis=1)

        total_length = predictions.shape[0]

        for i in range(total_buckets):
            big_bucket[i] = []

        for i, (k, v, o) in enumerate(predictions):
            k = k.item()
            v = v.item()
            o = o.item()

            if self.verbose and i % 8000 == 0:
                print("Record: ", i+1, "Key: ", k, "Value: ", v, "Model Output: ", o, "Difference: ", o-v)

            mn = (total_buckets * o) / total_length
            model_num = np.clip(np.floor(mn), 0, total_buckets - 1)
            big_bucket[int(model_num)].append([v, k])

        if write_buckets:
            print("\n\nSaving data files for layer 2:\n\n")
            if not os.path.exists("buckets_tf/{}".format(self.identifier)):
                os.makedirs("buckets_tf/{}".format(self.identifier))
            for b in big_bucket:
                np.savetxt(fname="buckets_tf/{}/bucket_{}.txt".format(self.identifier, b), X=np.array(big_bucket[b]), fmt="%u")
Example #17

model.add(Conv2D(64, 3, strides=2, padding='same', activation='relu'))
model.add(MaxPooling2D(2, 2, 'same'))
model.add(Conv2D(128, 3, strides=2, padding='same', activation='relu'))
model.add(MaxPooling2D(2, 2, 'same'))

# Flatten the output of the second pooling layer to 1-D
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(10, activation='softmax'))

# Define the optimizer
sgd = SGD(lr=0.01)

# Configure the optimizer and loss function, and track accuracy during training
model.compile(optimizer=sgd,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train_data, y_train_data, batch_size=64, epochs=100)

# Evaluate the model
loss, accuracy = model.evaluate(x_test_data, y_test_data)

model_path = "./weights/mnist_cnn.h5"
model.save(model_path, include_optimizer=False, save_format='h5')

print('test loss', loss)
print('test accuracy', accuracy)
Example #18
    rcd += "lr: " + str(lr) + '\n'
    rcd += "batch_size: " + str(batch_size) + '\n'
    rcd += "l2_param: " + str(l2_param) + '\n'
    rcd += "dropout: " + str(dropout) + '\n'
    rcd += "training_epochs: " + str(training_epochs) + '\n'
    rcd += "n_inputs: " + str(n_inputs) + '\n'
    rcd += "n_outputs: " + str(n_outputs) + '\n'
    rcd += "n_mlp: " + str(n_mlp) + '\n'
    rcd += "mae: " + str(mae) + '\n'
    rcd += "time: " + str(end - start) + '\n' + '\n' + '\n'
    print(rcd)
    log_file = open(DATA_PATH_RESULT + "mlp_result", "a")
    log_file.write(rcd)
    log_file.close()

    mlp.save('ckpt/' + file_name[:-3] + '/' + file_name)

elif mode == PRED:
    # file_name = 'ckpt/mlp-ep250-loss81.952-val_loss96.125-lr: 0.001-batch_size: 93312-l2_param: 0.001-' \
    #             'dropout: 1-training_epochs: 2000-n_inputs: 244-n_outputs: 4-' \
    #             'n_mlp: [400, 400, 400, 400, 400, 400, 4].h5'

    # no averWeekOutIn
    # 13.21
    # file_name = 'ckpt/mlp-ep320-loss81.004-val_loss92.560-lr: 0.001-batch_size: 93312-l2_param: 0.001-dropout: 1-training_epochs: 2000-n_inputs: 243-n_outputs: 4-n_mlp: [400, 400, 400, 400, 400, 400, 4].h5'
    # 12.94
    # validation data -1:
    file_name = 'ckpt/mlp-ep920-loss81.810-val_loss94.843-lr: 0.001-batch_size: 93312-l2_param: 0.01-dropout: 0.8-training_epochs: 2000-n_inputs: 243-n_outputs: 4-n_mlp: [400, 400, 400, 400, 400, 400, 4].h5'
    # file_name = 'ckpt/mlp-ep1080-loss77.875-val_loss91.735-lr: 0.0001-batch_size: 93312-l2_param: 0.01-dropout: 0.8-training_epochs: 2000-n_inputs: 243-n_outputs: 4-n_mlp: [400, 400, 400, 400, 400, 400, 4].h5'
    # 14.85
    # file_name = 'ckpt/mlp-ep1720-loss82.743-val_loss85.495-lr: 0.0001-batch_size: 93312-l2_param: 0.01-dropout: 0.8-training_epochs: 2000-n_inputs: 243-n_outputs: 2-n_mlp: [200, 200, 200, 200, 200, 200, 2].h5'
Example #19
def train_task(data: InputBinaryFile(str), epochs: int, batch_size: int,
               model_path: OutputBinaryFile(str)):
    """Train CNN model on MNIST dataset."""

    from tensorflow.python import keras
    from tensorflow.python.keras import Sequential, backend as K
    from tensorflow.python.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
    import numpy as np

    mnistdata = np.load(data)

    train_x = mnistdata['train_x']
    train_y = mnistdata['train_y']
    test_x = mnistdata['test_x']
    test_y = mnistdata['test_y']

    num_classes = 10
    img_w = 28
    img_h = 28

    if K.image_data_format() == 'channels_first':
        train_x.shape = (-1, 1, img_h, img_w)
        test_x.shape = (-1, 1, img_h, img_w)
        input_shape = (1, img_h, img_w)
    else:
        train_x.shape = (-1, img_h, img_w, 1)
        test_x.shape = (-1, img_h, img_w, 1)
        input_shape = (img_h, img_w, 1)

    model = Sequential([
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=input_shape),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(num_classes, activation='softmax'),
    ])

    model.compile(
        loss=keras.losses.categorical_crossentropy,
        optimizer=keras.optimizers.Adadelta(),
        metrics=['accuracy'],
    )

    model.fit(
        train_x,
        train_y,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        validation_data=(test_x, test_y),
    )

    score = model.evaluate(test_x, test_y)
    print('Test loss & accuracy: %s' % (score, ))

    model.save(model_path)
Example #20
#score
score = model.evaluate(X_test, y_test, verbose=0)
print("%s: %.2f%%" % (model.metrics_names[1], score[1] * 100))
#make a prediction
prediction1 = model.predict(XAle)

print(prediction1)

predplusnormalized = np.column_stack([XAle, prediction1[:, -1]])
inverted = scaler.inverse_transform(predplusnormalized)
result = inverted[:, 17]
print(result)

# RICKY TODO Save the model
import tensorflow as tf
model.save('C:\\Users\\Arturo A\\AguayDrenaje\\modeloDiario')
#RICKY Send export scaler to be used by flask later
import joblib  # sklearn.externals.joblib is deprecated; use joblib directly
scaler_filename = "C:\\Users\\Arturo A\\AguayDrenaje\\testScaler"
joblib.dump(scaler, scaler_filename)
# You don't really need the scaled data
scaled_filename = "C:\\Users\\Arturo A\\AguayDrenaje\\testScaled"
joblib.dump(scaled, scaled_filename)
#RICKY
# Recreate the exact same model purely from the file
new_model = tf.keras.models.load_model(
    'C:\\Users\\Arturo A\\AguayDrenaje\\modeloDiario')
#RICKY test if model is imported correctly making a test
prediction = new_model.predict(X_test)
print(prediction)
Example #21
def runTrainingClassification(uuid,
                              datasetDir,
                              validDir,
                              classNum,
                              dropoutValue=0.2,
                              batch_size=128,
                              nb_epoch=20,
                              step_size_train=10,
                              alphaVal=0.75,
                              depthMul=1):

    imageGen = ImageDataGenerator(rotation_range=30,
                                  width_shift_range=0.35,
                                  height_shift_range=0.35,
                                  zoom_range=0.35,
                                  shear_range=0.35,
                                  vertical_flip=False,
                                  horizontal_flip=False,
                                  brightness_range=[0.65, 1.35],
                                  rescale=1. / 255)

    trainSet = imageGen.flow_from_directory(datasetDir,
                                            target_size=(224, 224),
                                            color_mode='rgb',
                                            batch_size=batch_size,
                                            class_mode='categorical',
                                            shuffle=True)
    validSet = imageGen.flow_from_directory(validDir,
                                            target_size=(224, 224),
                                            color_mode='rgb',
                                            batch_size=32,
                                            class_mode='categorical',
                                            shuffle=True)

    class EarlyStoppingAtMinLoss(tf.keras.callbacks.Callback):
        def __init__(self, patience=3):
            super(EarlyStoppingAtMinLoss, self).__init__()
            self.patience = patience
            self.best_weights = None

        def on_train_begin(self, logs=None):
            self.wait = 0
            self.stopped_epoch = 0
            self.best = np.Inf
            self.last_acc = 0
            self.atleastepoc = 0

        def on_epoch_end(self, epoch, logs=None):
            current = logs.get('val_loss')
            val_acc = logs.get('val_acc')
            self.atleastepoc = self.atleastepoc + 1
            if np.less(current, self.best
                       ) or self.last_acc < 0.95 or self.atleastepoc < 25:
                self.best = current
                self.wait = 0
                self.last_acc = val_acc
                self.best_weights = self.model.get_weights()
            else:
                self.wait += 1
                if self.wait >= self.patience:
                    self.stopped_epoch = epoch
                    self.model.stop_training = True
                    print(
                        '\nRestoring model weights from the end of the best epoch.'
                    )
                    self.model.set_weights(self.best_weights)

        def on_train_end(self, logs=None):
            if self.stopped_epoch > 0:
                print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))

    base_model = tf.keras.applications.MobileNet(input_shape=(224, 224, 3),
                                                 alpha=alphaVal,
                                                 depth_multiplier=depthMul,
                                                 dropout=dropoutValue,
                                                 pooling='avg',
                                                 include_top=False,
                                                 weights="imagenet",
                                                 classes=classNum)

    mbnetModel = Sequential([
        base_model,
        Dropout(dropoutValue, name='dropout'),
        Dense(classNum, activation='softmax')
    ])

    if classNum == 2:
        mbnetModel.compile(loss='binary_crossentropy',
                           optimizer=RAdam(),
                           metrics=['accuracy'])
    else:
        mbnetModel.compile(
            loss='categorical_crossentropy',  # or loss_softmax_cross_entropy_with_logits_v2
            optimizer=RAdam(),
            metrics=['accuracy'])

    history = History()

    try:
        mbnetModel.fit_generator(generator=trainSet,
                                 steps_per_epoch=step_size_train,
                                 callbacks=[EarlyStoppingAtMinLoss(), history],
                                 epochs=50,
                                 validation_data=validSet)
    except Exception as e:
        return (-14, f'Unexpected error found during training, {e}')

    mbnetModel.save(f'{localSSDLoc}trained_h5_file/{uuid}_mbnet10.h5')

    converter = tf.lite.TFLiteConverter.from_keras_model_file(
        f'{localSSDLoc}trained_h5_file/{uuid}_mbnet10.h5',
        custom_objects={
            'RAdam':
            RAdam,
            'loss_softmax_cross_entropy_with_logits_v2':
            loss_softmax_cross_entropy_with_logits_v2
        })
    tflite_model = converter.convert()
    open(f'{localSSDLoc}trained_tflite_file/{uuid}_mbnet10_quant.tflite',
         "wb").write(tflite_model)

    subprocess.run([
        f'{nncaseLoc}/ncc',
        f'{localSSDLoc}trained_tflite_file/{uuid}_mbnet10_quant.tflite',
        f'{localSSDLoc}trained_kmodel_file/{uuid}_mbnet10_quant.kmodel', '-i',
        'tflite', '-o', 'k210model', '--dataset', validDir
    ])

    if os.path.isfile(
            f'{localSSDLoc}trained_kmodel_file/{uuid}_mbnet10_quant.kmodel'):
        return (
            0, f'{localSSDLoc}trained_kmodel_file/{uuid}_mbnet10_quant.kmodel',
            history, validSet, mbnetModel)
    else:
        return (-16,
                'Unexpected error found while generating the Kendryte k210model.')
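A hypothetical invocation of the pipeline (UUID and directories invented; localSSDLoc, nncaseLoc and RAdam must already be defined in the module):

# Sketch of calling the training pipeline; on success the tuple carries the
# kmodel path plus training artifacts, on failure a negative code and message.
result = runTrainingClassification(uuid='demo1234',
                                   datasetDir='./data/train',
                                   validDir='./data/valid',
                                   classNum=2)
if result[0] == 0:
    _, kmodel_path, history, valid_set, net = result
    print('kmodel written to', kmodel_path)
else:
    print('training failed:', result[1])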
Example #22
import tensorflow as tf
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras import Sequential
import numpy as np

my_dat_input = np.load('kayit_giris.npy').reshape(-1, 3) / 600.0
my_dat_output = np.load('kayit_cikis.npy').reshape(-1, )

output = [0 if i == -15 else 1 for i in my_dat_output]

model = Sequential()
model.add(Dense(64, input_shape=(3, ), activation='relu'))
model.add(Dense(1, activation='sigmoid'))

model.compile(optimizer='adam',
              loss=tf.keras.losses.binary_crossentropy,
              metrics=['accuracy'])

model.fit(my_dat_input, np.array(output), epochs=50, batch_size=20, validation_split=0.1)

model.save('ballwar.h5')
Example #23
history = model.fit(Xtrain,
                    ytrain,
                    epochs=50,
                    batch_size=256,
                    validation_data=val_data,
                    callbacks=callbacks,
                    class_weight={
                        0: 0.25,
                        1: 0.75
                    },
                    verbose=verbose)

if kaggle_mode:
    y_pred = model.predict_classes(Xval)
    submission = pd.DataFrame({
        'qid': test_data.qid,
        'prediction': y_pred.flatten()
    })
    submission.to_csv('submission.csv', index=False)

else:
    joblib.dump(tokenizer, tokenizer_path)
    model.save(model_path)
    with open(history_path, "w") as f:
        f.write(json.dumps(history.history))
        f.write("\n\n")
        f.write(json.dumps(history.params))
        f.write("\n\n")
        f.write("F1 scores: " + json.dumps(f1_callback.f1_scores))
        f.write("\n")
Example #24
                    y_train,
                    epochs=1000,
                    batch_size=20,
                    validation_data=(x_test, y_test))

scores = model.evaluate(X, Y)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))

plt.figure()
plt.isinteractive()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='best')

plt.figure()

plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('acc')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='best')

plt.show()

# save the model
model.save('saved_model')
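Because the path has no .h5 suffix, TF 2.x versions of tf.keras write the TensorFlow SavedModel directory format here; reloading is symmetric (a sketch, assuming TF 2.x):

# Reload the SavedModel directory written above.
import tensorflow as tf
model = tf.keras.models.load_model('saved_model')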
Example #25
def train_task(
    endpoint: str, bucket: str, data: str, epochs: int, batch_size: int
) -> NamedTuple('Model', [('filename', str), ('examples', str)]):
    """Train CNN model on MNIST dataset."""

    from pathlib import Path
    from tempfile import TemporaryFile
    from tensorflow.python import keras
    from tensorflow.python.keras import backend as K
    from tensorflow.python.keras import Sequential
    from tensorflow.python.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
    from tensorflow.python.keras.utils import to_categorical
    import numpy as np
    from minio import Minio

    mclient = Minio(
        endpoint,
        access_key=Path('/secrets/accesskey').read_text(),
        secret_key=Path('/secrets/secretkey').read_text(),
        secure=False,
    )

    with TemporaryFile('w+b') as outp:
        with mclient.get_object(bucket, data) as inp:
            outp.write(inp.read())
        outp.seek(0)
        mnistdata = np.load(outp)

        train_x = mnistdata['train_x']
        train_y = to_categorical(mnistdata['train_y'])
        test_x = mnistdata['test_x']
        test_y = to_categorical(mnistdata['test_y'])

    # For example purposes, we don't need the entire training set, just enough
    # to get reasonable accuracy
    train_x = train_x[:10000, :, :]
    train_y = train_y[:10000]

    num_classes = 10
    img_w = 28
    img_h = 28

    if K.image_data_format() == 'channels_first':
        train_x.shape = (-1, 1, img_h, img_w)
        test_x.shape = (-1, 1, img_h, img_w)
        input_shape = (1, img_h, img_w)
    else:
        train_x.shape = (-1, img_h, img_w, 1)
        test_x.shape = (-1, img_h, img_w, 1)
        input_shape = (img_h, img_w, 1)

    train_x = train_x.astype('float32')
    test_x = test_x.astype('float32')
    train_x /= 255
    test_x /= 255

    model = Sequential(
        [
            Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
            Conv2D(64, (3, 3), activation='relu'),
            MaxPooling2D(pool_size=(2, 2)),
            Dropout(0.25),
            Flatten(),
            Dense(128, activation='relu'),
            Dropout(0.5),
            Dense(num_classes, activation='softmax'),
        ]
    )

    model.compile(
        loss=keras.losses.categorical_crossentropy,
        optimizer=keras.optimizers.Adadelta(),
        metrics=['accuracy'],
    )

    model.fit(
        train_x,
        train_y,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        validation_data=(test_x, test_y),
    )

    score = model.evaluate(test_x, test_y)
    print('Test loss & accuracy: %s, %s' % tuple(score))

    model_name = 'model.h5'

    model.save(f'/output/{model_name}')

    mclient.fput_object(bucket, model_name, f'/output/{model_name}')

    examples = 'examples.npz'

    np.savez_compressed(
        f'/output/{examples}',
        **{
            'X': test_x[:10, :, :, :],
            'y': test_y[:10],
        },
    )

    mclient.fput_object(bucket, examples, f'/output/{examples}')

    return model_name, examples
Example #26
                          min_delta=0,
                          patience=10,
                          verbose=1,
                          mode='auto')

callbacks = [checkpoint, earlystop]

model.fit(total_x,
          total_y,
          validation_split=0.15,
          batch_size=100,
          epochs=100,
          callbacks=callbacks,
          verbose=1)

model.save("model/my_model.h5")

# Direct test
# # Header order in the original file
# test_file_path = "E:\\Study\\19华为杯\\赛题\\2019年中国研究生数学建模竞赛A题\\train_set\\train_2915501.csv"
# test_x = []
# test_y = []
# with open(test_file_path, "r") as f:
#     csv_reader = csv.reader(f)
#     next(f)
#     for data_line in csv_reader:
#         one_line_data = np.array(data_line, np.float32)
#         cell_x = one_line_data[1]
#         cell_y = one_line_data[2]
#         cell_h = one_line_data[3]
#         azimuth = one_line_data[4]
Example #27
model.add(Dense(2, input_dim=10, activation='softmax'))
#model.add(Dense(1, input_dim=10, activation='sigmoid'))

# 3. model fitting config
#model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])  # when using softmax

# 4. model fitting
history = model.fit(train_x, train_t, epochs=100, batch_size=5, verbose=1)

# 5. result
loss = history.history['loss']
result = model.evaluate(train_x, train_t, verbose=0)
print(f'\n(Train Loss, Train Accuracy) = ({result[0]}, {result[1]})')

# 6. save model
model_directory = os.path.join(os.getcwd(), 'model')
if not os.path.exists(model_directory):
    os.mkdir(model_directory)

model.save(os.path.join(model_directory, 'model.h5'))

# 7. overfitting
del model
model = load_model(os.path.join(model_directory, 'model.h5'))

result = model.evaluate(test_x, test_t, verbose=0)
print(f'\n(Test Loss, Test Accuracy) = ({result[0]}, {result[1]})')
Example #28
                    validation_data = test_generator,
                    validation_steps = test_generator.n//test_generator.batch_size
                    )
except KeyboardInterrupt:
    pass
end = time.time()
enc_time = end-start

print('Execution time:')
print(str(enc_time))
print()

"""# Save the model"""

filename = os.path.join(to_save, 'test2.h5')
model.save(filename)
print("\nModel saved successfully on file %s\n" %filename)

"""# Evaluate the model

Accuracy on test set
"""

# shuffle must be set to False here
test_generator = test_datagen.flow_from_directory(
    directory = testset,
    target_size = (150, 150),
    shuffle=False
)

val_steps = test_generator.n//test_generator.batch_size
Example #29
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.layers import Dense
from util import *

train_data = pd.read_csv('train.csv')
x = embed_many(list(train_data['text']))
y = train_data[['start', 'retrieve', 'delete', 'total']].values

model = Sequential()
model.add(Dense(256, activation='relu', input_dim=512))
model.add(Dense(4, activation=tf.nn.softmax))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

model.fit(x, y, shuffle=True, epochs=20)
model.save('save/model.h5')
Example #30
          input_dim=200,
          bias_initializer='one',
          kernel_regularizer=l2(0.0003),
          activation='tanh'),
    Dropout(0.4),
    Dense(units=10,
          input_dim=100,
          bias_initializer='one',
          kernel_regularizer=l2(0.0003),
          activation='softmax'),
])
sgd = SGD(lr=0.1)
model.compile(optimizer=sgd,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

#train
model.fit(x_train_data, y_train_data, batch_size=128, epochs=20)
model.save("01_tf_keras/sequential_model/weights/mnist_dnn.h5",
           include_optimizer=False,
           save_format='h5')

#evaluate of test data
loss, accuracy = model.evaluate(x_test_data, y_test_data)
print("test loss: ", loss)
print("test acc: ", accuracy)

#evaluate of train data
loss, accuracy = model.evaluate(x_train_data, y_train_data)
print("train loss:", loss)
print("train loss:", accuracy)