Example 1
File: train.py Project: mrscp/mnist
    def __init__(self):
        super().__init__()
        print("Mode: Train")

        dataset = Dataset(self.get_data_location(self["train"]["dataset"]))
        # Sanity-check one batch from the data generator.
        x, y = next(dataset.generator(8))
        print(x.shape, y.shape)

        model = simple_cnn((28, 28, 1), 10)
        optimizer = Adam(0.001, beta_1=0.9)
        reduce_lr_on_plateau = ReduceLROnPlateau(monitor='loss',
                                                 factor=0.90,
                                                 patience=20,
                                                 verbose=1,
                                                 mode='auto',
                                                 min_delta=0.0001)
        early = EarlyStopping(monitor="loss", mode="min", patience=20)
        callbacks_list = [early, reduce_lr_on_plateau]

        model.compile(loss=categorical_crossentropy,
                      optimizer=optimizer,
                      metrics=["accuracy"])
        model.fit(dataset.generator(batch_size=self["train"]["batch_size"]),
                  steps_per_epoch=self["train"]["steps_per_epoch"],
                  epochs=self["train"]["epochs"],
                  callbacks=callbacks_list)
        saved_model.save(
            model, self.get_data_location(self["main"]["model_location"], "1"))
        print("Model saved...\n")
Example 2
    def save(self, models_dir: str):
        """
        Save tf.keras model to models_dir

        Args:
            models_dir: path to directory to save the model
        """

        model_file = os.path.join(models_dir, "final")

        # Save model with default signature
        saved_model.save(self.model,
                         export_dir=os.path.join(model_file, "default"))
        """
        Save model with custom signatures

        Currently supported
        - signature to read TFRecord SequenceExample inputs
        """
        saved_model.save(
            self.model,
            export_dir=os.path.join(model_file, "tfrecord"),
            signatures=self._build_saved_model_signatures(),
        )
        self.logger.info("Final model saved to: {}".format(model_file))
Example 3
def save_tensorflow_saved_model(model, local_dir, filename, model_number="1"):

    import os
    from tensorflow.saved_model import save

    # SavedModel layout expected by TensorFlow Serving: <root>/<version>/
    root_dir = os.path.join(local_dir, filename)
    sub_dir = os.path.join(root_dir, model_number)

    os.makedirs(sub_dir, exist_ok=True)

    save(model, sub_dir)

    return root_dir
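A possible call site for this helper; the model and directories here are made-up illustrations:

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
# Writes the SavedModel to ./artifacts/my_model/1 and returns ./artifacts/my_model
root_dir = save_tensorflow_saved_model(model, "./artifacts", "my_model")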
Example 4
    def save(
        self,
        models_dir: str,
        preprocessing_keys_to_fns=None,
        postprocessing_fn=None,
        required_fields_only: bool = True,
        pad_sequence: bool = False,
    ):
        """
        Save tf.keras model to models_dir

        Args:
            models_dir: path to directory to save the model
            preprocessing_keys_to_fns: mapping of feature keys to preprocessing
                functions, forwarded to define_serving_signatures
            postprocessing_fn: optional postprocessing function, forwarded to
                define_serving_signatures
            required_fields_only: whether to restrict the serving signature to
                required features only
            pad_sequence: whether to pad sequence inputs when building the signature
        """
        # Avoid the mutable-default-argument pitfall for the preprocessing map.
        preprocessing_keys_to_fns = preprocessing_keys_to_fns or {}

        model_file = os.path.join(models_dir, "final")

        # Save model with default signature
        saved_model.save(self.model, export_dir=os.path.join(model_file, "default"))

        """
        Save model with custom signatures

        Currently supported
        - signature to read TFRecord SequenceExample inputs
        """
        saved_model.save(
            self.model,
            export_dir=os.path.join(model_file, "tfrecord"),
            signatures=define_serving_signatures(
                model=self.model,
                tfrecord_type=self.tfrecord_type,
                feature_config=self.feature_config,
                preprocessing_keys_to_fns=preprocessing_keys_to_fns,
                postprocessing_fn=postprocessing_fn,
                required_fields_only=required_fields_only,
                pad_sequence=pad_sequence,
                max_sequence_size=self.max_sequence_size,
            ),
        )
        self.logger.info("Final model saved to: {}".format(model_file))
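`define_serving_signatures` (and `_build_saved_model_signatures` in Example 2) are project-specific helpers; the general pattern they wrap is a `tf.function` traced with a fixed input signature. A minimal sketch of that pattern, with an illustrative feature spec that is not taken from the project:

import tensorflow as tf

def build_signatures(model):
    # Illustrative signature that accepts serialized tf.Example protos.
    @tf.function(input_signature=[tf.TensorSpec([None], tf.string, name="protos")])
    def serve_tfrecord(protos):
        # Assumed feature spec; a real one would mirror the project's feature_config.
        spec = {"x": tf.io.FixedLenFeature([28 * 28], tf.float32)}
        features = tf.io.parse_example(protos, spec)
        return {"scores": model(features["x"])}

    return {"tfrecord": serve_tfrecord}

# Passed as: tf.saved_model.save(model, export_dir, signatures=build_signatures(model))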
Example 5
def train_new_data(csv_files):
    # Loop over every csv file in the data directory and train a NN with it.
    for data_set in csv_files:
        download_data = data_set.download_as_string()
        data = pandas.read_csv(StringIO(download_data.decode()))

        inputs = pandas.DataFrame()
        outputs = pandas.DataFrame()
        for col in data.columns:
            # s-parameters (im/re/mag/ang prefixes) are outputs
            if col.startswith(("im", "re", "mag", "ang")):
                outputs[col] = data[col]
            else:
                # anything else appearing in the csv must be an input
                inputs[col] = data[col]
        inputs = inputs.to_numpy()
        outputs = outputs.to_numpy()
        print("[*] Now training model for: %s" % (data_set.name))
        new_model = tensorflow_train(inputs, outputs)
        # Name trained models after the csv file, without the extension.
        filename = data_set.name.split("/")[-1].split(".")[-2]
        try:
            saved_model.save(new_model, "/tmp/" + filename)
            # you could probably pop a shell here with a malicious filename, but what's life without some excitement?
            os.system("gsutil cp -R '/tmp/" + filename + "' gs://" +
                      config["DEFAULT"]["bucket_id"] + "/models")
            shutil.rmtree("/tmp/" + filename)
        except Exception:
            print(
                "[!!] Problem writing trained model to disk. Does the path specified in the config exist and have write permissions for this user?"
            )
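The comment above is right to be wary: interpolating `filename` into a shell command is an injection risk. A sketch of a shell-free alternative using `subprocess.run` with an argument list (same config key as above):

import subprocess

# Argument-list form: no shell is involved, so a hostile filename
# cannot inject extra commands.
subprocess.run(
    ["gsutil", "cp", "-R", "/tmp/" + filename,
     "gs://" + config["DEFAULT"]["bucket_id"] + "/models"],
    check=True,
)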
Example 6
# CALLBACKS
# early stopping
es_cb = EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='auto')

# TRAIN WITH MIXUP
batch_size = 32
epochs = 10
training_generator = MixupGenerator(x_train, y_train)()
print("***start MIXUP training***")
# model.fit accepts Python generators directly (fit_generator is deprecated);
# shuffle is ignored for generator input.
model.fit(training_generator,
          steps_per_epoch=x_train.shape[0] // batch_size,
          validation_data=(x_test, y_test),
          epochs=epochs,
          verbose=1,
          callbacks=[es_cb])

print("training finished!")

# make SavedModel
saved_model.save(model, "./saved_model2_mixup")
print("stored trained model as <<saved_model>>")

# EVALUATION
evaluation = model.evaluate(x_test, y_test)
print(evaluation)
print("evaluation finished!")

keras.backend.clear_session()
gc.collect()
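`MixupGenerator` is an external class; for readers unfamiliar with mixup, a minimal stand-in (not the implementation used here) that convex-combines pairs of samples and their one-hot labels could look like:

import numpy as np

def mixup_batches(x, y, batch_size=32, alpha=0.2):
    # x: (N, H, W, C) images, y: (N, num_classes) one-hot labels.
    n = len(x)
    while True:
        i = np.random.permutation(n)[:batch_size]
        j = np.random.permutation(n)[:batch_size]
        lam = np.random.beta(alpha, alpha, size=(batch_size, 1, 1, 1))
        x_mix = lam * x[i] + (1 - lam) * x[j]
        lam2d = lam.reshape(batch_size, 1)
        y_mix = lam2d * y[i] + (1 - lam2d) * y[j]
        yield x_mix, y_mix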
Example 7
(X_train, y_train), (X_test, y_test) = mnist.load_data()

X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

X_train = normalize(X_train)
X_test = normalize(X_test)

with open("X_test.pickle", "wb") as f:
    pickle.dump(X_test, f)
with open("y_test.pickle", "wb") as f:
    pickle.dump(y_test, f)

model = Sequential()
model.add(Conv2D(64, (3, 3), input_shape=X_train.shape[1:], activation="relu"))
model.add(MaxPooling2D(2, 2))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D(2, 2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64, activation="relu"))
model.add(Dense(10, activation="softmax"))

model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])

model.fit(X_train, y_train, batch_size=32, epochs=1)

saved_model.save(model, "mnist/1/")

print(model.evaluate(X_test, y_test))
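The `mnist/1/` layout is the directory structure TensorFlow Serving expects: a numeric version folder under the model name. Assuming a server started with `--model_name=mnist` on the default REST port, a prediction request might look like this (host, port, and slice are illustrative):

import json
import urllib.request

payload = {"instances": X_test[:1].tolist()}  # one 28x28x1 test image
req = urllib.request.Request(
    "http://localhost:8501/v1/models/mnist:predict",
    data=json.dumps(payload).encode(),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp)["predictions"])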
Example 8
                  # (the opening of this model.fit call is truncated in the source)
                  verbose=0,
                  shuffle=True,
                  batch_size=32,
                  validation_split=0.1)
        end = time.time() - start
        times_per_run.append(end)

        y_pred = model.predict(X_test)[:, 0]
        # for testing per sample forward pass time
        # model.evaluate(X_test, normalize(y_test), batch_size=X_test.shape[0])
        y_pred = denormalize_np(y_pred, 0, N)
        ypred_per_run.append([(x.item(), y.item())
                              for x, y in zip(y_test, y_pred)])
        qerror = np.mean(q_loss_np(y_test, y_pred))

        if qerror < last_qerror:
            best_model = model
            last_qerror = qerror
        qerror_per_run.append(qerror)

    print("Average q-error: {:.2f}, Best q-error: {:.2f}".format(
        np.mean(qerror_per_run), last_qerror))

    with open("pred.json", "w") as output_file:
        json.dump(ypred_per_run, output_file)

    if config["model_file"].endswith("h5"):
        best_model.save(config["model_file"])
    else:
        saved_model.save(best_model, config["model_file"])
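The branch above picks Keras's single-file HDF5 format when the configured path ends in `h5`, and the SavedModel directory format otherwise; loading mirrors the same split. A sketch reusing the same config key:

import tensorflow as tf

model_file = config["model_file"]
if model_file.endswith("h5"):
    best_model = tf.keras.models.load_model(model_file)  # HDF5 single file
else:
    best_model = tf.saved_model.load(model_file)  # SavedModel directory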
Example 9
    def save_agent(self, save_dir):
        # Save the actor and critic networks as separate SavedModels;
        # os.path.join avoids depending on a trailing separator in save_dir.
        saved_model.save(self.actor, os.path.join(save_dir, "actor"))
        saved_model.save(self.critic, os.path.join(save_dir, "critic"))
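Restoring the agent would mirror the save, one load per network. A sketch assuming the same directory convention (`tf.saved_model.load` returns inference-only objects, not full Keras models):

    def load_agent(self, save_dir):
        self.actor = saved_model.load(os.path.join(save_dir, "actor"))
        self.critic = saved_model.load(os.path.join(save_dir, "critic"))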