Beispiel #1
0
def train_get_loss(imagettes_train,
                   nbr_entrainement,
                   name_train,
                   nbr=5,
                   coef=1.3,
                   flip=True):
    """Resume training of the YOLO model from its latest checkpoint and return the loss.

    Builds an augmented tf.data pipeline from the given image patches,
    restores the most recent checkpoint found under
    ``path_model_saved + name_train``, runs ``nbr_entrainement`` training
    passes and saves a new checkpoint.

    Parameters
    ----------
    imagettes_train : pandas.DataFrame
        Table of training image patches. It is copied before use so the
        caller's frame is not mutated by ``common_2.read_imagettes``.
    nbr_entrainement : int
        Number of training iterations forwarded to ``common_2.train``.
    name_train : str
        Checkpoint sub-directory name, appended to the global
        ``path_model_saved``.
    nbr : int, optional
        Augmentation count forwarded to ``common_2.read_imagettes``.
    coef : float, optional
        Augmentation coefficient forwarded to ``common_2.read_imagettes``.
    flip : bool, optional
        Whether flipped patches are also generated.

    Returns
    -------
    The loss history object produced by ``common_2.train``.

    Notes
    -----
    Relies on module-level globals: ``batch_size``, ``path_model_saved``,
    ``common_2``, ``config``, ``model``, ``np``, ``tf`` and ``chdir``.
    """
    # Work on a copy so read_imagettes cannot mutate the caller's DataFrame.
    imagettes_train_copy = imagettes_train.copy()
    images_2, labels_2, labels2_2 = common_2.read_imagettes(
        imagettes_train_copy, nbr=nbr, coef=coef, flip=flip)
    # Scale pixel values to [0, 1] for the network input.
    images_2 = np.array(images_2, dtype=np.float32) / 255
    labels_2 = np.array(labels_2, dtype=np.float32)
    train_ds_2 = tf.data.Dataset.from_tensor_slices(
        (images_2, labels_2)).batch(batch_size)
    Model = model.model(config.nbr_classes, config.nbr_boxes, config.cellule_y,
                        config.cellule_x)

    chdir(path_model_saved)
    string = path_model_saved + name_train
    optimizer = tf.keras.optimizers.Adam(learning_rate=1E-4)
    train_loss = tf.keras.metrics.Mean()
    # Restore the most recent checkpoint; restore() is a no-op when
    # latest_checkpoint() finds nothing (first run).
    checkpoint = tf.train.Checkpoint(model=Model)
    checkpoint.restore(tf.train.latest_checkpoint(string))
    LOSS = common_2.train(train_ds_2, nbr_entrainement, string, labels2_2,
                          optimizer, Model, train_loss, checkpoint)
    checkpoint.save(file_prefix=string)

    return LOSS
Beispiel #2
0
    "/mnt/BigFast/VegaFastExtension/Rpackages/c3po_all/c3po/Images_aquises/imagettes.csv"
)
# Map raw class names to reference label ids.
imagettes = common.to_reference_labels(imagettes, "classe")
# Split the patch table into train / test filename partitions.
index_train, index_test = common.split(imagettes)
imagettes_train = imagettes[imagettes["filename"].isin(index_train)]
# NOTE(review): placeholder run name ("name to fill in") — presumably meant
# to be replaced before running; verify against the checkpoint layout.
name_train = "Nom_a renseigner"
string = path_model_saved + name_train
nbr_entrainement = 1

# Augmentation parameters forwarded to common_2.read_imagettes.
nbr = 5
coef = 1.3
flip = False

# Work on a copy so read_imagettes cannot mutate the original DataFrame.
imagettes_train_copy = imagettes_train.copy()
images_2, labels_2, labels2_2 = common_2.read_imagettes(imagettes_train_copy,
                                                        nbr=nbr,
                                                        coef=coef,
                                                        flip=flip)
images_2 = np.array(images_2, dtype=np.float32) / 255  # scale pixels to [0, 1]
labels_2 = np.array(labels_2, dtype=np.float32)
train_ds_2 = tf.data.Dataset.from_tensor_slices(
    (images_2, labels_2)).batch(batch_size)
Model = model.model(config.nbr_classes, config.nbr_boxes, config.cellule_y,
                    config.cellule_x)

# Build a second dataset the same way from the *test* partition.
# NOTE(review): despite the "_train_2" names, this uses index_test — looks
# like an evaluation set; confirm intent.
imagettes_train_2 = imagettes[imagettes["filename"].isin(index_test)]
imagettes_train_2_copy = imagettes_train_2.copy()
images_2_2, labels_2_2, labels2_2_2 = common_2.read_imagettes(
    imagettes_train_2_copy, nbr=nbr, coef=coef, flip=flip)
images_2_2 = np.array(images_2_2, dtype=np.float32) / 255
labels_2_2 = np.array(labels_2_2, dtype=np.float32)
train_ds_2_2 = tf.data.Dataset.from_tensor_slices(
Beispiel #3
0
import common_2 as common
import config
import model
import pandas as pd
from sklearn.model_selection import train_test_split

# Training setup: load the patch table, build the training dataset,
# instantiate the YOLO model and its optimizer/checkpoint.
batch_size = 16
nbr_entrainement = 1

imagettes = pd.read_csv(
    "/mnt/VegaSlowDataDisk/c3po/Images_aquises/imagettes.csv")
# Map raw class names to reference label ids, then split by filename.
imagettes = common.to_reference_labels(imagettes, "classe")
index_train, index_test = common.split(imagettes)
imagettes_train = imagettes[imagettes["filename"].isin(index_train)]

images, labels, labels2 = common.read_imagettes(imagettes_train, nbr=1)
images = np.array(images, dtype=np.float32) / 255  # scale pixels to [0, 1]
labels = np.array(labels, dtype=np.float32)
print("Nbr images:", len(images))
train_ds = tf.data.Dataset.from_tensor_slices(
    (images, labels)).batch(batch_size)

Model = model.model(config.nbr_classes, config.nbr_boxes, config.cellule_y,
                    config.cellule_x)

# Directory used as the checkpoint file prefix.
string = "/mnt/VegaSlowDataDisk/c3po_interface_mark/Materiels/Models/Yolo_models/training/"

optimizer = tf.keras.optimizers.Adam(learning_rate=1E-4)
# BUG FIX: the original created the checkpoint twice, the second time with
# ``model=model`` — the imported *module*, not the ``Model`` instance —
# which clobbered the correct Checkpoint. Build it once, wrapping the
# model instance.
checkpoint = tf.train.Checkpoint(model=Model)
train_loss = tf.keras.metrics.Mean()
Beispiel #4
0
import common
import common_2

batch_size = 16

# Root directory containing the saved Yolo models.
path_model_saved = "/mnt/VegaSlowDataDisk/c3po_interface_mark/Materiels/Models/Yolo_models/"

imagettes = pd.read_csv(
    "/mnt/BigFast/VegaFastExtension/Rpackages/c3po_all/c3po/Images_aquises/imagettes.csv"
)
# Map raw class names to reference label ids, then split by filename.
imagettes = common.to_reference_labels(imagettes, "classe")
index_train, index_test = common.split(imagettes)
# Only the first 5 training filenames are kept here — presumably a quick
# smoke-test subset; confirm intent.
imagettes_train = imagettes[imagettes["filename"].isin(index_train[:5])]

images_2, labels_2, labels2_2 = common_2.read_imagettes(imagettes_train,
                                                        nbr=5,
                                                        coef=1.3,
                                                        flip=True)
images_2 = np.array(images_2, dtype=np.float32) / 255  # scale pixels to [0, 1]
labels_2 = np.array(labels_2, dtype=np.float32)
train_ds_2 = tf.data.Dataset.from_tensor_slices(
    (images_2, labels_2)).batch(batch_size)

Model = model.model(config.nbr_classes, config.nbr_boxes, config.cellule_y,
                    config.cellule_x)

# note: many trained models are stored in different sub-directories;
# each sub-directory corresponds to one Yolo model
string = path_model_saved + "generateur_avec_flip_1000/"

optimizer = tf.keras.optimizers.Adam(learning_rate=1E-4)
checkpoint = tf.train.Checkpoint(model=Model)