Example #1
import os


def data():
    """
    Load the training and validation sets with their labels, without any data
    augmentation: we don't want augmentation to influence hyperparameter optimisation.
    :return: training set, validation set, training labels, validation labels
    """
    baseDir = r"D:\Arnaud\data_croutinet\ottawa\data"
    trainDir = os.path.join(baseDir, "train/train.csv")
    validationDir = os.path.join(baseDir, "validation/validation.csv")
    trainLeft, trainRight, trainLabels = loadAsScalars(trainDir)
    validationLeft, validationRight, validationLabels = loadAsScalars(
        validationDir)

    X_train = [trainLeft, trainRight]
    y_train = trainLabels
    X_test = [validationLeft, validationRight]
    y_test = validationLabels

    return X_train, X_test, y_train, y_test
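
# A plausible way to plug data() into a Hyperas search (assumption: the project
# used Hyperas, as the "hyperas optimised" comment in Example #6 suggests;
# createModel is a hypothetical model-building function that uses Hyperas'
# {{choice(...)}} templates for the hyperparameters being tuned).
from hyperopt import Trials, tpe
from hyperas import optim

best_run, best_model = optim.minimize(model=createModel,  # hypothetical builder
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=20,
                                      trials=Trials())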
Example #2
import os
import random as rd

import numpy as np
from keras.models import load_model

#Define the img size
IMG_SIZE = 224
INPUT_DIM = (IMG_SIZE, IMG_SIZE, 3)

#Define directories
baseDir = r"D:\Arnaud\data_croutinet\ottawa\data"
trainDir = os.path.join(baseDir, "validation/validation.csv")
roads_loubna_dir = os.path.join(baseDir, "roads_loubna")
models_dir = os.path.join(baseDir, "models")

model = load_model(os.path.join(models_dir, "modelWithDataAugmentation5.h5"))

left, right, labels, namesLeft, namesRight = loadAsScalars(trainDir)

croutipointsLeft = [
    CroutiPoint(namesLeft[i], left[i]) for i in range(len(namesLeft))
]
croutipointsRight = [
    CroutiPoint(namesRight[i], right[i]) for i in range(len(namesRight))
]

# Score each left point against 30 randomly sampled contenders
for i in range(len(croutipointsLeft)):
    print(i)
    contenders = [rd.randint(0, len(croutipointsLeft) - 1) for k in range(30)]
    predictions = model.predict([
        np.array([croutipointsLeft[i].pixels for k in range(30)]),
        np.array([croutipointsLeft[contenders[k]].pixels for k in range(30)])
    ])
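    # Hedged continuation (assumption: the original, truncated here, aggregated
    # the 30 pairwise outputs into one score per point; the `score` attribute is
    # hypothetical):
    croutipointsLeft[i].score = float(np.mean(predictions))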
Example #3
def plotScoresAndHeatmap():
    validationLeft, validationRight, validationLabels, namesLeft, namesRight = loadAsScalars(
        validationDir)

    validationLeft_score = model.predict(validationLeft)
    validationLeft_score = validationLeft_score * (-1)

    validationRight_score = model.predict(validationRight)
    validationRight_score = validationRight_score * (-1)

    prediction = []
    correct = 0

    for i in range(len(validationLeft_score)):
        if validationLeft_score[i] > validationRight_score[i]:
            if validationLabels[i] == 0:
                prediction.append("prédiction vraie")
                correct += 1
            else:
                prediction.append("prédiction fausse")
        else:
            if validationLabels[i] == 0:
                prediction.append("prédiction fausse")
            else:
                prediction.append("prédiction vraie")
                correct += 1

    dict = {0: "left winner", 1: "right winner"}

    for i in range(len(validationLabels)):
        plt.figure()

        leftImage = validationLeft[i]
        rightImage = validationRight[i]
        leftPath = os.path.join(roads_loubna_dir, namesLeft[i])
        rightPath = os.path.join(roads_loubna_dir, namesRight[i])
        heatmapLeft = heatmap(leftImage, leftPath)
        heatmapRight = heatmap(rightImage, rightPath)
        cv2.imwrite(
            os.path.join(
                activation_dir, "validationSetLeft" + str(i) + "score" +
                str(validationLeft_score[i]) + ".jpg"), heatmapLeft)
        cv2.imwrite(
            os.path.join(
                activation_dir, "validationSetRight" + str(i) + "score" +
                str(validationRight_score[i]) + ".jpg"), heatmapRight)

        plt.subplot(2, 2, 1)
        plt.title("score : " + str(validationLeft_score[i]))
        plt.imshow(loadImage(leftPath))

        plt.suptitle(winnerLabel[validationLabels[i]] + " : " + prediction[i],
                     fontsize=16)

        plt.subplot(2, 2, 2)
        plt.title("score : " + str(validationRight_score[i]))
        plt.imshow(loadImage(rightPath))

        plt.subplot(2, 2, 3)
        plt.imshow(
            loadImage(
                os.path.join(
                    activation_dir, "validationSetLeft" + str(i) + "score" +
                    str(validationLeft_score[i]) + ".jpg")))

        plt.subplot(2, 2, 4)
        plt.imshow(
            loadImage(
                os.path.join(
                    activation_dir, "validationSetRight" + str(i) + "score" +
                    str(validationRight_score[i]) + ".jpg")))

        plt.savefig(os.path.join(check_dir, "validationSet" + str(i) + ".jpg"))
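
# Self-contained sketch (toy data, not from the original) of the decision rule
# used above: after negating the raw network outputs, the side with the higher
# score is predicted to win, and label 0 means "left winner".
import numpy as np

left_scores = np.array([0.9, -0.2, 0.4])
right_scores = np.array([0.1, 0.3, 0.6])
labels = np.array([0, 1, 1])  # 0 = left winner, 1 = right winner
predicted = np.where(left_scores > right_scores, 0, 1)
print("pairwise accuracy:", np.mean(predicted == labels))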
Example #4
import os

import numpy as np
from keras.models import load_model

#Define the img size
IMG_SIZE = 224
INPUT_DIM = (IMG_SIZE, IMG_SIZE, 3)

#Define directories
baseDir = r"D:\Arnaud\data_croutinet\ottawa\data"
trainDir = os.path.join(baseDir, "train/train.csv")
validationDir = os.path.join(baseDir, "validation/validation.csv")
testDir = os.path.join(baseDir, "test/test.csv")
roads_loubna_dir = os.path.join(baseDir, "roads_loubna")
ranking_dir = os.path.join(baseDir, "rankingNoSigmoid")
activation_dir = os.path.join(ranking_dir, "activation")
check_dir = os.path.join(ranking_dir, "checkdata")
models_dir = os.path.join(baseDir, "models")

model = load_model(os.path.join(models_dir, "scoreNetworkNoSigmoid.h5"))

validationLeft, validationRight, validationLabels, namesLeft, namesRight = loadAsScalars(
    validationDir)


def multiplePredict(data, nb_predict):
    """
    Compute multiple predictions on the same dataset
    :param data: the array of pictures
    :param nb_predict: the number of predictions you want
    :return: the mean of the predictions for each value
    """
    scores_array = []

    for i in range(nb_predict):
        scores_array.append(model.predict(data))

    scores_array = np.array(scores_array)
    return np.mean(scores_array, axis=0)
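
# Hypothetical usage: average 10 forward passes over the left validation images
# (only useful if the network keeps stochastic layers such as dropout active at
# prediction time; otherwise every pass returns the same values).
mean_scores = multiplePredict(validationLeft, 10)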
Example #5
import os

from keras.models import load_model

#Define the img size
IMG_SIZE = 224
INPUT_DIM = (IMG_SIZE, IMG_SIZE, 3)

#Define directories
baseDir = r"D:\Arnaud\data_croutinet\ottawa\data"
trainDir = os.path.join(baseDir, "train/train.csv")
validationDir = os.path.join(baseDir, "validation/validation.csv")

base_network_save = os.path.join(baseDir, "scoreNetworkRetrain2.h5")
ranking_network_save = os.path.join(baseDir, "rankingNetworkRetrain.h5")

base_network_save2 = os.path.join(baseDir, "scoreNetworkRetrain3.h5")

#Load the training and validation sets with labels as scalars between 0 and 1
trainLeft, trainRight, trainLabels = loadAsScalars(trainDir)
validationLeft, validationRight, validationLabels = loadAsScalars(
    validationDir)

#Load the pretrained ScoreCroutinet and wrap it in the ranking meta-network
base_network = load_model(base_network_save)
model = create_meta_network(INPUT_DIM, base_network)

#We fit the model to the training set
history = model.fit([trainLeft, trainRight],
                    trainLabels,
                    batch_size=16,
                    epochs=30,
                    validation_data=([validationLeft,
                                      validationRight], validationLabels))
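
# A plausible next step, given the save paths defined above (assumption: the
# original snippet is truncated before the retrained models are written to disk):
model.save(ranking_network_save)
base_network.save(base_network_save2)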
Example #6
duelsDF['left_id'] = roads_loubna_dir + "/" + duelsDF['left_id']
duelsDF['right_id'] = roads_loubna_dir + "/" + duelsDF['right_id']
#print(duelsDF)

mask_yes = duelsDF['winner'] == '1'
yes = duelsDF[mask_yes]

mask_no = duelsDF['winner'] == '0'
no = duelsDF[mask_no]

# Here we build the Hyperas-optimised version of ScoreCroutinet
base_network = create_base_network(INPUT_DIM)
# On top of it we add the ranking network, which teaches ScoreCroutinet how to score
model = create_meta_network(INPUT_DIM, base_network)

validationLeft, validationRight, validationLabels = loadAsScalars(
    validationDir)

# Number of training iterations for batch training
n_iter = 150
for iteration in range(n_iter):
    print(iteration / n_iter)

    # Sample positive and negative cases for the current iteration. It is faster
    # to fit on a batch of n yes/no pairs and augment that batch with
    # datagen_class_aug_test than to use fit_generator with datagen_class_aug_test
    # and small batch sizes.
    # (pd.concat replaces DataFrame.append, removed in pandas 2.0; pandas is
    # assumed imported as pd.)
    yesno = pd.concat([yes.sample(500), no.sample(500)])
    print('yesno created')
    labels = dict(
        zip([str(x) for x in yesno.index.tolist()],
            [1 if x == '1' else 0 for x in yesno.winner.tolist()]))
    print('labels created')
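
    # Hedged sketch of how the iteration might finish (the original is truncated
    # here; loadPairBatch is a hypothetical helper that turns the sampled duels
    # into two image arrays, and np is assumed imported as numpy):
    leftBatch, rightBatch = loadPairBatch(yesno)  # hypothetical helper
    batchLabels = np.array([labels[str(idx)] for idx in yesno.index])
    loss = model.train_on_batch([leftBatch, rightBatch], batchLabels)
    print('batch loss:', loss)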