Example #1
def validation_loop(val_loader, network, epoch, parameters, timer_epoch):
    """
    validation_loop performs one pass over the validation set.
    :param val_loader: Dataloader which contains the input and target of the validation dataset
    :param network: Network that will be trained
    :param epoch: Current epoch of the program
    :param parameters: Parameters object of the network
    :param timer_epoch: The time since the beginning of the epoch
    :return: The mean validation_error over the entire validation set. This function also saves this error.
    """
    # Validation_error contains the error on the validation set
    validation_error = 0

    # Save the error of the validation DataSet
    for i, (x_val_batch, y_val_batch, _) in enumerate(val_loader):

        if torch.cuda.is_available():  # TODO It is useless to create a Variable for y
            x_val_batch, y_val_batch = Variable(x_val_batch.cuda()), Variable(y_val_batch.cuda())
        else:
            x_val_batch, y_val_batch = Variable(x_val_batch), Variable(y_val_batch)

        validation_error += Save_import.save_error(x=x_val_batch, y=y_val_batch,
                                                   network=network,
                                                   epoch=epoch,
                                                   set_type="validation",
                                                   parameters=parameters)

        with open(parameters.path_print, 'a') as txtfile:
            txtfile.write(
                "\nEpoch : " + str(epoch) + ". Batch : " + str(i) + ".\nValidation error : " + str(
                    validation_error / (i + 1)) +
                ".\nTime total batch : " + Save_import.time_to_string(time.time() - timer_epoch) + "\n \n")

    # Divide by the number of mini-batches to get the mean error
    return validation_error / (i + 1)
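
For contrast, a minimal self-contained sketch of the same validation pass in current PyTorch, where torch.no_grad() replaces the deprecated Variable wrapper; the linear model, random data, and criterion are stand-ins, not this project's GridNet pipeline:

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
network = nn.Linear(10, 2).to(device)  # stand-in for the trained network
criterion = nn.CrossEntropyLoss()      # stand-in for Save_import.save_error

val_loader = DataLoader(
    TensorDataset(torch.randn(32, 10), torch.randint(0, 2, (32,))),
    batch_size=8)

validation_error = 0.0
network.eval()
with torch.no_grad():  # no gradients are needed during validation
    for i, (x_val_batch, y_val_batch) in enumerate(val_loader):
        x_val_batch = x_val_batch.to(device)
        y_val_batch = y_val_batch.to(device)
        validation_error += criterion(network(x_val_batch), y_val_batch).item()

# Divide by the number of mini-batches to get the mean validation error
print(validation_error / (i + 1))
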
Example #2
def main_test_dataset(parameters, network, position_crop, dataset='val'):
    """
    :param position_crop: List of all positions at which the image will be cropped, in the format [(i, j, w, h), ...],
                          where (i, j) is the top-left pixel of the crop and (w, h) its width and height
    :param dataset: Type of Dataset (train, val, test)
    :param parameters: Parameters object of the network
    :param network: Network that will be trained
    :return: Nothing, but saves the predicted images of the chosen dataset.
    """

    # transforms_test
    parameters.transforms_test = transforms.Compose([
        transforms.ToTensor(),
    ])

    # Import the dataset with the transformation
    test_dataset = Save_import.cityscapes_create_dataset(quality='fine',
                                                         mode=dataset,
                                                         parameters=parameters,
                                                         transform=parameters.transforms_test,
                                                         only_image=False)

    with open("/home_expes/kt82128h/GridNet/Python_Files/Python_print_test.txt", 'w') as txtfile:
        txtfile.write(str(test_dataset.imgs[0]) + "\n" + str(test_dataset.imgs[1]))

    test_loop(network=network,
              parameters=parameters,
              test_dataset=test_dataset,
              dataset=dataset,
              position_crop=position_crop)
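
A small runnable sketch of the transform pipeline built above; cityscapes_create_dataset is project-specific, so a blank PIL image stands in for a dataset sample:

from PIL import Image
from torchvision import transforms

transforms_test = transforms.Compose([
    transforms.ToTensor(),  # PIL image in [0, 255] -> float tensor (C, H, W) in [0, 1]
])

image = Image.new("RGB", (2048, 1024))  # stand-in for a Cityscapes image
tensor = transforms_test(image)
print(tensor.shape)                     # torch.Size([3, 1024, 2048])
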
Example #3
def main_test(path_learning, dataset="train"):
    """
    :param path_learning: Path to the checkpoint of the trained network
    :param dataset: Type of Dataset (train, val, test)
    :return: Nothing, but saves the predicted images of the chosen dataset.
    """
    # Load the trained Network
    parameters, network = Save_import.load_from_checkpoint(
        path_checkpoint=path_learning)

    w = 801
    h = 801

    # We cannot put the entire image in one batch.
    # position_crop is a list with the position of each crop that will be made
    position_crop = []
    for k in range(2 * (int(parameters.width_image_initial / w)) + 1):
        for l in range(2 * (int(parameters.height_image_initial / h)) + 1):
            # i and w are associated with the x axis; j and h with the y axis
            i = k * w // 2
            j = l * h // 2
            if i + w > parameters.width_image_initial:
                i = parameters.width_image_initial - w
            if j + h > parameters.height_image_initial:
                j = parameters.height_image_initial - h
            position_crop.append((i, j, w, h))

    # Add 20 random crops on top of the regular grid
    for r in range(20):
        i = random.randint(0, parameters.width_image_initial - w)
        j = random.randint(0, parameters.height_image_initial - h)
        position_crop.append((i, j, w, h))

    # Compute and save the predictions and the original images
    end_name = Test_dataset.main_test_dataset(parameters=parameters,
                                              network=network,
                                              position_crop=position_crop,
                                              dataset=dataset)

    # Take the images that have been generated and make a colorized copy to make them easier to interpret
    Manage_image.transform_image_to_RGB(path_data="./Result/",
                                        mode=dataset,
                                        from_picture=0,
                                        to_picture=11,
                                        end_name="prediction.png")
Example #4
def validation_loop(val_loader, network, epoch, parameters, timer_epoch):
    """
    validation_loop performs one pass over the validation set.
    :param val_loader: Dataloader which contains the input and target of the validation dataset
    :param network: Network that will be trained
    :param epoch: Current epoch of the program
    :param parameters: Parameters object of the network
    :param timer_epoch: The time since the beginning of the epoch
    :return: The mean validation_error over the entire validation set. This function also saves this error.
    """

    # Save the error of the validation DataSet
    for i, (x_val_batch, y_val_batch) in enumerate(val_loader):

        if torch.cuda.is_available():
            x_val_batch, y_val_batch = Variable(x_val_batch.cuda()), Variable(
                y_val_batch.cuda())
        else:
            x_val_batch, y_val_batch = Variable(x_val_batch), Variable(
                y_val_batch)

        loss = Loss_Error.criterion_pretrain(y_estimated=network(x_val_batch),
                                             y=y_val_batch,
                                             parameters=parameters)

        loss = ["validation", epoch, loss.data[0]]

        # Save the loss
        with open(
                parameters.path_CSV + "CSV_loss_" + parameters.name_network +
                str(parameters.train_number) + ".csv", 'a') as csvfile:
            writer = csv.writer(csvfile, quoting=csv.QUOTE_NONNUMERIC)
            writer.writerows([loss])

        with open(parameters.path_print, 'a') as txtfile:
            txtfile.write("\nEpoch : " + str(epoch) + ". Batch : " + str(i) +
                          ".\nValidation error : " + str(loss[2]) +
                          ".\nTime total batch : " +
                          Save_import.time_to_string(time.time() -
                                                     timer_epoch) + "\n \n")
Example #5
    def hitbox(mouse_pos, liste_rectangle):
        # The helpers below read the loop indices i and x from the enclosing scope.
        def hitbox_general(mouse_pos):
            # True when the mouse is inside the i-th rectangle
            if (liste_rectangle[i].x_depart <= mouse_pos[0]
                    <= liste_rectangle[i].x_depart + liste_rectangle[i].largeur
                    and liste_rectangle[i].y_depart <= mouse_pos[1]
                    <= liste_rectangle[i].y_depart + liste_rectangle[i].hauteur):
                return True

        def hitbox_select(mouse_pos):
            # True when the mouse is inside the x-th tile (a square of side u)
            if (list_tile[x].x <= mouse_pos[0]
                    <= list_tile[x].x + list_tile[x].u
                    and list_tile[x].y <= mouse_pos[1]
                    <= list_tile[x].y + list_tile[x].u):
                return True

        def hitbox_centre(mouse_pos):
            # True when the mouse is inside the x-th cell of the centre grid
            if (list_car_centre[x].x_depart <= mouse_pos[0]
                    <= list_car_centre[x].x_depart + list_car_centre[x].largeur
                    and list_car_centre[x].y_depart <= mouse_pos[1]
                    <= list_car_centre[x].y_depart + list_car_centre[x].hauteur):
                return True

        for i in range(0, len(liste_rectangle)):

            if hitbox_general(mouse_pos):

                print(liste_rectangle[i].name)

                if liste_rectangle[i].name == "select":

                    for x in range(0, len(list_tile)):

                        if hitbox_select(mouse_pos):

                            print("i :x ", list_tile[x].x)
                            print(list_tile[x].name)
                            user.selected_block_sprite = x

                if liste_rectangle[i].name == "centre":

                    for x in range(0, len(list_car_centre)):

                        if hitbox_centre(mouse_pos):

                            # The row and column indices are encoded at fixed
                            # positions in the sprite name
                            k = list_car_centre[x].name
                            ligne = user.position_liste - 5 + int(k[11])
                            colonne = int(k[12])
                            map.current_map[ligne][
                                colonne] = user.selected_block_sprite

                if liste_rectangle[i].name == "plus":

                    if len(map.current_map) >= user.position_liste + 4:
                        map_edit = ajout_ligne_liste(liste=map.current_map)
                        user.position_liste += 1

                if liste_rectangle[i].name == "moins":

                    if user.position_liste > 5:
                        user.position_liste -= 1

                if liste_rectangle[i].name == "load":

                    map.current_map, user.save_file_name = Save_import.save_select(
                    )

                if liste_rectangle[i].name == "save":
                    user.save_file_name = Save_import.create_save_file(
                        map=map.current_map, previous_file=user.save_file_name)

        print(user.position_liste, "pos liste")
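
All three hitbox helpers reduce to the same point-in-rectangle test: the mouse is inside when both coordinates fall between the rectangle's origin and its origin plus its size. A self-contained sketch, with Rect standing in for the liste_rectangle elements:

from collections import namedtuple

Rect = namedtuple("Rect", ["x_depart", "y_depart", "largeur", "hauteur"])

def inside(mouse_pos, rect):
    # Chained comparisons express "origin <= position <= origin + size"
    return (rect.x_depart <= mouse_pos[0] <= rect.x_depart + rect.largeur
            and rect.y_depart <= mouse_pos[1] <= rect.y_depart + rect.hauteur)

print(inside((15, 25), Rect(10, 20, 30, 40)))  # True
print(inside((5, 25), Rect(10, 20, 30, 40)))   # False
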
Example #6
def batch_loop(optimizer, train_loader, network, epoch, parameters, timer_batch, timer_epoch, inter_union=None):
    """
    :param optimizer: The optimizer that contains the parameters of the Adam optimizer
    :param train_loader: Dataloader which contains the input and target of the train dataset
    :param network: Network that will be trained
    :param epoch: Current epoch of the program
    :param parameters: Parameters object of the network
    :param timer_batch: The time since the beginning of the batch
    :param timer_epoch: The time since the beginning of the epoch
    :param inter_union: Optional accumulator for intersection and union counts (unused here)
    :return: Nothing, but updates the network and saves the train error
    """

    train_error = 0

    # Loop over the mini-batches; the size of a mini-batch is defined in the train_loader
    for i, (x_batch, y_batch, _) in enumerate(train_loader):

        # zero the gradient buffers
        optimizer.zero_grad()

        # Transform into Variable
        if torch.cuda.is_available():
            x_batch, y_batch = Variable(x_batch.cuda()), Variable(y_batch.cuda())
        else:
            x_batch, y_batch = Variable(x_batch), Variable(y_batch)

        # Compute the forward function
        y_batch_estimated = network(x_batch)

        # Get the error
        loss = Loss_Error.criterion(y_estimated=y_batch_estimated,
                                    y=y_batch,
                                    parameters=parameters,
                                    global_IoU_modif=False)

        # Compute the backward function
        loss.backward()

        # Do the update according to the optimizer defined above
        optimizer.step()

        # Save error of the training DataSet
        train_error += Save_import.save_error(x=x_batch, y=y_batch,
                                              network=network,
                                              epoch=epoch,
                                              set_type="train",
                                              parameters=parameters,
                                              loss=loss,
                                              y_estimated=y_batch_estimated)

        # Similar to a "print" but in a textfile
        with open(parameters.path_print, 'a') as txtfile:
            txtfile.write(
                "\nEpoch : " + str(epoch) + ". Batch : " + str(i) +
                ".\nTrain_Error : " + str(train_error / (i + 1)) +
                "\n" + "Time batch : " + Save_import.time_to_string(time.time() - timer_batch) +
                ".\nTime total batch : " + Save_import.time_to_string(time.time() - timer_epoch) + "\n \n")

        timer_batch = time.time()

    return ()
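
The core of batch_loop is the standard zero_grad / forward / loss / backward / step cycle. A minimal self-contained sketch in current PyTorch (no Variable), with a placeholder model and random data instead of the project's GridNet and Cityscapes loader:

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

network = nn.Linear(10, 2)
optimizer = optim.Adam(network.parameters(), lr=0.01)
criterion = nn.CrossEntropyLoss()  # stand-in for Loss_Error.criterion
train_loader = DataLoader(
    TensorDataset(torch.randn(32, 10), torch.randint(0, 2, (32,))),
    batch_size=8, shuffle=True)

for x_batch, y_batch in train_loader:
    optimizer.zero_grad()                        # zero the gradient buffers
    loss = criterion(network(x_batch), y_batch)  # forward pass and error
    loss.backward()                              # backward pass
    optimizer.step()                             # update the weights
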
Example #7
def main(path_continue_learning=None, total_epoch=0, new_name=None):
    """
    :param path_continue_learning: Path where the network is already saved
                                   (don't use it at the beginning of the training)
    :param total_epoch: Total number of epochs needed (don't use it at the beginning of the training)
    :param new_name: New name of the network, if we want to reuse a network that was already trained.
    :return: Nothing, but trains the network, saves CSV files for the error, and saves the network regularly
    """
    # Manual seed of the network to have reproducible experiments
    torch.manual_seed(26542461)

    # If the network was already trained, we import it
    if path_continue_learning is not None:
        # Load the trained Network
        parameters, network = Save_import.load_from_checkpoint(path_checkpoint=path_continue_learning)

        # Here we can change some parameters; the only necessary one is total_epoch
        parameters.epoch_total = total_epoch
        # parameters.learning_rate_decay = - 4.5 * 10 ** (-5)
        # parameters.batch_size = 4
        # parameters.batch_size_val = 4
        # parameters.learning_rate = 0.01
        # parameters.momentum_IoU = 0
        # parameters.loss = "IoU_Lovasz"

        # Put the weights on the GPU
        if torch.cuda.is_available():
            parameters.weight_grad = parameters.weight_grad.cuda()

        # If a new name is defined, we create the associated new CSV files and change the name of the network
        if new_name is not None:
            # Init the csv file that will store the error, this time we make a copy of the existing error
            Save_import.duplicated_csv(path_CSV=parameters.path_CSV,
                                       old_name_network=parameters.name_network,
                                       new_name_network=new_name,
                                       train_number=parameters.train_number)
            parameters.name_network = new_name

        with open(parameters.path_print, 'w') as txtfile:
            txtfile.write('\n               The program will continue \n')

    # If the network was not trained, we start from scratch
    else:
        # Define the weight
        weight_grad = torch.FloatTensor([2.381681e+09, 3.856594e+08, 1.461642e+09, 4.291781e+07,
                                         5.597591e+07, 8.135516e+07, 1.328548e+07, 3.654657e+07,
                                         1.038652e+09, 7.157456e+07, 2.527450e+08, 7.923985e+07,
                                         9.438758e+06, 4.460595e+08, 1.753254e+07, 1.655341e+07,
                                         1.389560e+07, 6.178567e+06, 2.936571e+07])

        sum_grad = weight_grad.sum()
        # Normalize and then take the inverse
        for i in range(weight_grad.size(0)):
            weight_grad[i] = sum_grad / weight_grad[i]
        # Normalize again and multiply by the number of classes
        weight_grad = (weight_grad / weight_grad.sum()) * weight_grad.size(0)

        # If you want to keep the weights above, comment out the next line
        weight_grad = torch.FloatTensor([1 for i in range(19)])

        # Define all the parameters
        parameters = Parameters.Parameters(nColumns=8,
                                           nFeatMaps=[16, 32, 64, 128, 256],
                                           nFeatureMaps_init=3,
                                           number_classes=20 - 1,
                                           label_DF=Label.create_label(),

                                           width_image_initial=2048, height_image_initial=1024,
                                           size_image_crop=401,

                                           dropFactor=0.1,
                                           learning_rate=0.01,
                                           learning_rate_decay=1 * (10 ** (-2)),
                                           weight_decay=0,
                                           beta1=0.9,
                                           beta2=0.999,
                                           epsilon=1 * 10 ** (-8),
                                           batch_size=5,
                                           batch_size_val=5,
                                           epoch_total=400,
                                           actual_epoch=0,
                                           ratio=(1, 1),
                                           weight_grad=weight_grad,
                                           loss="focal_loss",
                                           momentum_IoU=0,

                                           path_save_net="./Model/",
                                           name_network="focal_loss2",
                                           train_number=0,
                                           path_CSV="./CSV/",
                                           path_data="/home_expes/collections/Cityscapes/",
                                           path_print="./Python_print_focal_loss.txt",
                                           path_result="./Result",
                                           num_workers=2)
        # Define the GridNet
        network = GridNet_structure.gridNet(nInputs=parameters.nFeatureMaps_init,
                                            nOutputs=parameters.number_classes,
                                            nColumns=parameters.nColumns,
                                            nFeatMaps=parameters.nFeatMaps,
                                            dropFactor=parameters.dropFactor)

        with open(parameters.path_print, 'w') as txtfile:
            txtfile.write('\n               Start of the program \n')

        # Init the csv file that will store the error
        Save_import.init_csv(name_network=parameters.name_network,
                             train_number=parameters.train_number,
                             path_CSV=parameters.path_CSV,
                             path_print=parameters.path_print)

    # Import both DataSets with the transformation
    train_dataset = Save_import.cityscapes_create_dataset(quality='fine',
                                                          mode='train',
                                                          transform=parameters.transforms_input,
                                                          transform_target=parameters.transforms_output,
                                                          parameters=parameters)

    val_dataset = Save_import.cityscapes_create_dataset(quality='fine',
                                                        mode='val',
                                                        transform=parameters.transforms_input,
                                                        transform_target=parameters.transforms_output,
                                                        parameters=parameters)

    # Create the DataLoaders used by PyTorch
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=parameters.batch_size,
                                               shuffle=True,
                                               num_workers=parameters.num_workers,
                                               drop_last=False)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=parameters.batch_size_val,
                                             shuffle=True,
                                             num_workers=parameters.num_workers,
                                             drop_last=False)

    # If there is more than one GPU we can use them
    if torch.cuda.device_count() > 1:
        with open(parameters.path_print, 'a') as txtfile:
            txtfile.write("\nLet's use " + str(torch.cuda.device_count()) + " GPUs! \n")
        network = torch.nn.DataParallel(network)
    else:
        with open(parameters.path_print, 'a') as txtfile:
            txtfile.write("\nWe don t have more than one GPU \n")
        # ... But we still use it in this case ? ... TODO try without to check if it is working
        network = torch.nn.DataParallel(network)

    # Put the network on GPU if possible
    if torch.cuda.is_available():
        network.cuda()
    else:
        with open(parameters.path_print, 'a') as txtfile:
            txtfile.write("\nAccording to torch Cuda is not available \n")

    # Train the network
    train(network=network,
          parameters=parameters,
          train_loader=train_loader,
          val_loader=val_loader)
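
The class-weight computation in main inverts per-class pixel counts (rare classes get large weights) and rescales so the weights sum to the number of classes. A self-contained sketch with illustrative counts:

import torch

pixel_counts = torch.FloatTensor([2.4e9, 3.9e8, 1.5e9, 4.3e7, 5.6e7])

weight_grad = pixel_counts.sum() / pixel_counts  # invert the class frequencies
weight_grad = (weight_grad / weight_grad.sum()) * weight_grad.size(0)  # mean weight = 1

print(weight_grad, weight_grad.sum())  # the weights sum to the number of classes
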
Example #8
def train(parameters, network, train_loader, val_loader):
    """
    :param parameters: Parameters object of the network
    :param network: Network that will be trained
    :param train_loader: Dataloader which contains the input and target of the train dataset
    :param val_loader: Dataloader which contains the input and target of the validation dataset
    :return: Nothing, but modifies the weights of the network and calls save_error to store the error.
    """

    # Store the time at the beginning of the training
    timer_init = time.time()

    # create your optimizer
    optimizer = optim.Adam(params=network.parameters(),
                           lr=parameters.learning_rate,
                           betas=(parameters.beta1, parameters.beta2),
                           eps=parameters.epsilon,
                           weight_decay=parameters.weight_decay)

    # High value just to initialize this variable:
    # validation_error_min will store the lowest validation error
    validation_error_min = 9999

    # Store the index of the next checkpoint. This value is 0 or 1. We always keep one checkpoint untouched
    # while the other one is changed.
    index_save_regular = 0

    # Loop from the current epoch (not 0 if we already trained) to the last epoch
    initial_epoch = parameters.actual_epoch
    for epoch in range(initial_epoch, parameters.epoch_total):
        # Store the time at the beginning of each epoch
        timer_epoch = time.time()
        timer_batch = time.time()

        batch_loop(optimizer=optimizer,
                   train_loader=train_loader,
                   network=network,
                   epoch=epoch,
                   parameters=parameters,
                   timer_batch=timer_batch,
                   timer_epoch=timer_epoch)

        validation_error = validation_loop(val_loader=val_loader,
                                           network=network,
                                           epoch=epoch,
                                           parameters=parameters,
                                           timer_epoch=timer_epoch)

        # checkpoint will save the network if needed
        validation_error_min, index_save_regular = Save_import.checkpoint(validation_error=validation_error,
                                                                          validation_error_min=validation_error_min,
                                                                          index_save_regular=index_save_regular,
                                                                          epoch=epoch,
                                                                          network=network,
                                                                          parameters=parameters,
                                                                          optimizer=optimizer)

        # Update the optimizer
        # optimizer.param_groups[0]['lr'] = parameters.learning_rate / 10

        # Similar to a "print" but in a text file
        with open(parameters.path_print, 'a') as txtfile:
            txtfile.write("\n              End of Epoch :" + str(epoch) + "/" + str(parameters.epoch_total - 1) +
                          ". Validation Loss : " + str(validation_error) +
                          ".\nTime Epoch :" + Save_import.time_to_string(time.time() - timer_epoch) +
                          ".\nTime total : " + Save_import.time_to_string(time.time() - timer_init) +
                          ".\n \n")

        if (epoch % 10) == 0:
            Save_import.organise_CSV(path_CSV=parameters.path_CSV,
                                     name_network=parameters.name_network,
                                     train_number=parameters.train_number)

        # Increment the current epoch
        parameters.actual_epoch += 1

    # Similar to a "print" but in a text file
    with open(parameters.path_print, 'a') as txtfile:
        txtfile.write("Finish. Total time : " + Save_import.time_to_string(time.time() - timer_init) +
                      "\n")
    return ()
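
Save_import.checkpoint is not shown here; a plausible minimal sketch of the logic it is described as implementing, saving whenever the validation error improves and alternating between two regular checkpoint files so one always stays untouched (names and paths are illustrative):

import torch
import torch.nn as nn

network = nn.Linear(10, 2)

def checkpoint(validation_error, validation_error_min, index_save_regular, epoch):
    state = {"epoch": epoch, "state_dict": network.state_dict()}
    if validation_error < validation_error_min:
        validation_error_min = validation_error
        torch.save(state, "best.pth")  # keep the best network so far
    torch.save(state, "regular%d.pth" % index_save_regular)
    return validation_error_min, 1 - index_save_regular  # alternate 0 / 1

validation_error_min, index_save_regular = checkpoint(0.8, 9999, 0, epoch=0)
validation_error_min, index_save_regular = checkpoint(0.5, validation_error_min,
                                                      index_save_regular, epoch=1)
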
Example #9
def batch_loop(optimizer, train_loader, network, epoch, parameters,
               timer_batch, timer_epoch):
    """
    :param optimizer: The optimizer that contains the parameters of the Adam optimizer
    :param train_loader: Dataloader which contains the input and target of the train dataset
    :param network: Network that will be trained
    :param epoch: Current epoch of the program
    :param parameters: Parameters object of the network
    :param timer_batch: The time since the beginning of the batch
    :param timer_epoch: The time since the beginning of the epoch
    :return: Nothing, but updates the network and saves the train error
    """

    # Loop over the mini-batches; the size of a mini-batch is defined in the train_loader
    for i, (x_batch, y_batch) in enumerate(train_loader):

        # zero the gradient buffers
        optimizer.zero_grad()

        # Transform into Variable
        if torch.cuda.is_available():
            x_batch, y_batch = Variable(x_batch.cuda()), Variable(
                y_batch.cuda())
        else:
            x_batch, y_batch = Variable(x_batch), Variable(y_batch)

        # Compute the forward function
        y_batch_estimated = network(x_batch)

        #count = 0
        #for child in network.children():
        #if count == 0:
        #for param in child.parameters():
        #with open(parameters.path_print, 'a') as txtfile:
        #txtfile.write("param of linear1"+str(param)+"\n")
        #break
        #count += 1

        # Get the error
        loss = Loss_Error.criterion_pretrain(y_estimated=y_batch_estimated,
                                             y=y_batch,
                                             parameters=parameters)

        # Compute the backward function
        loss.backward()

        # Do the update according to the optimizer defined above
        optimizer.step()

        # Update the optimizer
        # optimizer.param_groups[0]['lr'] = parameters.learning_rate/(1 + (epoch-390) * parameters.learning_rate_decay)

        # Save error of the training DataSet
        loss = ["train", epoch, loss.data[0]]

        # Save the loss
        with open(
                parameters.path_CSV + "CSV_loss_" + parameters.name_network +
                str(parameters.train_number) + ".csv", 'a') as csvfile:
            writer = csv.writer(csvfile, quoting=csv.QUOTE_NONNUMERIC)
            writer.writerows([loss])

        # Similar to a "print" but in a textfile
        with open(parameters.path_print, 'a') as txtfile:
            txtfile.write(
                "\nEpoch : " + str(epoch) + ". Batch : " + str(i) +
                ".\nTrain_Error : " + str(loss[2]) + "\n" + "Time batch : " +
                Save_import.time_to_string(time.time() - timer_batch) +
                ".\nTime total batch : " +
                Save_import.time_to_string(time.time() - timer_epoch) +
                "\n \n")

        timer_batch = time.time()

    return ()
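
The commented-out learning-rate update above works because PyTorch optimizers expose their hyper-parameters through param_groups, so the learning rate can be decayed in place between epochs. A self-contained sketch with illustrative values:

import torch.nn as nn
import torch.optim as optim

network = nn.Linear(10, 2)
learning_rate, learning_rate_decay = 0.01, 1e-2
optimizer = optim.Adam(network.parameters(), lr=learning_rate)

for epoch in range(3):
    # Decay the learning rate in place before each epoch
    optimizer.param_groups[0]['lr'] = learning_rate / (1 + epoch * learning_rate_decay)
    print(epoch, optimizer.param_groups[0]['lr'])
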
Example #10
def main(path_continue_learning=None, total_epoch=0, new_name=None):
    """
    :param path_continue_learning: Path where the network is already saved
                                   (don't use it at the beginning of the training)
    :param total_epoch: Total number of epochs needed (don't use it at the beginning of the training)
    :param new_name: New name of the network, if we want to reuse a network that was already trained.
    :return: Nothing, but trains the network, saves CSV files for the error, and saves the network regularly
    """
    # Manual seed of the network to have reproducible experiments
    torch.manual_seed(945682461)

    # If the network was already trained, we import it
    if path_continue_learning is not None:
        # Load the trained Network
        parameters, network = Save_import.load_from_checkpoint(
            path_checkpoint=path_continue_learning)

        # Here we can change some parameters; the only necessary one is total_epoch
        parameters.epoch_total = total_epoch
        # parameters.learning_rate_decay = 0.5 * (10 ** (-2))
        # parameters.batch_size = 5
        # parameters.batch_size_val = 5
        parameters.learning_rate = 0.001
        # parameters.momentum_IoU = 0.9

        # If a new name is defined, we create the associated new CSV files and change the name of the network
        if new_name is not None:
            # Init the csv file that will store the error, this time we make a copy of the existing error
            Save_import.duplicated_csv(
                path_CSV=parameters.path_CSV,
                old_name_network=parameters.name_network,
                new_name_network=new_name,
                train_number=parameters.train_number)
            parameters.name_network = new_name

        with open(parameters.path_print, 'w') as txtfile:
            txtfile.write('\n               The program will continue \n')

    # If the network was not trained, we start from scratch
    else:
        # Define all the parameters
        parameters = Parameters.Parameters(
            nColumns=6,
            nFeatMaps=[16, 32, 64, 128],
            nFeatureMaps_init=3,
            number_classes=20 - 1,
            label_DF=Label.create_label(),
            width_image_initial=2048,
            height_image_initial=1024,
            size_image_crop=401,
            dropFactor=0.1,
            learning_rate=0.0001,
            learning_rate_decay=1 * (10**(-2)),
            weight_decay=0,
            beta1=0.9,
            beta2=0.999,
            epsilon=1 * 10**(-8),
            batch_size=40,
            batch_size_val=40,
            epoch_total=100,
            actual_epoch=0,
            ratio=(1, 1),
            weight_grad=torch.FloatTensor([1 for i in range(19)]),
            loss="cross_entropy_pretrain",
            momentum_IoU=0,
            pretrain=True,
            path_save_net="./Model/",
            name_network="resnet18_1000classes",
            train_number=0,
            path_CSV="./CSV/",
            # path_data="/home_expes/collections/Cityscapes/",
            path_data="/home_expes/collections/imagenet_1000_classes/",
            path_print="./Python_print_resnet18_1000classes.txt",
            path_result="./Result",
            num_workers=2)
        # Define the GridNet
        network = GridNet_structure.gridNet_imagenet(
            nInputs=parameters.nFeatureMaps_init,
            nOutputs=parameters.number_classes,
            nColumns=parameters.nColumns,
            nFeatMaps=parameters.nFeatMaps,
            dropFactor=parameters.dropFactor)
        # Note: this second assignment overrides the GridNet defined just above
        network = GridNet_structure.ResNet18(
            nOutputs=len(Label.create_imagenet_class()))

        with open(parameters.path_print, 'w') as txtfile:
            txtfile.write('\n               Start of the program \n')

        # Init the csv file that will store the error
        Save_import.init_csv(name_network=parameters.name_network,
                             train_number=parameters.train_number,
                             path_CSV=parameters.path_CSV,
                             path_print=parameters.path_print)

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # Import both DataSets with the transformation
    train_dataset = Save_import.cityscapes_create_dataset_pretrain(
        mode="train",
        parameters=parameters,
        sliding_crop=None,
        transform=transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    val_dataset = Save_import.cityscapes_create_dataset_pretrain(
        mode="val",
        parameters=parameters,
        sliding_crop=None,
        transform=transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    # Create the DataLoaders used by PyTorch
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=parameters.batch_size,
        shuffle=True,
        num_workers=parameters.num_workers,
        drop_last=False)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=parameters.batch_size_val,
        shuffle=True,
        num_workers=parameters.num_workers,
        drop_last=False)

    # If there is more than one GPU we can use them
    if torch.cuda.device_count() > 1:
        with open(parameters.path_print, 'a') as txtfile:
            txtfile.write("\nLet's use " + str(torch.cuda.device_count()) +
                          " GPUs! \n")
        network = torch.nn.DataParallel(network)

    else:
        with open(parameters.path_print, 'a') as txtfile:
            txtfile.write("\nWe don t have more than one GPU \n")
            # ... But we still use it in this case ? ... TODO try without to check if it is still working
            # network = torch.nn.DataParallel(network)

    # Put the network on GPU if possible
    if torch.cuda.is_available():
        network.cuda()

    else:
        with open(parameters.path_print, 'a') as txtfile:
            txtfile.write("\nAccording to torch Cuda is not even available \n")

    # Train the network
    train(network=network,
          parameters=parameters,
          train_loader=train_loader,
          val_loader=val_loader)
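
A self-contained sketch of the GPU-placement pattern that closes both main functions: wrap the model in DataParallel only when several GPUs are visible, then move it to the GPU if one is available. With an empty device list, DataParallel's forward simply calls the wrapped module, which bears on the TODO question above:

import torch
import torch.nn as nn

network = nn.Linear(10, 2)

if torch.cuda.device_count() > 1:
    network = nn.DataParallel(network)  # split each mini-batch across the GPUs

if torch.cuda.is_available():
    network = network.cuda()

print(type(network).__name__)  # DataParallel or Linear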