Example No. 1
0
            # NOTE(review): commented-out launch history of past experiment runs
            # (GAN training, domain-adaptation variants, WDGRL, testing).
            # Consider deleting — version control already preserves this history.
            # trainnewGAN_task.execute("6", "eccv", "exp_pretrained_F_IL")
            # train_da.execute("8", "eccv", "all_da_v2")
            # train_da_shared.execute("1", "eccv", "all_da_shared_E1E12_l1_10")
            # train_da_shared.execute("9", "eccv", "all_da_shared_E1E12_l1_25_aug")
            # train_da_shared.execute("3", "eccv", "all_da_shared_E1E12_l1_25_new_nopretrained")
            # train_wdgrl_withoutgen.execute("0", "eccv", "exp_wdgrl_better")
            # train_da_no5.execute("8", "eccv", "all_da_aug_no5_E1E12")
            # testGAN.execute("7", "eccv", "all_da_orig")
            # trainnewGAN.execute("4", "eccv", "exp_reconstruct")
            # trainnewGAN.execute("4", "eccv", "experiment_1")

        # Dispatch of the requested single process with hard-coded settings
        # (GPU "0", experiment batch "eccv", alias "experiment_1").
        # NOTE(review): this chunk starts mid-chain — the preceding
        # `args.single_process` branches are outside the visible lines.
        if args.single_process == 'validation':
            # Validate "experiment_1" on the "SeqVal" dataset.
            execute_validation("0", "eccv", "experiment_1", "SeqVal")

        if args.single_process == 'drive':
            # Drive "experiment_1" in 'Town02' (presumably a CARLA town —
            # TODO confirm against execute_drive's signature).
            execute_drive("0", "eccv", "experiment_1", 'Town02')

    else:

        # TODO: GPU capacity of course varies from card to card; for now we
        # just assume at least a K40 (the newest Voltas may be underused).
        # NOTE: these costs are based on TensorFlow experience — PyTorch may
        # allow packing more jobs per GPU.
        # Relative GPU cost model: each GPU provides 'gpu_value' capacity
        # units and each task type consumes the listed amount, so one GPU can
        # host one training job plus one validation OR drive job
        # (2 + 1.5 <= 3.5).
        allocation_parameters = {
            'gpu_value': 3.5,
            'train_cost': 2,
            'validation_cost': 1.5,
            'drive_cost': 1.5
        }

        params = {
Example No. 2
0
        elif args.single_process == 'validation':
            # A single dataset is passed as a bare value rather than a
            # one-element list; otherwise the whole list is forwarded.
            if len(args.validation_datasets) == 1:
                execute_validation("0", args.folder, args.exp,
                                   args.validation_datasets[0], args.model,
                                   False)
            else:
                execute_validation("0", args.folder, args.exp,
                                   args.validation_datasets, args.model, False)

        elif args.single_process == 'drive':

            # Normalize the requested environment names, then drive only the
            # first one. The trailing False presumably disables output
            # suppression for this foreground run — TODO confirm against
            # execute_drive's signature.
            driving_environments = fix_driving_environments(
                list(args.driving_environments))
            execute_drive("0",
                          args.folder,
                          args.exp,
                          driving_environments[0],
                          False,
                          no_screen=args.no_screen)
        else:

            # NOTE(review): BUG — raising a parenthesized plain string is a
            # TypeError in Python 3 ("exceptions must derive from
            # BaseException"). This should be e.g.
            # `raise Exception("Invalid name ...")`, matching the sibling
            # snippet's `raise Exception(...)` form. Not fixed here because
            # this fragment is truncated mid-structure.
            raise (
                " Invalid name for single process, chose from (train, validation, test)"
            )

    else:

        # TODO: GPU capacity of course varies from card to card; for now we
        # just assume at least a K40 (the newest Voltas may be underused).
        # NOTE: these costs are based on TensorFlow experience — PyTorch may
        # allow packing more jobs per GPU.
        # (Dict literal is truncated in this chunk — body continues past the
        # visible lines.)
        allocation_parameters = {
Example No. 3
0
            # Foreground (non-suppressed) training run on GPU "0".
            # NOTE(review): this chunk starts mid-branch — the condition
            # guarding this call (presumably `args.single_process == 'train'`)
            # is outside the visible lines.
            execute_train(gpu="0",
                          exp_batch=args.folder,
                          exp_alias=args.exp,
                          suppress_output=False,
                          number_of_workers=args.number_of_workers)

        elif args.single_process == 'validation':
            # Only the first configured validation dataset is used here.
            execute_validation(gpu="0",
                               exp_batch=args.folder,
                               exp_alias=args.exp,
                               dataset=args.validation_datasets[0],
                               suppress_output=False)

        elif args.single_process == 'drive':
            # Foreground run: disable output suppression in the shared
            # parameter dict before launching, then drive only the first
            # requested environment.
            drive_params['suppress_output'] = False
            execute_drive("0", args.folder, args.exp,
                          list(args.driving_environments)[0], drive_params)

        else:
            # Unknown process name: fail loudly with the accepted options.
            # NOTE(review): the message lists "test" but the visible branches
            # are train/validation/drive — the message is likely stale
            # (and "chose" should read "choose"); confirm before changing
            # since callers/tests may match on this exact string.
            raise Exception(
                "Invalid name for single process, chose from (train, validation, test)"
            )

    else:
        ####
        # MODE 2: Folder execution. Execute train/validation/drive for all experiments on
        #         a certain training folder
        ####
        # We set by default that each gpu has a value of 3.5, allowing a training and
        # a driving/validation
        # (Dict literal is truncated in this chunk — remaining cost entries
        # continue past the visible lines.)
        allocation_parameters = {
            'gpu_value': args.gpu_value,