def prepare_evaluate_interactive_line_world(base_config_file: str,
                                            job_config_object: CondorJobConfig,
                                            number_of_jobs: int,
                                            output_path: str,
                                            model_directories: List[str] = None) -> List[CondorJob]:
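    """Evaluate every model directory in each of 'number_of_jobs' line worlds (line_worlds/model_900 and up)."""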
    if number_of_jobs == 0:
        return []
    if model_directories is None:
        models_directory = os.path.join(output_path, 'models')
        model_directories = [os.path.join(models_directory, d)
                             for d in os.listdir(models_directory)]

    # evaluate all models in 'number_of_jobs' line worlds
    worlds = [f'line_worlds/model_{i:03d}' for i in range(900, 900 + number_of_jobs)
              for _ in range(len(model_directories))]

    config_files = create_configs(base_config=base_config_file,
                                  output_path=output_path,
                                  adjustments={
                                      '[\"data_saver_config\"][\"saving_directory_tag\"]':
                                          [f'{os.path.basename(d)}_{i}' for i in range(900, 900 + number_of_jobs)
                                           for d in model_directories],
                                      '[\"load_checkpoint_dir\"]':
                                          model_directories * number_of_jobs,
                                      translate_keys_to_string(['environment_config',
                                                                'ros_config',
                                                                'ros_launch_config',
                                                                'world_name']): worlds
                                  })
    return create_jobs_from_job_config_files(job_config_files=config_files,
                                             job_config_object=job_config_object)


def prepare_domain_adaptation(base_config_file: str,
                              job_config_object: CondorJobConfig,
                              number_of_jobs: int,
                              output_path: str) -> List[CondorJob]:
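    """Grid over domain-adaptation criteria, epsilons and learning rates; 'number_of_jobs' is unused."""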

    domain_adaptation_criterions = ['MMDLossSimple', 'Coral']
    epsilons = [0.8, 0.5, 0.1, 0.0]
    learning_rates = [0.001]
    model_paths = [os.path.join(output_path, 'models', f'{dac}', f'eps_{eps}', f'lr_{lr}')
                   for dac in domain_adaptation_criterions
                   for eps in epsilons
                   for lr in learning_rates]

    adjustments = {translate_keys_to_string(['output_path']): model_paths,
                   translate_keys_to_string(['trainer_config', 'learning_rate']):
                       [lr for dac in domain_adaptation_criterions
                        for eps in epsilons
                        for lr in learning_rates],
                   translate_keys_to_string(['trainer_config', 'domain_adaptation_criterion']):
                       [dac for dac in domain_adaptation_criterions
                        for eps in epsilons
                        for lr in learning_rates],
                   translate_keys_to_string(['trainer_config', 'epsilon']):
                       [eps for dac in domain_adaptation_criterions
                        for eps in epsilons
                        for lr in learning_rates]}
    config_files = create_configs(base_config=base_config_file,
                                  output_path=output_path,
                                  adjustments=adjustments)
    return create_jobs_from_job_config_files(config_files,
                                             job_config_object=job_config_object)


def prepare_lr_architecture_line_world(base_config_file: str,
                                       job_config_object: CondorJobConfig,
                                       number_of_jobs: int,
                                       output_path: str) -> List[CondorJob]:
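    """Grid over architecture, learning rate, batch normalisation and loss for the line world."""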
    learning_rates = [0.001]
    #architectures = ['auto_encoder_deeply_supervised_confidence']

    architectures = ['bc_deeply_supervised_auto_encoder',
                     'auto_encoder_deeply_supervised_share_weights']
    # 'auto_encoder_deeply_supervised_share_weights_confidence'
    batch_norm = [False]
    loss = ['WeightedBinaryCrossEntropyLoss']  # ['WeightedBinaryCrossEntropyLoss', 'MSELoss']

    model_paths = [os.path.join(output_path, 'models', arch, 'bn' if bn else 'default', ls, f'lr_{lr}')
                   for arch in architectures
                   for lr in learning_rates
                   for bn in batch_norm
                   for ls in loss]
    adjustments = {translate_keys_to_string(['output_path']): model_paths,
                   translate_keys_to_string(['trainer_config', 'learning_rate']):
                       [lr for arch in architectures
                        for lr in learning_rates
                        for bn in batch_norm
                        for ls in loss],
                   translate_keys_to_string(['architecture_config', 'architecture']):
                       [arch for arch in architectures
                        for lr in learning_rates
                        for bn in batch_norm
                        for ls in loss],
                   translate_keys_to_string(['architecture_config', 'batch_normalisation']):
                       [bn for arch in architectures
                        for lr in learning_rates
                        for bn in batch_norm
                        for ls in loss],
                   translate_keys_to_string(['trainer_config', 'criterion']):
                       [ls for arch in architectures
                        for lr in learning_rates
                        for bn in batch_norm
                        for ls in loss],
                   translate_keys_to_string(['trainer_config', 'criterion_args_str']):
                       ['' if ls == 'MSELoss' else 'beta=0.9'
                        for arch in architectures
                        for lr in learning_rates
                        for bn in batch_norm
                        for ls in loss],
                   translate_keys_to_string(['trainer_config', 'factory_key']):
                       ['DeepSupervisionConfidence' if 'confidence' in arch
                        else 'DeepSupervision'
                        for arch in architectures
                        for lr in learning_rates
                        for bn in batch_norm
                        for ls in loss]}
    config_files = create_configs(base_config=base_config_file,
                                  output_path=output_path,
                                  adjustments=adjustments)
    return create_jobs_from_job_config_files(config_files,
                                             job_config_object=job_config_object)


def prepare_data_collection(base_config_file: str,
                            job_config_object: CondorJobConfig,
                            number_of_jobs: int,
                            output_path: str) -> List[CondorJob]:
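    """Collect data with 'number_of_jobs' parallel runners, one saving directory tag per runner."""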
    config_files = create_configs(
        base_config=base_config_file,
        output_path=output_path,
        adjustments={
            '[\"data_saver_config\"][\"saving_directory_tag\"]':
            [f'runner_{i}' for i in range(number_of_jobs)]
        })
    return create_jobs_from_job_config_files(
        job_config_files=config_files, job_config_object=job_config_object)


def prepare_default(base_config_file: str, job_config_object: CondorJobConfig,
                    number_of_jobs: int, output_path: str) -> List[CondorJob]:
    """Launch number of condor_jobs performing script with base_config"""
    if number_of_jobs == 0:
        return []
    default_configs = create_configs(
        base_config=base_config_file,
        output_path=output_path,
        adjustments={
            '[\"output_path\"]':
            [output_path + '_' + str(i) for i in range(number_of_jobs)],
        } if number_of_jobs > 1 else {})
    return create_jobs_from_job_config_files(
        default_configs, job_config_object=job_config_object)


def prepare_param_study(base_config_file: str,
                        job_config_object: CondorJobConfig,
                        number_of_jobs: int,
                        output_path: str) -> List[CondorJob]:
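    """Train 'number_of_jobs' runs that differ only in their random seed."""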
    seeds = [132 * n + 5100 for n in range(number_of_jobs)]
    model_paths = [os.path.join(output_path, f'sd_{seed}') for seed in seeds]
    adjustments = {translate_keys_to_string(['architecture_config', 'random_seed']): seeds,
                   translate_keys_to_string(['output_path']): model_paths}
    config_files = create_configs(base_config=base_config_file,
                                  output_path=output_path,
                                  adjustments=adjustments)
    return create_jobs_from_job_config_files(config_files,
                                             job_config_object=job_config_object)


def prepare_lr_line_world(base_config_file: str,
                          job_config_object: CondorJobConfig,
                          number_of_jobs: int,
                          output_path: str) -> List[CondorJob]:
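    """Sweep the learning rate for the line world."""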
    learning_rates = [0.01, 0.001, 0.0001, 0.00001]
    model_paths = [os.path.join(output_path, 'models', f'lr_{lr}')
                   for lr in learning_rates]
    adjustments = {translate_keys_to_string(['output_path']): model_paths,
                   translate_keys_to_string(['trainer_config', 'learning_rate']): learning_rates}
    config_files = create_configs(base_config=base_config_file,
                                  output_path=output_path,
                                  adjustments=adjustments)
    return create_jobs_from_job_config_files(config_files,
                                             job_config_object=job_config_object)


def prepare_dag_train_evaluate(base_config_files: List[str],
                               job_configs: List[CondorJobConfig],
                               number_of_jobs: List[int],
                               output_path: str) -> Dag:
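    """Build a DAG that trains number_of_jobs[0] seeds and evaluates each trained model interactively."""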
    jobs = []
    # Add train jobs
    seeds = [123 * n + 5100 for n in range(number_of_jobs[0])]
    model_paths = [
        os.path.join(output_path, 'models', f'seed_{seed}') for seed in seeds
    ]
    config_files = create_configs(
        base_config=base_config_files[0],
        output_path=output_path,
        adjustments={
            '[\"architecture_config\"][\"random_seed\"]': seeds,
            '[\"output_path\"]': model_paths,
        })
    jobs.extend(
        create_jobs_from_job_config_files(job_config_files=config_files,
                                          job_config_object=job_configs[0]))
    # Add evaluate jobs
    jobs.extend(
        prepare_evaluate_interactive(base_config_file=base_config_files[1],
                                     job_config_object=job_configs[1],
                                     number_of_jobs=number_of_jobs[1],
                                     output_path=output_path,
                                     model_directories=model_paths))

    dag_lines = '# prepare_dag_train_evaluate: \n'
    for index, job in enumerate(jobs[:number_of_jobs[0]]):
        dag_lines += f'JOB training_{index} {job.job_file} \n'
    for index, job in enumerate(jobs[number_of_jobs[0]:number_of_jobs[0] +
                                     number_of_jobs[1]]):
        dag_lines += f'JOB evaluation_{index} {job.job_file} \n'

    number_of_links = min(number_of_jobs)
    for index in range(number_of_links):
        dag_lines += f'PARENT training_{index} CHILD evaluation_{index} \n'

    for index in range(number_of_jobs[0]):
        dag_lines += f'Retry training_{index} 2 \n'
    for index in range(number_of_jobs[1]):
        dag_lines += f'Retry evaluation_{index} 3 \n'

    return Dag(lines_dag_file=dag_lines,
               dag_directory=os.path.join(output_path, 'dag',
                                          get_date_time_tag()))


def prepare_optimiser_study(base_config_file: str,
                            job_config_object: CondorJobConfig,
                            number_of_jobs: int,
                            output_path: str) -> List[CondorJob]:
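    """Sweep the optimizer (SGD, Adam, Adadelta, RMSprop) over 'number_of_jobs' seeds."""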
    optimizers = ['SGD', 'Adam', 'Adadelta', 'RMSprop']
    seeds = [123 * n + 5961 for n in range(number_of_jobs)]
    model_paths = [os.path.join(output_path, 'models', f'sd_{seed}/opt_{opt}') for opt in optimizers for seed in seeds]
    adjustments = {translate_keys_to_string(['architecture_config',
                                             'random_seed']): seeds * len(optimizers),
                   translate_keys_to_string(['output_path']): model_paths,
                   translate_keys_to_string(['trainer_config', 'optimizer']):
                       [opt for opt in optimizers for _ in range(len(seeds))]}
    config_files = create_configs(base_config=base_config_file,
                                  output_path=output_path,
                                  adjustments=adjustments)
    return create_jobs_from_job_config_files(config_files,
                                             job_config_object=job_config_object)


def prepare_batch_size_study(base_config_file: str,
                             job_config_object: CondorJobConfig,
                             number_of_jobs: int,
                             output_path: str) -> List[CondorJob]:
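    """Sweep the batch size over 'number_of_jobs' seeds."""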
    batch_sizes = [50, 100, 500, 1000, 5000]
    seeds = [123 * n + 5961 for n in range(number_of_jobs)]
    model_paths = [os.path.join(output_path, 'models', f'sd_{seed}/bs_{bs}') for bs in batch_sizes for seed in seeds]
    adjustments = {translate_keys_to_string(['architecture_config',
                                             'random_seed']): seeds * len(batch_sizes),
                   translate_keys_to_string(['output_path']): model_paths,
                   translate_keys_to_string(['trainer_config', 'data_loader_config', 'batch_size']):
                       [bs for bs in batch_sizes for _ in range(len(seeds))]}
    config_files = create_configs(base_config=base_config_file,
                                  output_path=output_path,
                                  adjustments=adjustments)
    return create_jobs_from_job_config_files(config_files,
                                             job_config_object=job_config_object)


def prepare_phi_study(base_config_file: str,
                      job_config_object: CondorJobConfig,
                      number_of_jobs: int,
                      output_path: str) -> List[CondorJob]:
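    """Sweep the phi key (gae, reward-to-go, return, value-baseline) over 'number_of_jobs' seeds."""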
    phi_keys = ["gae", "reward-to-go", "return", "value-baseline"]
    seeds = [123 * n + 5961 for n in range(number_of_jobs)]
    model_paths = [os.path.join(output_path, 'models', f'sd_{seed}/phi_{x}') for x in phi_keys for seed in seeds]
    adjustments = {translate_keys_to_string(['architecture_config',
                                             'random_seed']): seeds * len(phi_keys),
                   translate_keys_to_string(['output_path']): model_paths,
                   translate_keys_to_string(['trainer_config', 'phi_key']):
                       [x for x in phi_keys for _ in range(len(seeds))]}
    config_files = create_configs(base_config=base_config_file,
                                  output_path=output_path,
                                  adjustments=adjustments)
    return create_jobs_from_job_config_files(config_files,
                                             job_config_object=job_config_object)


def prepare_loss_study(base_config_file: str,
                       job_config_object: CondorJobConfig,
                       number_of_jobs: int,
                       output_path: str) -> List[CondorJob]:
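    """Sweep the training criterion (MSE, L1, SmoothL1) over 'number_of_jobs' seeds."""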
    losses = ['MSELoss', 'L1Loss', 'SmoothL1Loss']
    seeds = [123 * n + 5961 for n in range(number_of_jobs)]
    model_paths = [os.path.join(output_path, 'models', f'sd_{seed}/loss_{loss}') for loss in losses for seed in seeds]
    adjustments = {translate_keys_to_string(['architecture_config',
                                             'random_seed']): seeds * len(losses),
                   translate_keys_to_string(['output_path']): model_paths,
                   translate_keys_to_string(['trainer_config', 'criterion']):
                       [loss for loss in losses for _ in range(len(seeds))]}
    config_files = create_configs(base_config=base_config_file,
                                  output_path=output_path,
                                  adjustments=adjustments)
    return create_jobs_from_job_config_files(config_files,
                                             job_config_object=job_config_object)


def prepare_train(base_config_file: str, job_config_object: CondorJobConfig,
                  number_of_jobs: int, output_path: str) -> List[CondorJob]:
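    """Train 'number_of_jobs' models that differ only in their random seed."""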
    if number_of_jobs == 0:
        return []
    seeds = [123 * n + 5100 for n in range(number_of_jobs)]
    model_paths = [
        os.path.join(output_path, 'models', f'seed_{seed}') for seed in seeds
    ]
    config_files = create_configs(
        base_config=base_config_file,
        output_path=output_path,
        adjustments={
            '[\"architecture_config\"][\"random_seed\"]': seeds,
            '[\"output_path\"]': model_paths,
        })
    return create_jobs_from_job_config_files(
        job_config_files=config_files, job_config_object=job_config_object)


def prepare_entropy_study(base_config_file: str,
                          job_config_object: CondorJobConfig,
                          number_of_jobs: int,
                          output_path: str) -> List[CondorJob]:
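    """Sweep the entropy coefficient over 'number_of_jobs' seeds."""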
    entropy_vals = [0.0, 0.1, -0.1]
    seeds = [123 * n + 5100 for n in range(number_of_jobs)]
    model_paths = [os.path.join(output_path, 'models', f'sd_{seed}/entr_{x}') for x in entropy_vals for seed in seeds]
    adjustments = {translate_keys_to_string(['architecture_config',
                                            'random_seed']): seeds * len(entropy_vals),
                   translate_keys_to_string(['output_path']): model_paths,
                   translate_keys_to_string(['trainer_config', 'entropy_coefficient']):
                       [x for x in entropy_vals for _ in range(len(seeds))]}
    config_files = create_configs(base_config=base_config_file,
                                  output_path=output_path,
                                  adjustments=adjustments)
    return create_jobs_from_job_config_files(config_files,
                                             job_config_object=job_config_object)


def prepare_data_collection_line_world(base_config_file: str,
                                       job_config_object: CondorJobConfig,
                                       number_of_jobs: int,
                                       output_path: str) -> List[CondorJob]:
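    """Collect data with one runner per line world (line_worlds/model_750 and up)."""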

    config_files = create_configs(base_config=base_config_file,
                                  output_path=output_path,
                                  adjustments={
                                      '[\"data_saver_config\"][\"saving_directory_tag\"]':
                                          [f'runner_{i}' for i in range(number_of_jobs)],
                                      translate_keys_to_string(['environment_config',
                                                                'ros_config',
                                                                'ros_launch_config',
                                                                'world_name']):
                                          [f'line_worlds/model_{(750 + i):03d}' for i in range(number_of_jobs)],
                                  })
    return create_jobs_from_job_config_files(job_config_files=config_files,
                                             job_config_object=job_config_object)


def prepare_architecture_study(base_config_file: str,
                               job_config_object: CondorJobConfig,
                               number_of_jobs: int,
                               output_path: str) -> List[CondorJob]:
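    """Compare actor-critic architectures over 'number_of_jobs' seeds; adversarial variants train with APPO."""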
    seeds = [123 * n + 5100 for n in range(number_of_jobs)]
    architectures = ['adversarial_actor_critic', 'fleeing_actor_critic', 'tracking_actor_critic']
    model_paths = [os.path.join(output_path, 'models', a, f'sd_{seed}') for a in architectures for seed in seeds]
    adjustments = {translate_keys_to_string(['architecture_config',
                                             'random_seed']): [seed for a in architectures for seed in seeds],
                   translate_keys_to_string(['architecture_config',
                                             'architecture']): [a for a in architectures for seed in seeds],
                   translate_keys_to_string(['output_path']): model_paths,
                   translate_keys_to_string(['trainer_config', 'factory_key']):
                       ['APPO' if 'adversarial' in a else 'PPO' for a in architectures for seed in seeds]}
    config_files = create_configs(base_config=base_config_file,
                                  output_path=output_path,
                                  adjustments=adjustments)
    return create_jobs_from_job_config_files(config_files,
                                             job_config_object=job_config_object)


def prepare_ppo_kl_target_study(base_config_file: str,
                                job_config_object: CondorJobConfig,
                                number_of_jobs: int,
                                output_path: str) -> List[CondorJob]:
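    """Sweep the PPO KL target over 'number_of_jobs' seeds."""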
    kl_targets = [0.001, 0.005, 0.01, 0.05, 0.1]
    seeds = [123 * n + 5100 for n in range(number_of_jobs)]
    model_paths = [os.path.join(output_path, 'models', f'sd_{seed}/kl_{x}') for x in kl_targets for seed in seeds]
    adjustments = {translate_keys_to_string(['architecture_config',
                                            'random_seed']): seeds * len(kl_targets),
                   translate_keys_to_string(['output_path']): model_paths,
                   translate_keys_to_string(['trainer_config', 'kl_target']):
                       [x for x in kl_targets for _ in range(len(seeds))],
                   translate_keys_to_string(['trainer_config', 'factory_key']):
                       ['PPO' for _ in kl_targets for _ in range(len(seeds))]}
    config_files = create_configs(base_config=base_config_file,
                                  output_path=output_path,
                                  adjustments=adjustments)
    return create_jobs_from_job_config_files(config_files,
                                             job_config_object=job_config_object)


def prepare_learning_rate_study(base_config_file: str,
                                job_config_object: CondorJobConfig,
                                number_of_jobs: int,
                                output_path: str) -> List[CondorJob]:
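    """Sweep the (actor) learning rate over 'number_of_jobs' seeds."""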
    learning_rates = [0.01, 0.001, 0.0001, 0.00001, 0.000001]
    seeds = [123 * n + 5961 for n in range(number_of_jobs)]
    model_paths = [os.path.join(output_path, 'models', f'sd_{seed}/lr_{lr}') for lr in learning_rates for seed in seeds]
    adjustments = {translate_keys_to_string(['architecture_config',
                                             'random_seed']): seeds * len(learning_rates),
                   translate_keys_to_string(['output_path']): model_paths,
                   translate_keys_to_string(['trainer_config', 'learning_rate']):
                       [lr for lr in learning_rates for _ in range(len(seeds))],
                   translate_keys_to_string(['trainer_config', 'actor_learning_rate']):
                       [lr for lr in learning_rates for _ in range(len(seeds))],
                   }
    config_files = create_configs(base_config=base_config_file,
                                  output_path=output_path,
                                  adjustments=adjustments)
    return create_jobs_from_job_config_files(config_files,
                                             job_config_object=job_config_object)


def prepare_ppo_max_train_steps_study(base_config_file: str,
                                      job_config_object: CondorJobConfig,
                                      number_of_jobs: int,
                                      output_path: str) -> List[CondorJob]:
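    """Grid over PPO actor and critic maximum training iterations, repeated over 'number_of_jobs' seeds."""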
    max_value_training_iterations = [1, 5, 10, 50]
    max_actor_training_iterations = [1, 5, 10, 50]
    seeds = [123 * n + 5100 for n in range(number_of_jobs)]
    model_paths = [
        os.path.join(output_path, 'models', f'sd_{seed}/p_{x}_v_{y}')
        for y in max_value_training_iterations
        for x in max_actor_training_iterations for seed in seeds
    ]
    adjustments = {
        translate_keys_to_string(['architecture_config', 'random_seed']):
        seeds * len(max_actor_training_iterations) *
        len(max_value_training_iterations),
        translate_keys_to_string(['output_path']):
        model_paths,
        translate_keys_to_string([
            'trainer_config', 'max_actor_training_iterations'
        ]): [
            x for _ in max_value_training_iterations
            for x in max_actor_training_iterations for _ in range(len(seeds))
        ],
        translate_keys_to_string([
            'trainer_config', 'max_critic_training_iterations'
        ]): [
            x for x in max_value_training_iterations
            for _ in max_actor_training_iterations for _ in range(len(seeds))
        ],
        translate_keys_to_string(['trainer_config', 'factory_key']): [
            'PPO' for _ in max_value_training_iterations
            for _ in max_actor_training_iterations for _ in range(len(seeds))
        ]
    }
    config_files = create_configs(base_config=base_config_file,
                                  output_path=output_path,
                                  adjustments=adjustments)
    return create_jobs_from_job_config_files(
        config_files, job_config_object=job_config_object)


def prepare_wd_confidence_line_world(base_config_file: str,
                                     job_config_object: CondorJobConfig,
                                     number_of_jobs: int,
                                     output_path: str) -> List[CondorJob]:
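    """Grid over learning rate and confidence weight for the deeply supervised confidence architecture."""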
    learning_rates = [0.1, 0.01, 0.001, 0.0001]
    #weight_decays = [0.0, 0.01, 0.001, 0.0001, ]
    weight_decays = [10, 1.0, 0.1]

    architectures = ['auto_encoder_deeply_supervised_confidence']

    model_paths = [os.path.join(output_path, 'models', arch, f'lr_{lr}', f'wd_{wd}')
                   for arch in architectures
                   for lr in learning_rates
                   for wd in weight_decays]
    adjustments = {translate_keys_to_string(['output_path']): model_paths,
                   translate_keys_to_string(['trainer_config', 'learning_rate']):
                       [lr for arch in architectures
                        for lr in learning_rates
                        for wd in weight_decays],
                   translate_keys_to_string(['architecture_config', 'architecture']):
                       [arch for arch in architectures
                        for lr in learning_rates
                        for wd in weight_decays],
                   translate_keys_to_string(['trainer_config', 'factory_key']):
                       ['DeepSupervisionConfidence' if 'confidence' in arch
                        else 'DeepSupervision'
                        for arch in architectures
                        for lr in learning_rates
                        for wd in weight_decays],
                   translate_keys_to_string(['trainer_config', 'confidence_weight']):
                       [wd for arch in architectures
                        for lr in learning_rates
                        for wd in weight_decays]}
    config_files = create_configs(base_config=base_config_file,
                                  output_path=output_path,
                                  adjustments=adjustments)
    return create_jobs_from_job_config_files(config_files,
                                             job_config_object=job_config_object)


def prepare_lr_discriminator_line_world(base_config_file: str,
                                        job_config_object: CondorJobConfig,
                                        number_of_jobs: int,
                                        output_path: str) -> List[CondorJob]:
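    """Grid over model and discriminator learning rates; epsilon and batch norm are fixed."""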
    learning_rates = [0.0001]
    critic_learning_rates = [0.01, 0.001]
    epsilon = [0.5]
    bns = [False]

    model_paths = [
        os.path.join(output_path, 'models',
                     'auto_encoder_deeply_supervised_with_discriminator',
                     f'model_lr_{lr}', f'discriminator_lr_{dlr}',
                     f'epsilon_{eps}', 'bn' if bn else 'default')
        for lr in learning_rates for dlr in critic_learning_rates
        for eps in epsilon for bn in bns
    ]
    adjustments = {
        translate_keys_to_string(['output_path']):
        model_paths,
        translate_keys_to_string(['trainer_config', 'learning_rate']): [
            lr for lr in learning_rates for dlr in critic_learning_rates
            for eps in epsilon for bn in bns
        ],
        translate_keys_to_string(['trainer_config', 'critic_learning_rate']): [
            dlr for lr in learning_rates for dlr in critic_learning_rates
            for eps in epsilon for bn in bns
        ],
        translate_keys_to_string(['trainer_config', 'epsilon']): [
            eps for lr in learning_rates for dlr in critic_learning_rates
            for eps in epsilon for bn in bns
        ]
    }
    config_files = create_configs(base_config=base_config_file,
                                  output_path=output_path,
                                  adjustments=adjustments)
    return create_jobs_from_job_config_files(
        config_files, job_config_object=job_config_object)


def prepare_lr_wd_bn_line_world(base_config_file: str,
                                job_config_object: CondorJobConfig,
                                number_of_jobs: int,
                                output_path: str) -> List[CondorJob]:
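    """Grid over learning rate, weight decay and batch normalisation for the line world."""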

    learning_rates = [0.001, 0.0001]
    batch_norm = [True, False]
    weight_decay = [0.001, 0.0001, 0.00001]

    model_paths = [
        os.path.join(output_path, 'models', 'bn' if bn else 'default',
                     f'wd_{wd}', f'lr_{lr}') for lr in learning_rates
        for wd in weight_decay for bn in batch_norm
    ]

    adjustments = {
        translate_keys_to_string(['output_path']):
        model_paths,
        translate_keys_to_string(['trainer_config', 'learning_rate']): [
            lr for lr in learning_rates for wd in weight_decay
            for bn in batch_norm
        ],
        translate_keys_to_string([
            'architecture_config', 'batch_normalisation'
        ]): [
            bn for lr in learning_rates for wd in weight_decay
            for bn in batch_norm
        ],
        translate_keys_to_string(['trainer_config', 'weight_decay']): [
            wd for lr in learning_rates for wd in weight_decay
            for bn in batch_norm
        ],
    }
    config_files = create_configs(base_config=base_config_file,
                                  output_path=output_path,
                                  adjustments=adjustments)
    return create_jobs_from_job_config_files(
        config_files, job_config_object=job_config_object)


def prepare_dag_data_collection_train_evaluate(
        base_config_files: List[str], job_configs: List[CondorJobConfig],
        number_of_jobs: List[int], output_path: str) -> Dag:
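    """Build a DAG: data collection -> data cleaning -> training (per seed) -> interactive evaluation."""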
    jobs = []
    jobs.extend(
        prepare_data_collection(base_config_file=base_config_files[0],
                                job_config_object=job_configs[0],
                                number_of_jobs=number_of_jobs[0],
                                output_path=output_path))
    jobs.extend(
        prepare_default(base_config_file=base_config_files[1],
                        job_config_object=job_configs[1],
                        number_of_jobs=number_of_jobs[1],
                        output_path=output_path))

    # Add train jobs
    seeds = [123 * n + 5100 for n in range(number_of_jobs[2])]
    model_paths = [
        os.path.join(output_path, 'models', f'seed_{seed}') for seed in seeds
    ]
    config_files = create_configs(
        base_config=base_config_files[2],
        output_path=output_path,
        adjustments={
            '[\"architecture_config\"][\"random_seed\"]': seeds,
            '[\"output_path\"]': model_paths,
        })
    jobs.extend(
        create_jobs_from_job_config_files(job_config_files=config_files,
                                          job_config_object=job_configs[2]))
    # Add evaluate jobs
    jobs.extend(
        prepare_evaluate_interactive(base_config_file=base_config_files[3],
                                     job_config_object=job_configs[3],
                                     number_of_jobs=number_of_jobs[3],
                                     output_path=output_path,
                                     model_directories=model_paths))

    dag_lines = '# prepare_dag_data_collection_train_evaluate: \n'
    # Define jobs:
    start_index = 0
    end_index = number_of_jobs[0]
    for index, job in enumerate(jobs[start_index:end_index]):
        dag_lines += f'JOB data_collection_{index} {job.job_file} \n'
    start_index = number_of_jobs[0]
    end_index = sum(number_of_jobs[0:2])
    assert end_index - start_index == 1, 'expected exactly one data cleaning job'
    for job in jobs[start_index:end_index]:
        dag_lines += f'JOB data_cleaning {job.job_file} \n'
    start_index = sum(number_of_jobs[0:2])
    end_index = sum(number_of_jobs[0:3])
    for index, job in enumerate(jobs[start_index:end_index]):
        dag_lines += f'JOB training_{index} {job.job_file} \n'
    start_index = sum(number_of_jobs[0:3])
    end_index = sum(number_of_jobs)
    for index, job in enumerate(jobs[start_index:end_index]):
        dag_lines += f'JOB evaluation_{index} {job.job_file} \n'
    # Define links:
    dag_lines += f'PARENT {" ".join([f"data_collection_{i}" for i in range(number_of_jobs[0])])}' \
                 f' CHILD data_cleaning \n'
    dag_lines += f'PARENT data_cleaning' \
                 f' CHILD {" ".join([f"training_{i}" for i in range(number_of_jobs[2])])} \n'
    number_of_links = min(number_of_jobs[2:])
    for index in range(number_of_links):
        dag_lines += f'PARENT training_{index} CHILD evaluation_{index} \n'
    # Define retry numbers
    for index in range(number_of_jobs[0]):
        dag_lines += f'Retry data_collection_{index} 2 \n'
    dag_lines += 'Retry data_cleaning 3 \n'
    for index in range(number_of_jobs[2]):
        dag_lines += f'Retry training_{index} 2 \n'
    for index in range(number_of_jobs[3]):
        dag_lines += f'Retry evaluation_{index} 3 \n'
    # Create DAG object
    return Dag(lines_dag_file=dag_lines,
               dag_directory=os.path.join(output_path, 'dag',
                                          get_date_time_tag()))
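

# A minimal usage sketch (hedged illustration: the file paths, output directory and
# the CondorJobConfig constructor arguments below are hypothetical placeholders,
# not values defined in this module):
#
#     job_config = CondorJobConfig(...)  # fill in project-specific condor settings
#     jobs = prepare_train(base_config_file='configs/train_config.yml',
#                          job_config_object=job_config,
#                          number_of_jobs=5,
#                          output_path='experiments/exp_0')
#     # 'jobs' is a List[CondorJob], one per seed, each wrapping a generated
#     # config file ready for submission to the cluster.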