# Necessary for getting the explosion time from the template.
# NOTE(review): `rappid_original_data`, `rappidDH`, `Fitter`, `logger` and
# `pickle_dir` are only imported further down in this file — as written these
# statements run before those imports; confirm the intended script order.
sed_directory = rappid_original_data + '/SEDs'

# get the lightcurves either generated using MOSFiT (type 'mosfit')
# or using spectral templates (type 'templates')
generated_with = 'mosfit'

# get the DataHandler object which takes care of all the bookkeeping
thisDH = rappidDH.get_dhandler(generated_with, sed_directory=sed_directory)

# get the explosion times for the simulations
# (ncpu=45 — presumably parallelised internally; verify against the implementation)
thisDH.get_explosion_times_from_template(ncpu=45)

# fit the lightcurves with the desired method (only 'mosfit' is good!)
method = 'mosfit'
fitter = Fitter.get_fitter(method)

logger.debug(f'fitter method {fitter.method_name} \n'
             f'job-id {fitter.job_id}')

# file listing indices of lightcurves whose fits produced no output;
# only passed to fit_lcs when repeating a previous (partially failed) run
missing_indice_file = f'{pickle_dir}/{thisDH.name}/{fitter.method_name}/missing_indices.txt'
fitter.fit_lcs(
    thisDH,
    tasks_in_group=100,
    # missing_indice_file=missing_indice_file  # to be used when repeating the fit
)

# make a selection of lightcurves based on the available photometry
thisDH.select_and_adjust_selection_string()

# get the results
from estimate_explosion_time.core.data_prep.data import DataHandler
from estimate_explosion_time.analyses.rappid_simulations import rappidDH
from estimate_explosion_time.core.fit_data.fitlauncher.fitlauncher import Fitter
from estimate_explosion_time.analyses.rappid_simulations.convert_to_pickle_files import \
    rappid_pkl_name, write_model_to_pickle, rappid_original_data
import os


# take the original data and convert it into pickles in the right format
# if not os.path.exists(pickle_filename):
#     convert_to_pickles()

# NOTE(review): `convert_to_pickles` and `ExampleDH` are not defined or
# imported anywhere visible in this file — confirm where they come from
# (the nearby import pulls in `write_model_to_pickle`, not these names).
convert_to_pickles()
# get the DataHandler object which takes care of all the bookkeeping
thisDH = ExampleDH.get_dhandler()
fitter = Fitter.get_fitter('mosfit')
# point the DataHandler at the fitter's output directory and register
# the fit method, then persist the DataHandler state to disk
thisDH.pickle_dir = fitter.get_output_directory(thisDH)
thisDH.use_method('mosfit', None)
thisDH.save_me()

# # fit the lightcurves with the desired method (only 'mosfit' is good!)
# method = 'mosfit'
# fitter = Fitter.get_fitter(method)
#
# logger.debug(
#     f'fitter method {fitter.method_name} \n'
#     f'job-id {fitter.job_id}'
# )
#
# missing_indice_file = f'{pickle_dir}/{thisDH.name}/{fitter.method_name}/missing_indices.txt'
# fitter.fit_lcs(rappidDH,
# set up the module logger at DEBUG level for this script.
# NOTE(review): `get_custom_logger`, `main_logger_name` and `logging` are not
# imported in the visible part of this file — confirm they are in scope here.
logger = get_custom_logger(main_logger_name)
logger.setLevel(logging.DEBUG)
logger.debug('logging level is DEBUG')

import argparse
from estimate_explosion_time.analyses.ZTF20abdnpdo import ZTF20abdnpdoDataHandler
from estimate_explosion_time.core.fit_data.fitlauncher.fitlauncher import Fitter
from estimate_explosion_time.core.analyse_fits_from_simulation.plots import Plotter

parser = argparse.ArgumentParser()
# BUG FIX: `type=bool` is broken for CLI flags — bool("False") is True, so any
# non-empty value (including "False") enabled the option. A store_true flag
# gives the intended off-by-default boolean.
parser.add_argument('-fn', '--force_new', action='store_true',
                    help='force the creation of a new DataHandler')
args = parser.parse_args()

method = 'mosfit'
dh = ZTF20abdnpdoDataHandler.get_dhandler(force_new=args.force_new)
# use the factory for consistency with the rest of this file, which always
# obtains fitters via Fitter.get_fitter(method) rather than Fitter(method)
fitter = Fitter.get_fitter(method)

# file listing indices of lightcurves whose fits produced no output
# (computed here but only needed when repeating a partially failed run)
missing_indice_file = f'{pickle_dir}/{dh.name}/{fitter.method_name}/missing_indices.txt'
fitter.fit_lcs(dh, reduce_mosfit_output=False, mock_fit=True)

# keep every lightcurve, then compute the results without summary plots
dh.select_and_adjust_selection_string(select_all=True)
dh.results(force=True, make_plots=False)

# plot each lightcurve fit individually, including the corner plot
plotter = Plotter(dh, method)
for i in range(dh.nlcs):
    plotter.plot_lc(i,
                    plot_corner=True,
                    plot_orig_template=False,
                    reduce_data=True)
    def collect_results(self, force=False):
        """Collect the per-lightcurve fit results from the pickle directory.

        Waits for the registered cluster job (if any), detects lightcurves
        with no result file, optionally re-submits those fits, and finally
        aggregates everything via ``sub_collect_results``.

        :param force: bool, re-collect even if results were already collected
        :raises ResultError: if no job id is given and the user aborts, if
            the cluster job did not finish, or if the result directory is empty
        """
        if force:
            logger.debug('forcing to collect data')

        # nothing to do if results exist already (unless forced)
        if self.collected_data is not None and not force:
            logger.debug('results were already collected')
            return

        if self.job_id:
            job_status = wait_for_cluster(self.job_id)
        else:
            inp = input(
                'No job id specified. Go on trying to collect fit results? [y/n] '
            )
            if inp in ['y', 'yes']:
                job_status = True
            else:
                raise ResultError('no job_id specified')

        if not job_status:
            raise ResultError('Exiting before all tasks were done.')

        # all jobs are done: unset job id and continue with collecting results
        self.job_id = None

        # list the directory once instead of twice (original called
        # os.listdir both for the emptiness check and for the contents)
        listed_pickle_dir = os.listdir(self.pickle_dir)
        if not listed_pickle_dir:
            raise ResultError(f'No result files in {self.pickle_dir}!')

        # NOTE(review): the original comment claimed "indices are file name
        # minus one" but no offset is applied here — verify which is correct.
        indices = [
            int(fname.split('.')[0]) for fname in listed_pickle_dir
            if 'missing' not in fname and not fname.startswith('.')
        ]

        # indices that have no result file in the output directory
        # (set membership avoids the O(n*m) list scan of the original)
        index_set = set(indices)
        missing_indices = [
            i for i in range(self.dhandler.nlcs) if i not in index_set
        ]

        # if any lightcurves are missing, offer to fit them again
        if missing_indices:

            missing_indices_fname = f'{pickle_dir}/{self.dhandler.name}/{self.method}/missing_indices.txt'
            with open(missing_indices_fname, 'w') as f:
                for ind in missing_indices:
                    f.write(f"{ind}\n")
            logger.info(
                f'No result files for {len(missing_indices)} fits! '
                f'Wrote list to {missing_indices_fname}')
            inpt = input('Submit remaining lcs to fit? [y/n] ')

            # accept 'yes' as well, consistent with the other prompts
            if inpt in ['y', 'yes']:
                fitter = Fitter.get_fitter(self.method)
                fitter.fit_lcs(
                    self.dhandler,
                    ntasks=len(missing_indices),
                    tasks_in_group=1,
                    missing_indice_file=missing_indices_fname)
                logger.info('submitted fits, exiting ...')

            inpt = input(
                'continue without the missing results? [y/n] ')
            if inpt not in ['y', 'yes']:
                # retry collection, then RETURN: the original fell through
                # after the recursion and aggregated the stale, incomplete
                # index list a second time
                self.collect_results(force=force)
                return

        self.sub_collect_results(indices, listed_pickle_dir)
        collected_data_filename = f'{self.pickle_dir}.pkl'
        self.dhandler.collected_data = collected_data_filename
        self.save_data()
        self.dhandler.save_me()