Example 1
    def __init__(self, configFile=None, action=None):
        # default to the config.yaml located next to this module
        self._pwd = os.path.dirname(__file__)
        if configFile is None:
            configFile = os.path.join(self._pwd, 'config.yaml')
        self._config = Config.read(configFile)
        # create the definition and add the suite named in the configuration
        self._defs = ecflow.Defs()
        self._suite = self._defs.add_suite(self._config['preferences']['suite_name'])
        super(Suite, self).__init__(self._suite)
Example 2
#!/usr/bin/env python2.7
from __future__ import print_function
import os
import ecflow
# When no arguments are specified, Client uses the environment variables ECF_HOST and ECF_PORT
HOST = os.getenv("ECF_HOST", "localhost")
PORT = int(os.getenv("ECF_PORT", "%d" % (1500 + os.getuid())))
NAME = os.getenv("SUITE", "elearning")
# ecflow_start.sh gives port number 1500+uid:
CLIENT = ecflow.Client(HOST + ":%d" % PORT)
# multiple ways to create a client:
# python -c "import ecflow; help(ecflow.Client)"
try:
    CLIENT.ping()
except RuntimeError as err:
    print("#ERR: ping failed: " + str(err))
try:  # read definition from disk and load into the server:
    CLIENT.load("%s.def" % NAME)
except RuntimeError as err:
    CLIENT.replace("/%s" % NAME, "%s.def" % NAME)
DEBUG = True  # DEBUG = False
if DEBUG:
    print("Checking job creation: .ecf -> .job0")
    print(ecflow.Defs("%s.def" % NAME).check_job_creation())
Example 3
import ecflow

# define suite
defs = ecflow.Defs()
suite = defs.add_suite('test')
# update ECF_HOME and ECF_INCLUDE variables for entire suite
suite.add_variable('ECF_HOME', '/home/eclark/ecflow_demo')
suite.add_variable('ECF_INCLUDE', '/home/eclark/ecflow_demo/include')
# add task t1
suite.add_task('t1')
# add task t2, triggered once t1 is complete
suite.add_task('t2').add_trigger('t1 eq complete')
# save definition file
defs.save_as_defs('test.def')
Example 4
    def __init__(self, name):
        self.name = name
        # fresh definition holding a single suite named after this object
        self.defs = ecflow.Defs()
        self.suite = self.defs.add_suite(name)
        self.checked = False
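Example 5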
def build_suite():
    """
    Build the ecflow suite.
    """
    logger.info('Building suite.')

    # ========================
    # GENERAL SUITE PROPERTIES
    # ========================

    defs = ecflow.Defs()
    suite = defs.add_suite(mysuite)

    # Set suite level variables
    set_vars(suite)

    # Set default status
    suite.add_defstatus(ecflow.DState.suspended)

    # Define thread limits
    suite.add_limit("mpmd_threads", mpmd_threads_number)
    suite.add_limit("serial_threads", serial_threads_number)

    # ========================
    # ADD CRON JOB
    # ========================

    start = ecflow.TimeSlot(0, 0)
    finish = ecflow.TimeSlot(23, 59)
    incr = ecflow.TimeSlot(0, 1)
    time_series = ecflow.TimeSeries(start, finish, incr, False)
    cron = ecflow.Cron()
    cron.set_time_series(time_series)
    fam_submit = suite.add_family('queue_submitter')
    submit = fam_submit.add_task('submit')
    submit.add_cron(cron)
    fam_submit.add_variable('ECF_JOB_CMD', ecgate_job_cmd)

    # ========================
    # DEFINE TOPLEVEL FAMILIES
    # ========================

    fam_proc = suite.add_family('proc')
    fam_dearch = suite.add_family('dearchiving')

    start = ecflow.TimeSlot(0, 0)
    finish = ecflow.TimeSlot(23, 55)
    incr = ecflow.TimeSlot(0, 5)
    time_series = ecflow.TimeSeries(start, finish, incr, False)
    cron = ecflow.Cron()
    cron.set_time_series(time_series)
    fam_arch = suite.add_family('archiving')
    storedata = fam_arch.add_task('storedata')
    storedata.add_cron(cron)
    fam_arch.add_variable('ECF_JOB_CMD', serial_job_cmd)

    # Activate thread limits
    fam_proc.add_inlimit('mpmd_threads')
    fam_dearch.add_inlimit('serial_threads')

    # Define job commands
    fam_proc.add_variable('ECF_JOB_CMD', mpmd_job_cmd)
    fam_dearch.add_variable('ECF_JOB_CMD', serial_job_cmd)

    # ===============================
    # DEFINE DYNAMIC FAMILIES & TASKS
    # ===============================

    dearch_interval = interval_value
    io_hiding_offset = io_offset_value
    dearch_counter = 0
    tar_counter = 0

    fam_tar = None
    fam_chunk = None

    mpmd_families = list()
    tarfiles_within_current_interval = list()

    # Make sure the dearchiving interval is at least one
    # greater than the IO hiding offset.
    if dearch_interval - io_hiding_offset < 1:
        raise ValueError('Dearchiving interval must be at least one greater '
                         'than IO hiding offset.')

    # connect to database and get_sats list
    db = AvhrrGacDatabase(dbfile=gacdb_sqlite_file)

    if args.satellites:
        satellites = args.satellites
    elif args.ignoresats:
        satellites = db.get_sats(start_date=args.sdate,
                                 end_date=args.edate,
                                 ignore_sats=args.ignoresats)
    else:
        satellites = db.get_sats(start_date=args.sdate,
                                 end_date=args.edate)

    # -- loop over satellites
    for sat in satellites:

        # create sat family
        fam_sat = fam_proc.add_family(sat)

        # add satellite variable
        fam_sat.add_variable("SATELLITE", sat)

        # get years list
        years = db.get_years(sat)

        # -- loop over years for satellite
        for year in years:

            if args.userdatelimit:
                if year < args.sdate.year or year > args.edate.year:
                    continue
                # create family year for satellite
                fam_year = fam_sat.add_family(str(year))
                # start and end date for year & satellite
                if year == args.sdate.year:
                    sd = datetime.date(year, args.sdate.month, args.sdate.day)
                else:
                    sd = datetime.date(year, 1, 1)
                if year == args.edate.year:
                    ed = datetime.date(year, args.edate.month, args.edate.day)
                else:
                    ed = datetime.date(year, 12, 31)
            else:
                # create family year for satellite
                fam_year = fam_sat.add_family(str(year))
                # start and end date for year & satellite
                sd = datetime.date(year, 1, 1)
                ed = datetime.date(year, 12, 31)

            # get tarfile list
            tarfiles = db.get_tarfiles(start_date=sd, end_date=ed,
                                       sats=[sat], include_blacklisted=False,
                                       strict=False)

            # -- loop over tarfiles for year & satellite
            for tarfil in tarfiles:

                logger.info("Working on: {0}".format(tarfil))

                # split tarfile name, e.g. "NOAA7_1985_01.tar" -> year=1985, month=01
                tarbase = os.path.basename(tarfil)
                tarmonth = tarbase.split(".")[0].split("_")[2]
                taryear = tarbase.split(".")[0].split("_")[1]

                # calendar.monthrange(year, month)
                #   Returns weekday of first day of the month and number of days
                #   in month, for the specified year and month.
                mr = monthrange(int(taryear), int(tarmonth))
                first_tar_date = datetime.date(int(taryear), int(tarmonth), 1)
                last_tar_date = datetime.date(int(taryear), int(tarmonth), mr[1])
                date_str = first_tar_date.strftime("%Y%m%d") + \
                           '_' + last_tar_date.strftime("%Y%m%d")

                if tar_counter % dearch_interval == 0:
                    if fam_chunk:
                        # Add all collected tarfiles to the 
                        # current dearchiving family
                        fam_chunk.add_variable('TARFILES', 
                                ' '.join(tarfiles_within_current_interval))

                        # Reset list of tarfiles within current interval
                        tarfiles_within_current_interval = []

                    # Create new family for dearchiving the next chunk of data.
                    fam_chunk = fam_dearch.add_family('chunk{0}'.
                            format(dearch_counter))
                    add_dearchiving_tasks(fam_chunk)
                    fam_chunk.add_variable("ECF_TRIES", 2)
                    dearch_counter += 1

                    # Make it wait for the current MPMD family minus a possible
                    # offset in order to hide IO time behind computation time.
                    if fam_tar:
                        add_trigger(fam_chunk, 
                                mpmd_families[tar_counter - io_hiding_offset - 1])
                    else:
                        # There is no trigger for the first IO chunk.
                        pass

                # Create one MPMD family for each tar_range_archive
                fam_tar = fam_year.add_family('{0}'.format(tarmonth))
                tar_counter += 1

                # add start and end day of fam_tar
                add_family_variables(fam_tar,
                                     first_tar_date.strftime("%Y%m%d"),
                                     last_tar_date.strftime("%Y%m%d"))

                # Make it wait for the current dearchiving family.
                add_trigger(fam_tar, fam_chunk)

                # Add MPMD tasks to each tarfile family
                add_mpmd_tasks(fam_tar)

                # Save the created family for later use
                mpmd_families.append(fam_tar)
                tarfiles_within_current_interval.append(tarfil)

    # -- end of loop over satellites

    # Add last chunk of collected tarfiles to the last dearchiving family
    fam_chunk.add_variable('TARFILES', 
            ' '.join(tarfiles_within_current_interval))

    # close database connection
    db.close()

    # ============================
    # CREATE SUITE DEFINITION FILE
    # ============================

    # Check job creation
    defs.check_job_creation()

    # Save suite to file
    suite_def_file = mysuite + '.def'
    logger.info('Saving suite definition to file: {0}'.format(suite_def_file))
    defs.save_as_defs(suite_def_file)

    # ======================
    # CREATE LOG DIRECTORIES
    # ======================

    logger.info('Creating log directories on both the local and '
                'the remote machine.')

    # Create a tree of all families in the suite 
    # (i.e. families, subfamilies, subsubfamilies etc)
    tree = familytree(suite)

    # Create corresponding log-directory tree:
    # 1.) Local machine
    for node in tree:
        dirname = os.path.join(ecf_out_dir, node)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)

    # 2.) Remote machine
    ssh = SSHClient(user=remote_user_name, host=remote_host_name)
    for node in tree:
        remote_dir = os.path.join(remote_log_dir, node)
        ssh.mkdir(remote_dir, batch=True)  # batch=True appends this mkdir
        # call to the command batch.

    # Create all remote directories in one step (much faster)
    ssh.execute_batch()
Example 6
def build_suite():
    """
    Build the ecflow suite.
    """
    logger.info('Building suite.')

    # ========================
    # GENERAL SUITE PROPERTIES
    # ========================

    defs = ecflow.Defs()
    suite = defs.add_suite(mysuite)

    # Set suite level variables
    set_vars(suite)

    # Set default status
    suite.add_defstatus(ecflow.DState.suspended)

    # Define thread limits
    suite.add_limit("mpmd_threads", mpmd_threads_number)
    suite.add_limit("serial_threads", serial_threads_number)

    # ========================
    # ADD CRON JOB
    # ========================

    start = ecflow.TimeSlot(0, 0)
    finish = ecflow.TimeSlot(23, 59)
    incr = ecflow.TimeSlot(0, 1)
    time_series = ecflow.TimeSeries(start, finish, incr, False)
    cron = ecflow.Cron()
    cron.set_time_series(time_series)
    fam_submit = suite.add_family('queue_submitter')
    submit = fam_submit.add_task('submit')
    submit.add_cron(cron)
    fam_submit.add_variable('ECF_JOB_CMD', ecgate_job_cmd)

    # ========================
    # DEFINE TOPLEVEL FAMILIES
    # ========================

    fam_dearch = suite.add_family('dearchiving')
    fam_proc = suite.add_family('processing')
    fam_make = suite.add_family('make_tarfile')
    fam_arch = suite.add_family('archiving')

    # Activate thread limits
    fam_dearch.add_inlimit('serial_threads')
    fam_proc.add_inlimit('mpmd_threads')
    fam_make.add_inlimit('serial_threads')
    fam_arch.add_inlimit('serial_threads')

    # Define job commands
    fam_dearch.add_variable('ECF_JOB_CMD', serial_job_cmd)
    fam_proc.add_variable('ECF_JOB_CMD', mpmd_job_cmd)
    fam_make.add_variable('ECF_JOB_CMD', serial_job_cmd)
    fam_arch.add_variable('ECF_JOB_CMD', serial_job_cmd)

    # ===============================
    # DEFINE DYNAMIC FAMILIES & TASKS
    # ===============================

    for mm in rrule(MONTHLY, dtstart=args.sdate, until=args.edate):

        yearstr = mm.strftime("%Y")
        monthstr = mm.strftime("%m")
        act_date = datetime.date(int(yearstr), int(monthstr), 1)
        first_day = "01"
        last_day = calendar.monthrange(int(yearstr), int(monthstr))[1]
        yyyymm = yearstr + monthstr
        start_date = yearstr + monthstr + first_day
        end_date = yearstr + monthstr + str(last_day)

        if args.ignore_months:
            if int(monthstr) in args.ignore_months:
                continue

        try:
            # dearchiving family
            fam_year_dearch = add_fam(fam_dearch, yearstr)

            # processing family
            fam_year_proc = add_fam(fam_proc, yearstr)
            add_trigger(fam_year_proc, fam_year_dearch)

            # make yearly tarfile family
            fam_year_make = add_fam(fam_make, yearstr)
            fam_year_make.add_variable("YEAR", yearstr)
            add_make_tarfile_task(fam_year_make)
            add_trigger(fam_year_make, fam_year_proc)

        except RuntimeError:
            pass

        # dearchiving family
        fam_month_dearch = add_fam(fam_year_dearch, monthstr)
        fam_month_dearch.add_variable("YYYYMM", yyyymm)
        fam_month_dearch.add_variable("START_DATE", start_date)
        fam_month_dearch.add_variable("END_DATE", end_date)
        fam_month_dearch.add_variable("NDAYS", last_day)
        add_dearchiving_task(fam_month_dearch)

        # processing family
        fam_month_proc = add_fam(fam_year_proc, monthstr)
        fam_month_proc.add_variable("YYYYMM", yyyymm)
        fam_month_proc.add_variable("SY", yearstr)
        fam_month_proc.add_variable("EY", yearstr)
        fam_month_proc.add_variable("SM", monthstr)
        fam_month_proc.add_variable("EM", monthstr)
        fam_month_proc.add_variable("SD", first_day)
        fam_month_proc.add_variable("ED", last_day)
        add_mpmd_tasks(fam_month_proc)

    # create 1 tarball containing time series tarfiles
    add_archiving_task(fam_arch)
    add_trigger(fam_arch, fam_make)

    # ============================
    # CREATE SUITE DEFINITION FILE
    # ============================

    # Check job creation
    defs.check_job_creation()

    # Save suite to file
    suite_def_file = mysuite + '.def'
    logger.info('Saving suite definition to file: {0}'.format(suite_def_file))
    defs.save_as_defs(suite_def_file)

    # ======================
    # CREATE LOG DIRECTORIES
    # ======================

    logger.info('Creating log directories on both the local and '
                'the remote machine.')

    # Create a tree of all families in the suite
    # (i.e. families, subfamilies, subsubfamilies etc)
    tree = familytree(suite)

    # Create corresponding log-directory tree:
    # 1.) Local machine
    for node in tree:
        dirname = os.path.join(ecf_out_dir, node)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)

    # 2.) Remote machine
    ssh = SSHClient(user=remote_user_name, host=remote_host_name)
    for node in tree:
        remote_dir = os.path.join(remote_log_dir, node)
        ssh.mkdir(remote_dir, batch=True)  # batch=True appends this mkdir
        # call to the command batch.

    # Create all remote directories in one step (much faster)
    ssh.execute_batch()
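Example 7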
    def get_test_data(self):
        cwd = os.getcwd()
        # sample state definition shipped with the test data
        ecflow_states_test_data_sets = "{}/data/ecflow_sample_state".format(cwd)
        # ecflow_states_test_data_sets = "{}/data/ecflow_state_staging".format(cwd)
        return ecflow.Defs(ecflow_states_test_data_sets)
Example 8
def build_suite(sdate, edate, satellites_list, ignoresats_list,
                ignoremonths_list, useprimes, modisonly, procday, dummycase,
                testcase, toacase):
    """
    Build the ecflow suite.
    """

    global fam_avhrr, fam_modis, fam_year

    logger.info('Building suite.')

    # get SVN version
    svn_version = get_svn_version(svn_path)

    # ========================
    # GENERAL SUITE PROPERTIES
    # ========================
    defs = ecflow.Defs()
    suite = defs.add_suite(mysuite)

    # Set suite level variables
    set_vars(suite, procday, dummycase, testcase, svn_version, toacase)

    # Set default status
    suite.add_defstatus(ecflow.DState.suspended)

    # Define thread limits
    suite.add_limit("serial_threads", serial_threads_number)
    suite.add_limit("parallel_threads", parallel_threads_number)

    # ========================
    # DEFINE TOP LEVEL FAMILY
    # ========================
    fam_proc = add_fam(suite, big_fam)
    fam_dearch = add_fam(suite, dearchiving)

    # Activate thread limits
    fam_proc.add_inlimit('parallel_threads')
    fam_dearch.add_inlimit('serial_threads')

    # Define job commands
    fam_proc.add_variable('ECF_JOB_CMD', serial_job_cmd)
    fam_dearch.add_variable('ECF_JOB_CMD', serial_job_cmd)

    # connect to database and get_sats list
    db = AvhrrGacDatabase(dbfile=sql_avhrr_gac)

    # verify user input and database content
    logger.info('Verify satellite settings')
    sat_list = verify_satellite_settings(db, sdate, edate, satellites_list,
                                         ignoresats_list, modisonly)

    # Are there any data for processing?
    if len(sat_list) == 0:
        logger.info("--------------------------------------------------------")
        logger.info("*** There are no data for {0} - {1}".format(sdate, edate))
        logger.info("Please check parameters you have passed!")
        logger.info("--------------------------------------------------------")
        db.close()
        sys.exit(0)

    # ================================
    # DEFINE DYNAMIC FAMILIES & TASKS
    # ================================

    # original satellite list
    orig_sat_list = sat_list

    # memorize previous month
    fam_month_previous = False

    # memorize satellites for each month
    satellites_within_current_month = list()

    # relevant for post_proc: L3S
    avhrr_logdirs = list()
    modis_logdirs = list()
    l2bsum_logdirs_within_current_month = list()

    # month counter
    month_cnt = 0

    # ----------------------------------------------------
    # loop over months for given date range
    # ----------------------------------------------------
    for mm in rrule(MONTHLY, dtstart=sdate, until=edate):

        yearstr = mm.strftime("%Y")
        monthstr = mm.strftime("%m")
        act_date = datetime.date(int(yearstr), int(monthstr), 1)
        ndays_of_month = calendar.monthrange(int(yearstr), int(monthstr))[1]
        YYYYMM = yearstr + monthstr

        # check if month should be skipped, if given
        if ignoremonths_list:
            # if act_date in ignoremonths_list:
            if int(monthstr) in ignoremonths_list:
                continue

        # check for avhrr primes
        if useprimes:
            sat_list = verify_avhrr_primes(orig_sat_list, act_date)

        # check whether any AVHRR and/or MODIS data are available
        modis_flag = False
        avhrr_flag = False
        for s in sat_list:
            isensor = get_sensor(s)

            if isensor == "AVHRR" and avhrr_flag is False:
                days = db.get_days(sat=s,
                                   year=int(yearstr),
                                   month=int(monthstr))
                if len(days) > 0:
                    avhrr_flag = True
                platform = s
                if s[0:4] == "NOAA":
                    platform = "NOAA-" + s.split("NOAA")[1]
                l3_file = YYYYMM + "-ESACCI-L3C_CLOUD-CLD_PRODUCTS-" + isensor + "_" + platform + "-fv2.1.tar"
                ecfs_target = os.path.join(ecfs_l3_dir, YYYYMM, l3_file)
                args = ['els'] + [ecfs_target]
                p1 = subprocess.Popen(args,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)
                stdout, stderr = p1.communicate()
                if "els: File does not exist" in stderr:
                    avhrr_flag = True
                else:
                    avhrr_flag = False

            if isensor == "MODIS" and modis_flag is False:
                modsd = datetime.date(int(yearstr), int(monthstr), 1)
                moded = enddate_of_month(int(yearstr), int(monthstr))
                modis_flag = get_modis_avail(s, modsd, moded)
                platform = s
                l3_file = YYYYMM + "-ESACCI-L3C_CLOUD-CLD_PRODUCTS-" + isensor + "_" + platform + "-fv2.1.tar"
                ecfs_target = os.path.join(ecfs_l3_dir, YYYYMM, l3_file)
                args = ['els'] + [ecfs_target]
                p1 = subprocess.Popen(args,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)
                stdout, stderr = p1.communicate()
                if "els: File does not exist" in stderr:
                    modis_flag = True
                else:
                    modis_flag = False

        # neither avhrr nor modis -> go to next month
        if avhrr_flag is False and modis_flag is False:
            continue

        # there is data: add fam. year if not already existing
        try:
            # PROC family
            fam_year = add_fam(fam_proc, yearstr)
            fam_year.add_variable("START_YEAR", yearstr)
            fam_year.add_variable("END_YEAR", yearstr)
            # DEARCHIVING family
            fam_year_dearch = add_fam(fam_dearch, yearstr)
            fam_year_dearch.add_variable("START_YEAR", yearstr)
            fam_year_dearch.add_variable("END_YEAR", yearstr)
        except RuntimeError:
            pass

        # PROC family: add fam. month
        fam_month = add_fam(fam_year, monthstr)
        fam_month.add_variable("START_MONTH", monthstr)
        fam_month.add_variable("END_MONTH", monthstr)
        fam_month.add_variable("NDAYS_OF_MONTH", ndays_of_month)

        # DEARCHIVING family: add fam. month
        fam_month_dearch = add_fam(fam_year_dearch, monthstr)
        fam_month_dearch.add_variable("START_MONTH", monthstr)
        fam_month_dearch.add_variable("END_MONTH", monthstr)

        # DEARCHIVING family: add get aux/era family
        fam_aux = add_fam(fam_month_dearch, get_aux_fam)
        add_aux_tasks(fam_aux, fam_month_previous)

        # PROC family: add main processing
        fam_main = add_fam(fam_month, mainproc_fam)

        # if avhrr data available for current month
        if avhrr_flag:
            # PROC
            fam_avhrr = add_fam(fam_main, "AVHRR")
            fam_avhrr.add_variable("SENSOR", "AVHRR")
            # DEARCHIVING
            fam_avhrr_dearch = add_fam(fam_month_dearch, "AVHRR")
            fam_avhrr_dearch.add_variable("SENSOR", "AVHRR")

        # if modis data available for current month
        if modis_flag:
            # PROC
            fam_modis = add_fam(fam_main, "MODIS")
            fam_modis.add_variable("SENSOR", "MODIS")
            # DEARCHIVING
            fam_modis_dearch = add_fam(fam_month_dearch, "MODIS")
            fam_modis_dearch.add_variable("SENSOR", "MODIS")

        # process avail. satellites for current month
        for counter, satellite in enumerate(sat_list):
            isensor = get_sensor(satellite)

            if isensor == "AVHRR":
                days = db.get_days(sat=satellite,
                                   year=int(yearstr),
                                   month=int(monthstr))
                if len(days) == 0:
                    continue

                platform = satellite
                if satellite[0:4] == "NOAA":
                    platform = "NOAA-" + satellite.split("NOAA")[1]
                l3_file = YYYYMM + "-ESACCI-L3C_CLOUD-CLD_PRODUCTS-" + isensor + "_" + platform + "-fv2.1.tar"
                ecfs_target = os.path.join(ecfs_l3_dir, YYYYMM, l3_file)
                args = ['els'] + [ecfs_target]
                p1 = subprocess.Popen(args,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)
                stdout, stderr = p1.communicate()
                if "els: File does not exist" not in stderr:
                    print("L3C file available in ECFS, so skipping " + platform + " for " + YYYYMM)
                    continue

                # DEARCHIVING
                fam_sat_dearch = add_fam(fam_avhrr_dearch, satellite)
                fam_sat_dearch.add_variable("SATELLITE", satellite)
                add_dearchiving_tasks(fam_sat_dearch, fam_aux, counter)
                # PROC
                fam_sat = add_fam(fam_avhrr, satellite)
                fam_sat.add_variable("SATELLITE", satellite)
                add_main_proc_tasks(fam_sat, [fam_sat_dearch],
                                    fam_month_previous, satellite)

                satellites_within_current_month.append(satellite)
                l2bsum_logdir = os.path.join(esa_ecflogdir, mysuite, big_fam,
                                             yearstr, monthstr, mainproc_fam,
                                             isensor, satellite)
                l2bsum_logdirs_within_current_month.append(l2bsum_logdir)
            else:
                msdate = datetime.date(int(yearstr), int(monthstr), 1)
                medate = enddate_of_month(int(yearstr), int(monthstr))
                mcheck = get_modis_avail(satellite, msdate, medate)
                if not mcheck:
                    continue

                platform = satellite
                l3_file = YYYYMM + "-ESACCI-L3C_CLOUD-CLD_PRODUCTS-" + isensor + "_" + platform + "-fv2.1.tar"
                ecfs_target = os.path.join(ecfs_l3_dir, YYYYMM, l3_file)
                args = ['els'] + [ecfs_target]
                p1 = subprocess.Popen(args,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)
                stdout, stderr = p1.communicate()
                if "els: File does not exist" not in stderr:
                    print("L3C file available in ECFS, so skipping " + platform + " for " + YYYYMM)
                    continue

                # DEARCHIVING
                fam_sat_dearch = add_fam(fam_modis_dearch, satellite)
                fam_sat_dearch.add_variable("SATELLITE", satellite)
                # PROC
                fam_sat = add_fam(fam_modis, satellite)
                fam_sat.add_variable("SATELLITE", satellite)

                satellites_within_current_month.append(satellite)
                l2bsum_logdir = os.path.join(esa_ecflogdir, mysuite, big_fam,
                                             yearstr, monthstr, isensor,
                                             satellite)
                l2bsum_logdirs_within_current_month.append(l2bsum_logdir)

                if avhrr_flag:
                    add_dearchiving_tasks(fam_sat_dearch, fam_aux, counter)
                    #                  (family, prefamily (=trigger))
                    add_main_proc_tasks(fam_sat, [fam_avhrr, fam_sat_dearch],
                                        fam_month_previous, satellite)
                    fam_avhrr = fam_sat
                else:
                    add_dearchiving_tasks(fam_sat_dearch, fam_aux, counter)
                    #                  (family, prefamily (=trigger))
                    add_main_proc_tasks(fam_sat, [fam_aux, fam_sat_dearch],
                                        fam_month_previous, satellite)
                    fam_aux = fam_sat

        # -- end of satellite loop

        # satellites within current month
        logger.info("satellites_within_current_month:{0}".format(
            satellites_within_current_month))

        # check if enough satellites are available for L3S product
        avhrr_cnt = 0
        modis_cnt = 0

        for ldirs in l2bsum_logdirs_within_current_month:
            if "AVHRR" in ldirs:
                avhrr_cnt += 1
                avhrr_logdirs.append(ldirs)
            if "MODIS" in ldirs:
                modis_cnt += 1
                modis_logdirs.append(ldirs)

        # add fam. post processing
        if avhrr_cnt > 1 or modis_cnt > 1:
            fam_post = add_fam(fam_month, postproc_fam)
            last_fam_trigger = fam_post
        else:
            last_fam_trigger = fam_main

        if avhrr_cnt > 1:
            fam_avhrr_post = add_fam(fam_post, "AVHRR_L3S")
            fam_avhrr_post.add_variable("SENSOR_FAM", "AVHRR")
            add_l3s_product_tasks(fam_avhrr_post, fam_main)
            fam_avhrr_post.add_variable('L2B_SUM_LOGDIRS',
                                        ' '.join(avhrr_logdirs))
            logger.info("L3S: {0} AVHRR(s) in {1} for {2}/{3}".format(
                avhrr_cnt, mainproc_fam, yearstr, monthstr))
            logger.info("Use {0} for L3S production".format(avhrr_logdirs))
        else:
            logger.info("No L3S production due to "
                        "{0} AVHRR in {1} for {2}/{3}".format(
                            avhrr_cnt, mainproc_fam, yearstr, monthstr))

        if modis_cnt > 1:
            fam_modis_post = add_fam(fam_post, "MODIS_L3S")
            fam_modis_post.add_variable("SENSOR_FAM", "MODIS")
            add_l3s_product_tasks(fam_modis_post, fam_main)
            fam_modis_post.add_variable('L2B_SUM_LOGDIRS',
                                        ' '.join(modis_logdirs))
            logger.info("L3S: {0} MODIS(s) in {1} for {2}/{3}".format(
                modis_cnt, mainproc_fam, yearstr, monthstr))
            logger.info("Use {0} for L3S production".format(modis_logdirs))
        else:
            logger.info("No L3S production due to "
                        "{0} MODIS in {1} for {2}/{3}".format(
                            modis_cnt, mainproc_fam, yearstr, monthstr))

        # add cleanup aux/era, l2, l2_sum files
        fam_final_cleanup = add_fam(fam_month, final_fam)
        fam_final_cleanup.add_variable(
            'CURRENT_SATELLITE_LIST',
            ' '.join(satellites_within_current_month))
        add_final_cleanup_task(fam_final_cleanup, last_fam_trigger)

        # remember fam_month
        fam_month_previous = fam_month
        month_cnt += 1

        # reset lists
        satellites_within_current_month = []
        l2bsum_logdirs_within_current_month = []
        avhrr_logdirs = []
        modis_logdirs = []

    # ----------------------------------------------------
    # end of loop over months
    # ----------------------------------------------------

    # close connection to database
    db.close()

    # ============================
    # CREATE SUITE DEFINITION FILE
    # ============================

    # Check job creation
    logger.info("Defs Check Job Creation: {0}".format(
        defs.check_job_creation()))

    # Save suite to file
    suite_def_file = mysuite + '.def'
    logger.info('Saving suite definition to file: {0}'.format(suite_def_file))
    defs.save_as_defs(suite_def_file)

    # ======================
    # CREATE LOG DIRECTORIES
    # ======================
    logger.info('Creating log directories on both the '
                'local and the remote machine.\n')

    # Create a tree of all families in the suite
    # (i.e. families, subfamilies, subsubfamilies etc)
    tree = familytree(suite)

    # Create corresponding log-directory tree:
    # 1.) Local machine
    for node in tree:
        dirname = os.path.join(ecf_out_dir, node)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)

    # 2.) Remote machine
    ssh = SSHClient(user=remote_user_name, host=remote_host_name)
    for node in tree:
        remote_dir = os.path.join(remote_log_dir, node)
        ssh.mkdir(remote_dir, batch=True)  # batch=True appends this mkdir
        # call to the command batch.

    # Create all remote directories in one step (much faster)
    ssh.execute_batch()