Example #1
    def run(self):
        systemsettings = functions.getSystemSettings()
        # Special case for mesa-proc @ JRC
        if systemsettings['type_installation'] == 'Server':
            get_eumetcast.loop_eumetcast(dry_run=self.dry_run)
        else:
            get_eumetcast.loop_eumetcast_ftp(dry_run=self.dry_run)
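Across these examples, getSystemSettings() returns a flat dict of string-valued settings ('type_installation', 'role', 'mode', 'ip_pc2', ...). A minimal sketch of such a reader, assuming the settings live in the system_settings.ini file mentioned in loop_system (Example #8); the file path and the section flattening are assumptions, not the real implementation:

import configparser

def getSystemSettings(ini_file='system_settings.ini'):  # path is a guess
    # Flatten every section into one dict of strings, so that callers
    # can index e.g. settings['type_installation'] directly.
    parser = configparser.ConfigParser()
    parser.read(ini_file)
    settings = {}
    for section in parser.sections():
        settings.update(dict(parser.items(section)))
    return settings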
Example #2
    def test_system_data_sync(self):

        source = es_constants.es2globals['processing_dir']
        system_settings = functions.getSystemSettings()
        ip_target = system_settings['ip_pc2']
        target = ip_target + '::products' + es_constants.es2globals['processing_dir']

        status = es2system.system_data_sync(source, target)
        self.assertEqual(status, 0)
Example #3
    def test_setSystemSetting(self):
        setting = 'ingest_archive_eum'
        value = 'true'
        result = functions.setSystemSetting(setting, value)
        self.assertTrue(result)
        # The updated setting must be visible on re-read
        systemsettings = functions.getSystemSettings()
        self.assertIn('ingest_archive_eum', systemsettings)
        self.assertEqual(systemsettings['ingest_archive_eum'], 'true')
Example #4
def get_status_local_machine():
    #   Get info on the status of the local machine
    #
    logger.debug("Entering routine %s" % 'get_status_local_machine')

    # Get the local system settings
    systemsettings = functions.getSystemSettings()

    # Get status of all services
    status_services = functions.getStatusAllServices()

    get_eumetcast_status = status_services['eumetcast']
    get_internet_status = status_services['internet']
    ingestion_status = status_services['ingest']
    processing_status = status_services['process']
    system_status = status_services['system']

    # Get status of postgresql
    psql_status = functions.getStatusPostgreSQL()

    # Get internet connection
    internet_status = functions.internet_on()

    # ToDo: check disk status!

    status_local_machine = {
        'get_eumetcast_status': get_eumetcast_status,
        'get_internet_status': get_internet_status,
        'ingestion_status': ingestion_status,
        'processing_status': processing_status,
        'system_status': system_status,
        'system_execution_time': datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        'postgresql_status': str(psql_status).lower(),
        'internet_connection_status': str(internet_status).lower(),
        'active_version': systemsettings['active_version'],
        'mode': systemsettings['mode'],
        'disk_status': 'true'
    }
    return status_local_machine
Example #5
def system_bucardo_config():
    #   Check whether bucardo has already been configured, and whether the conditions to do so are met

    res_bucardo_config = 0

    logger.debug("Entering routine %s" % 'system_bucardo_config')

    # grep exits with 0 when 'No syncs found' is printed, i.e. no sync exists yet
    res_list_sync = os.system(
        'bucardo list sync | grep "No syncs found" 1>/dev/null')

    # If no sync exists, bucardo still needs to be configured
    if not res_list_sync:

        # Get relevant variables
        sysSettings = functions.getSystemSettings()
        role = sysSettings['role']

        # Check the other computer is ready
        if role == 'PC2':
            other_pc = 'MESA-PC3'
        else:
            other_pc = 'MESA-PC2'

        # pg_isready exits with 0 when the server is accepting connections
        command = '/usr/pgsql-9.3/bin/pg_isready -h ' + other_pc
        other_pc_not_ready = os.system(command)

        if not other_pc_not_ready:

            # Execute the configuration
            command = '/var/www/eStation2/config/install/bucardo_config.sh '+ role.lower() +\
                      ' 1>/var/log/bucardo/bucardo_config.log'+ ' 2>/var/log/bucardo/bucardo_config.err'

            res_bucardo_config = os.system(command)

            # Restart bucardo
            command = 'bucardo restart'
            res_bucardo_config += os.system(command)

        else:
            logger.error('The other computer ' + other_pc +
                         ' is not ready. Exit.')

    # Exit
    return res_bucardo_config
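The grep pipeline above encodes "bucardo not yet configured" as a shell exit status, which is what forces the double negation in the if statements. The same check written with subprocess, as a sketch (only the 'No syncs found' marker is taken from the code above):

import subprocess

def bucardo_needs_config():
    # 'bucardo list sync' prints 'No syncs found' when nothing is configured
    result = subprocess.run(['bucardo', 'list', 'sync'],
                            capture_output=True, text=True)
    return 'No syncs found' in result.stdout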
Example #6
def remove_old_files(productcode,
                     subproductcode,
                     version,
                     mapsetcode,
                     product_type,
                     nmonths,
                     logger=None):

    # Check logger
    if logger is None:
        logger = log.my_logger(__name__)

    logger.info("Entering routine %s " % 'remove_old_files')

    # Check the installation type
    sysSettings = functions.getSystemSettings()
    if sysSettings['type_installation'] == 'Server':
        logger.info("File housekeeping not done on Server ")
        return

    prod_subdir = functions.set_path_sub_directory(productcode, subproductcode,
                                                   product_type, version,
                                                   mapsetcode)
    prod_dir = es_constants.es2globals[
        'processing_dir'] + os.path.sep + prod_subdir
    list_files = sorted(glob.glob(prod_dir + os.path.sep + '*.tif'))

    # Reference month/year for computing the age of each file
    month_now = datetime.date.today().month
    year_now = datetime.date.today().year

    for my_file in list_files:
        # Extract the date from the filename
        date = functions.get_date_from_path_filename(os.path.basename(my_file))
        date_yyyy = int(date[0:4])
        date_month = int(date[4:6])

        # Age in whole months; unlike a plain year/month comparison,
        # this handles files dated in a previous year correctly
        months_old = (year_now - date_yyyy) * 12 + (month_now - date_month)
        if months_old >= nmonths:
            logger.debug("Deleting file %s " % my_file)
            os.remove(my_file)
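A quick sanity check of the month-age rule above, with invented dates:

# File dated 2023-11, 'now' = 2024-01, nmonths = 2 (all values invented)
months_old = (2024 - 2023) * 12 + (1 - 11)  # -> 2
print(months_old >= 2)                      # True: the file would be deleted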
Example #7
class TestSystem(unittest.TestCase):
    systemsettings = functions.getSystemSettings()
    install_type = systemsettings['type_installation'].lower()

    def test_manage_lock(self):

        # Check the management of the lock files
        lock_id = 'test_operation'
        # Delete all existing locks
        status = es2system.system_manage_lock('All_locks', 'Delete')
        self.assertEqual(status, 0)

        # Check a (not existing) lock
        status = es2system.system_manage_lock(lock_id, 'Check')
        self.assertEqual(status, 0)

        # Create a lock
        status = es2system.system_manage_lock(lock_id, 'Create')
        self.assertEqual(status, 0)

        # Check an (existing) lock
        status = es2system.system_manage_lock(lock_id, 'Check')
        self.assertEqual(status, 1)

    def test_save_status(self):

        # Save machine status in /eStation/system
        status_system_file = es2system.save_status_local_machine()
        self.assertEqual(status_system_file, 0)

    @unittest.skipIf(install_type != 'full', "Test only on MESA Station - Full install")
    def test_change_ip_addresses_default(self):
        ip_pc1 = '192.168.0.11'
        ip_pc2 = '192.168.0.15'
        ip_pc3 = '192.168.0.16'
        ip_dns = '192.168.0.1'
        gateway = '192.168.0.1'
        ip_lan = '192.168.0.0/24'

        sudo_psw = 'mesadmin'
        command = es_constants.es2globals['base_dir'] + '/apps/es2system/network_config_1.0.sh ' + \
                  ip_pc1 + ' ' + \
                  ip_pc2 + ' ' + \
                  ip_pc3 + ' ' + \
                  ip_dns + ' ' + \
                  gateway + ' ' + \
                  ip_lan
        status = os.system('echo %s | sudo -S %s' % (sudo_psw, command))
        self.assertEqual(status, 0)

    @unittest.skipIf(install_type != 'full', "Test only on MESA Station - Full install")
    def test_change_ip_addresses_Container(self):

        ip_pc1 = '10.191.231.11'  # the machine does not actually exist ..
        ip_pc2 = '10.191.231.89'
        ip_pc3 = '10.191.231.90'
        ip_dns = '139.191.1.132'  # Only primary ... secondary to be added
        gateway = '10.191.231.1'
        ip_lan = '10.191.231.0/24'  # To be checked (?)

        sudo_psw = 'mesadmin'
        command = es_constants.es2globals['base_dir'] + '/apps/es2system/network_config_1.0.sh ' + \
                  ip_pc1 + ' ' + \
                  ip_pc2 + ' ' + \
                  ip_pc3 + ' ' + \
                  ip_dns + ' ' + \
                  gateway + ' ' + \
                  ip_lan
        status = os.system('echo %s | sudo -S %s' % (sudo_psw, command))
        self.assertEqual(status, 0)

    # The following test enters an infinite loop (?) -> see test_system_loop below with dry_run = True
    # def test_system_service(self):
    #
    #     status = es2system.loop_system(dry_run=False)
    #     self.assertEqual(status, 0)

    @unittest.skipIf(True, "Not working - to be checked")
    def test_system_db_dump_docker(self):

        list_dump = ['products', 'analysis']
        status = es2system.system_db_dump_docker(list_dump)
        self.assertEqual(status, b'')

    @unittest.skipIf(install_type != 'full', "Test only on MESA Station - Full install")
    def test_system_db_dump(self):

        list_dump = ['products', 'analysis']
        status = es2system.system_db_dump(list_dump)
        self.assertEqual(status, 0)

    @unittest.skipIf(install_type != 'full', "Test only on MESA Station - Full install")
    def test_system_manage_dumps(self):

        status = es2system.system_manage_dumps()
        self.assertEqual(status, 0)

    @unittest.skipIf(install_type != 'full', "Test only on MESA Station - Full install")
    def test_system_data_sync(self):

        source = es_constants.es2globals['processing_dir']
        system_settings = functions.getSystemSettings()
        ip_target = system_settings['ip_pc2']
        target = ip_target + '::products' + es_constants.es2globals['processing_dir']

        status = es2system.system_data_sync(source, target)
        self.assertEqual(status, 0)

    @unittest.skipIf(install_type != 'full', "Test only on MESA Station - Full install")
    def test_db_sync_full_from_PC2(self):

        list_syncs = ['sync_pc2_products_full', 'sync_pc2_analysis_full']
        status = es2system.system_db_sync(list_syncs)

    @unittest.skipIf(install_type != 'full', "Test only on MESA Station - Full install")
    def test_db_sync_full_from_PC3(self):

        list_syncs = ['sync_pc3_analysis_full', 'sync_pc3_products_full']
        status = es2system.system_db_sync(list_syncs)

    @unittest.skipIf(install_type != 'full', "Test only on MESA Station - Full install")
    def test_system_db_sync_full(self):

        # The role of this machine should be retrieved here ...
        status = es2system.system_db_sync_full('pc2')
        self.assertTrue(status)

    @unittest.skipIf(install_type != 'full', "Test only on MESA Station - Full install")
    def test_system_bucardo_config(self):

        # The role of this machine should be retrieved here ...
        status = es2system.system_bucardo_config()

    @unittest.skipIf(install_type != 'full', "Test only on MESA Station - Full install")
    def test_system_status_PC1(self):

        # The role of this machine should be retrieved here ...
        status = es2system.get_status_PC1()

    # @unittest.skipIf(install_type != 'full',"Test only on MESA Station - Full install")
    def test_system_install_report(self):

        # The role of this machine should be retrieved here ...
        repfile = es2system.system_install_report()
        self.assertTrue(os.path.isfile(repfile))

    def test_system_create_report(self):

        # Create the report and check the file exists
        repfile = es2system.system_create_report()
        self.assertTrue(os.path.isfile(repfile))

    def test_clean_temp_dir(self):

        # Clean the temp dirs and check the status
        status = es2system.clean_temp_dir()
        self.assertEqual(status, 0)

    @unittest.skipIf(install_type != 'full', "Test only on MESA Station - Full install")
    def test_bucardo_service(self):

        # The role of this machine should be retrieved here ...
        status = es2system.system_bucardo_service('stop')

    def test_system_loop(self):

        # Call the system loop in dry mode (exits after first iteration)
        status = es2system.loop_system(dry_run=True)
        self.assertEqual(status, 0)

    @unittest.skipIf(install_type != 'server', "Test only on JRC Server Installation")
    def test_push_ftp_aruba(self):

        try:
            import apps.es2system.test.aruba_credentials as ac
        except ImportError:
            self.skipTest('aruba_credentials not available')

        # Masked=FALSE means the masked products are pushed.
        status = es2system.push_data_ftp(url=ac.url, user=ac.user, psw=ac.psw, trg_dir=ac.trg_dir, masked=False)
        self.assertEqual(status, 0)

    @unittest.skipIf(install_type != 'server', "Test only on JRC Server Installation")
    def test_push_ftp_jrc(self):

        # Execute without arguments: they are read from config/server_ftp.py
        # The products/versions considered for sync are the 'activated' ones,
        # except those set as 'EXCLUDE' in the server_ftp.py definitions
        # masked=True means the masked sub-products are not pushed (the default)

        status = es2system.push_data_ftp(masked=True)
        self.assertEqual(status, 0)
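These are plain unittest cases, so the standard runner applies; the module path below is a placeholder, not the project's actual layout:

python -m unittest path.to.test_module.TestSystem -v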
Example #8
def loop_system(dry_run=False):
    #    Driver of the system service
    #    Reads the configuration from the system settings file (system_settings.ini)
    #    Performs the needed operations, according to the machine role/mode
    #    Arguments: dry_run -> if True, only report what has to be done (no actions)
    #                       -> if False, do the operations (default)

    logger.info("Entering routine %s" % 'loop_system')

    # Specific settings for the system operations
    delay_data_sync_minutes = es_constants.es2globals[
        'system_delay_data_sync_min']
    time_for_db_dump = es_constants.es2globals['system_time_db_dump_hhmm']
    time_for_spirits_conv = es_constants.es2globals['system_time_spirits_conv']

    try:
        time_for_push_ftp = es_constants.es2globals['system_time_push_ftp']
    except KeyError:
        logger.info("Parameter not defined in factory_settings: %s" %
                    'system_time_push_ftp')
        time_for_push_ftp = '24:01'

    do_bucardo_config = False

    # Restart bucardo
    command = 'bucardo restart'
    res_bucardo_restart = os.system(command)
    if res_bucardo_restart:
        logger.warning("Error in restarting bucardo")
    else:
        logger.info("Bucardo restarted correctly")

    # Loop to manage the 'cron-like' operations, i.e.:

    #   0. Check bucardo config
    #   a. Data sync (not done here anymore; handled by TPZF)
    #   b. DB sync: bucardo
    #   c. DB dump (create/manage)
    #   d. Spirits conversion
    #   e. Clean Temporary directory

    execute_loop = True
    while execute_loop:

        logger.info("")
        logger.info("Entering the System Service loop")

        # Read the relevant info from system settings
        system_settings = functions.getSystemSettings()
        logger.debug('System Settings Mode: %s ' % system_settings['mode'])

        # Initialize To Do flags
        do_data_sync = False
        schemas_db_sync = []
        schemas_db_dump = []
        do_convert_spirits = False
        do_clean_tmp = True
        do_push_ftp = False

        truthy_values = ['True', 'true', '1', 't', 'y', 'Y', 'yes', 'Yes']
        do_data_sync = system_settings['data_sync'] in truthy_values
        do_db_sync = system_settings['db_sync'] in truthy_values

        # Implement the logic of operations based on type/role/mode
        if system_settings['type_installation'] == 'Full':

            do_bucardo_config = True
            if system_settings['role'] == 'PC2':
                status_otherPC = functions.get_remote_system_status('mesa-pc3')
                if len(status_otherPC) != 0:
                    mode_otherPC = status_otherPC['mode']
                else:
                    mode_otherPC = 'unreachable'

                # ip_target = system_settings['ip_pc3']
                if system_settings['mode'] == 'nominal':
                    if mode_otherPC == 'recovery':
                        do_data_sync = False
                        logger.info(
                            "Do not do data_sync because other PC is in Recovery Mode"
                        )
                    elif mode_otherPC == 'unreachable':
                        do_data_sync = False
                        logger.info(
                            "Do not do data_sync because other PC is not reachable"
                        )

                    schemas_db_sync = ['products']
                    schemas_db_dump = ['products', 'analysis']
                    do_convert_spirits = True
                    bucardo_action = 'start'

                if system_settings['mode'] == 'recovery':
                    schemas_db_sync = []
                    schemas_db_dump = ['products', 'analysis']
                    do_convert_spirits = True
                    bucardo_action = 'stop'

                if system_settings['mode'] == 'maintenance':
                    schemas_db_sync = []
                    schemas_db_dump = []
                    bucardo_action = 'stop'

            if system_settings['role'] == 'PC3':
                status_otherPC = functions.get_remote_system_status('mesa-pc2')

                if len(status_otherPC) != 0:
                    mode_otherPC = status_otherPC['mode']
                else:
                    mode_otherPC = 'unreachable'

                # ip_target = system_settings['ip_pc2']
                if system_settings['mode'] == 'nominal':
                    if mode_otherPC == 'recovery':
                        do_data_sync = False
                        logger.info(
                            "Do not do data_sync because other PC is in Recovery Mode"
                        )
                    elif mode_otherPC == 'unreachable':
                        do_data_sync = False
                        logger.info(
                            "Do not do data_sync because other PC is not reachable"
                        )

                    schemas_db_sync = ['analysis']
                    schemas_db_dump = ['products', 'analysis']
                    bucardo_action = 'start'

                if system_settings['mode'] == 'recovery':
                    schemas_db_sync = []
                    schemas_db_dump = ['products', 'analysis']
                    bucardo_action = 'stop'

                if system_settings['mode'] == 'maintenance':
                    schemas_db_sync = []
                    schemas_db_dump = []
                    bucardo_action = 'stop'

        if system_settings['type_installation'] == 'SinglePC':
            do_data_sync = False
            schemas_db_sync = []
            schemas_db_dump = ['products', 'analysis']

        if system_settings['type_installation'] == 'Server':
            do_data_sync = False
            do_db_sync = False
            schemas_db_sync = []
            schemas_db_dump = ['products', 'analysis']
            do_convert_spirits = False
            do_push_ftp = True

        if es_constants.es2globals['do_spirits_conversion'] in truthy_values:
            do_convert_spirits = True

        # Report on the actions to be done
        logger.info("")
        logger.info("*   Schedule follows ")
        logger.info("*   Do bucardo config:  " + str(do_bucardo_config))
        logger.info("*   Do data-sync:       " + str(do_data_sync))
        logger.info("*   Do db-sync:         " + str(do_db_sync))
        # logger.info("*   Nr schema to dump:  " + str(len(schemas_db_dump)))
        logger.info("*   Do spirits convers: " + str(do_convert_spirits))
        logger.info("*   Do push to ftp:     " + str(do_push_ftp))
        logger.info("")

        # do_bucardo_config
        if do_bucardo_config:
            system_bucardo_config()

        # do_data_sync
        operation = 'data_sync'
        if do_data_sync:
            check_time = check_delay_time(
                operation, delay_minutes=delay_data_sync_minutes)
            logger.info("check_time: " + str(check_time))
            if check_time:
                logger.info("Executing data synchronization")
                data_source = es_constants.es2globals['processing_dir']
                # data_target = ip_target+'::products'+es_constants.es2globals['processing_dir']
                data_target = 'PC3::products'
                system_data_sync(data_source, data_target)
                check_delay_time(operation,
                                 delay_minutes=delay_data_sync_minutes,
                                 write=True)

        # DB sync: execute every cycle if in system_setting (no delay)
        operation = 'db_sync'
        if len(schemas_db_sync) > 0:
            if do_db_sync:
                logger.info("Executing db synchronization")
                # Call the function
                system_bucardo_service(bucardo_action)

        # DB dump
        operation = 'db_dump'
        if len(schemas_db_dump) > 0:
            check_time = check_delay_time(operation, time=time_for_db_dump)
            if check_time:
                # Execute the dump of the schemas active on the machine - Correct Tuleap #10905
                logger.info("Executing db dump")
                system_db_dump(schemas_db_dump)

                # Manage the file dumps (rotation)
                logger.info("Executing manage dumps")
                system_manage_dumps()

        # Convert to spirits format
        operation = 'convert_spirits'
        if do_convert_spirits:
            check_time = check_delay_time(operation,
                                          time=time_for_spirits_conv)
            if check_time:
                logger.info("Convert to SPIRITS format")
                output_dir = es_constants.es2globals['spirits_output_dir']
                conv.convert_driver(output_dir)

        # Push data to ftp server
        operation = 'push_to_ftp'
        if do_push_ftp:
            check_time = check_delay_time(operation, time=time_for_push_ftp)
            if check_time:
                logger.info("Push data to remote ftp server")
                status = push_data_ftp()

        # Clean temporary directory
        operation = 'clean_temp'
        if do_clean_tmp:
            logger.info("Cleaning Temporary dirs")
            clean_temp_dir()

        # Exit in case of dry_run
        if dry_run:
            execute_loop = False

        # Sleep some time
        time.sleep(float(es_constants.es2globals['system_sleep_time_sec']))
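check_delay_time() itself is not part of the excerpt. The contract below is reconstructed purely from its call sites in loop_system and is an assumption, not the project's code: periodic operations pass delay_minutes and record their execution with write=True; daily operations pass an 'HH:MM' string and are expected to fire at most once per day.

import datetime

_last_run = {}  # operation name -> datetime of the last recorded execution

def check_delay_time(operation, delay_minutes=None, time=None, write=False):
    now = datetime.datetime.now()
    if write:
        # Record that the operation has just been executed
        _last_run[operation] = now
        return True
    last = _last_run.get(operation)
    if delay_minutes is not None:
        # Periodic operation: due once delay_minutes have elapsed since the last run
        return last is None or now - last >= datetime.timedelta(minutes=delay_minutes)
    if time is not None:
        # Daily operation: due after HH:MM, at most once per day; recorded here
        # because the call sites never pass write=True for time-based checks
        if (last is None or last.date() != now.date()) and now.strftime('%H:%M') >= time:
            _last_run[operation] = now
            return True
        return False
    return False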
Example #9
from lib.python import functions
from lib.python import es_logging as log
from config import es_constants
from apps.tools import ingest_historical_archives as iha
from apps.es2system import convert_2_spirits as conv
from database import querydb
from apps.productmanagement.products import *
from lib.python.daemon import DaemonDryRunnable

__author__ = "Marco Clerici"

logger = log.my_logger(__name__)
data_dir = es_constants.es2globals['processing_dir']

# Get the local system settings
systemsettings = functions.getSystemSettings()


def get_status_local_machine():
    #   Get info on the status of the local machine
    #

    logger.debug("Entering routine %s" % 'get_status_local_machine')

    # Get the local system settings
    # systemsettings = functions.getSystemSettings()

    # Get status of all services
    status_services = functions.getStatusAllServices()

    get_eumetcast_status = status_services['eumetcast']
Example #10
def loop_ingestion_drive(dry_run=False, test_one_product=None):
    echo_query = False
    # Get all active product ingestion records with a subproduct count.
    active_product_ingestions = querydb.get_ingestion_product(allrecs=True)

    for active_product_ingest in active_product_ingestions:

        productcode = active_product_ingest[0]
        productversion = active_product_ingest[1]

        # Verify the test-one-product case
        do_ingest_product = is_test_one_product(test_one_product, productcode)

        if do_ingest_product:
            logger.info("Ingestion active for product: [%s] subproduct N. %s" %
                        (active_product_ingest[0], active_product_ingest[2]))
            # For the current active product ingestion, build the product descriptor
            product = {"productcode": productcode, "version": productversion}
            logger.debug("Processing product: %s - version %s" %
                         (productcode, productversion))

            # Get the list of acquisition sources that are defined for this ingestion 'trigger'
            # (i.e. prod/version)
            # NOTE: the following implies there is 1 and only 1 '_native' subproduct associated to a 'product';
            native_product = {
                "productcode": productcode,
                "subproductcode": productcode + "_native",
                "version": productversion
            }

            sources_list = querydb.get_product_sources(**native_product)

            logger.debug("For product [%s] N. %s  source is/are found" %
                         (productcode, len(sources_list)))

            systemsettings = functions.getSystemSettings()

            for source in sources_list:

                logger_spec = log.my_logger('apps.ingestion.' + productcode +
                                            '.' + productversion)
                logger.debug("Processing Source type [%s] with id [%s]" %
                             (source.type, source.data_source_id))
                # Re-initialize the datasource_descr
                # datasource_descr = None

                # Get the datasource description
                datasource_descr = querydb.get_datasource_descr(
                    source_type=source.type, source_id=source.data_source_id)
                datasource_descr = datasource_descr[0]
                # TODO optimize this in order to get direct file filter expression
                my_filter_expr = get_filenaming_info(source, datasource_descr)

                files = get_files_matching_with_file_expression(my_filter_expr)

                # See ES2-204
                logger_spec.debug(
                    "Number of files found for product [%s] is: %s" %
                    (active_product_ingest[0], len(files)))
                if len(files) > 0:
                    # Get list of ingestions triggers [prod/subprod/mapset]
                    ingestions = querydb.get_ingestion_subproduct(
                        allrecs=False, **product)

                    # Loop over ingestion triggers
                    subproducts = list()
                    for ingest in ingestions:
                        # TODO if one ingest gives true and another false?
                        dates_not_in_filename = is_date_not_in_filename(
                            ingest.input_to_process_re)
                        logger.debug(" --> processing subproduct: %s" %
                                     ingest.subproductcode)

                        args = {
                            "productcode": product['productcode'],
                            "subproductcode": ingest.subproductcode,
                            "datasource_descr_id":
                            datasource_descr.datasource_descr_id,
                            "version": product['version']
                        }
                        product_in_info = querydb.get_product_in_info(**args)
                        # TODO verify the approach Should we get subproduct from single query
                        subproduct = get_subproduct(
                            ingest, product_in_info,
                            datasource_descr.datasource_descr_id)
                        if subproduct is not None:
                            subproducts.append(subproduct)

                    # subproducts is a list, so test for emptiness (the original
                    # 'is None' check could never be true)
                    if not subproducts:
                        # TODO what to do?
                        logger.error(
                            "No subproducts found for product [%s] in the current active ingestion"
                            % productcode)

                    # Get the list of unique dates by extracting the date from all files.
                    # NOTE: product_in_info, ingest and dates_not_in_filename carry the
                    # values from the last ingestion trigger of the loop above
                    dates_list = get_list_unique_dates(datasource_descr, files,
                                                       dates_not_in_filename,
                                                       product_in_info,
                                                       ingest.mapsetcode)

                    # Loop over dates and get list of files
                    for in_date in dates_list:
                        date_fileslist = get_dates_file_list(
                            dates_not_in_filename, files, my_filter_expr,
                            in_date, logger_spec)
                        # Pass list of files to ingestion routine
                        if not dry_run:
                            try:
                                result = ingestion(date_fileslist,
                                                   in_date,
                                                   product,
                                                   subproducts,
                                                   datasource_descr,
                                                   logger_spec,
                                                   echo_query=echo_query)
                            except Exception:
                                logger.error(
                                    "Error in ingestion of file [%s] " %
                                    (functions.conv_list_2_string(
                                        date_fileslist)))
                            else:
                                # A result of None means we are still waiting for some files to be received: keep the files in /data/ingest
                                # dates_not_in_filename means the input files contain many dates (e.g. GSOD precip)
                                if result is not None and not dates_not_in_filename:
                                    if source.store_original_data or systemsettings[
                                            'type_installation'] == 'Server':
                                        store_native_files(
                                            product, date_fileslist,
                                            logger_spec)
                                    else:
                                        delete_files(date_fileslist,
                                                     logger_spec)

                        else:
                            time.sleep(10)
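loop_ingestion_drive() is the driver that the ingestion service would call from its own loop; a minimal dry invocation (the product code below is invented for illustration):

# Dry pass over a single product: files are listed and matched, nothing is ingested
loop_ingestion_drive(dry_run=True, test_one_product='vgt-ndvi')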