Example #1
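Imports Entity records from per-entity JSON files in a folder, one pass per hierarchy level.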
    def handle(self, *args, **options):
        # make sure we're at project root
        chdir_dmd()

        self.json_folder = options.get('json_folder')

        logger.info("Importing Entities from JSON files from `{}`".format(
            self.json_folder))

        if not p(self.json_folder).isdir():
            logger.error("JSON folder `{}` is not a directory.".format(
                self.json_folder))
            return

        for level in range(5):
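            # one pass per entity level (0 through 4); enforced_level
            # presumably limits which entities import_entity() processes
            # on each pass, so parents are imported before children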
            self.enforced_level = level

            for fpath in os.listdir(self.json_folder):
                if not fpath.endswith('.json'):
                    continue

                # import entity
                eid = fpath[:-5]
                self.import_entity(eid)

        self.enforced_level = None
Example #2
File: dump_db.py Project: yeleman/dmd
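Dumps the default Django database into the backups repository (mysqldump for MySQL, a plain file copy for SQLite), then compresses the dump with 7z and removes the uncompressed file.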
    def handle(self, *args, **options):
        # make sure we're at project root
        chdir_dmd()

        # django config of the DB
        dbconf = settings.DATABASES.get('default', {})

        if dbconf.get('ENGINE').endswith('mysql'):
            # dump the MySQL DB to a file
            dump_name = "{name}.sql".format(name=dbconf.get('NAME'))
            dump_path = os.path.join(settings.BACKUPS_REPOSITORY, dump_name)
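            # NB: passing the password via -p<password> on the command line
            # exposes it to other local users through the process list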
            cmd = ['mysqldump',
                   '-h', dbconf.get('HOST'),
                   '-u', dbconf.get('USER'),
                   '-p{passwd}'.format(passwd=dbconf.get('PASSWORD')),
                   '-r', dump_path,
                   dbconf.get('NAME')]

            # dump database
            subprocess.call(cmd, shell=False)
        elif dbconf.get('ENGINE').endswith('sqlite3'):
            # copy sqlite DB to backup dir
            dump_name = os.path.basename(dbconf.get('NAME'))
            dump_path = os.path.join(settings.BACKUPS_REPOSITORY, dump_name)
            shutil.copy2(dbconf.get('NAME'), dump_path)
        else:
            logger.error("DB engine `{engine}` not supported"
                         .format(engine=dbconf.get('ENGINE')))
            return

        # compression is done from backup dir
        curdir = os.getcwd()
        os.chdir(settings.BACKUPS_REPOSITORY)

        ark_name = "{orig}_{date}.7z".format(
            orig=os.path.basename(dump_path),
            date=timezone.now().strftime("%Y-%m-%d"))

        # compress the DB dump
        subprocess.call(['7z', 'a', ark_name, dump_name])

        # delete uncompressed file
        try:
            os.unlink(dump_path)
        except OSError:
            pass

        # restore working dir
        os.chdir(curdir)
Example #3
File: rotate_dumps.py Project: yeleman/dmd
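Rotates backup archives: everything from the last week is kept, plus Monday backups from the last month and every first-of-month backup.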
    def handle(self, *args, **options):

        # make sure we're at project root
        chdir_dmd()

        # retention policy: daily backups are kept for the last 7 days,
        # Monday backups for the last 30 days,
        # and the first-of-month backup of every month is kept for good

        today = datetime.datetime.today()
        aweek_ago = today - datetime.timedelta(days=7)
        amonth_ago = today - datetime.timedelta(days=30)

        for fname in os.listdir(settings.BACKUPS_REPOSITORY):
            if fname in ('.', '..', 'README'):
                continue

            if not fname.endswith('.7z'):
                continue

            fpath = os.path.join(settings.BACKUPS_REPOSITORY, fname)
            fdate = datetime.datetime(
                *[int(x)
                  for x in fname.rsplit('.7z', 1)[0]
                  .rsplit('_', 1)[1].split('-')])

            # keep all files less than a week old
            if fdate > aweek_ago:
                logger.info("Keeping {} - less than a week old".format(fname))
                continue

            # keep every Monday backup that is less than a month old
            if fdate.isoweekday() == 1 and fdate > amonth_ago:
                logger.info("Keeping {} - monday within a month".format(fname))
                continue

            # keep the first-of-month backup of every month
            if fdate.day == 1:
                logger.info("Keeping {} - first of a month".format(fname))
                continue

            try:
                logger.info("Removing {}".format(fname))
                os.unlink(fpath)
            except OSError as exp:
                logger.error("Unable to delete {f}: {exp}"
                             .format(f=fpath, exp=exp))
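The archive-date parsing above is compact; this minimal sketch shows what it extracts, using a made-up archive name that follows the dump_db naming convention (<db name>.sql_<YYYY-MM-DD>.7z):

    import datetime

    fname = 'dmd.sql_2015-06-01.7z'  # hypothetical archive name
    # drop the '.7z' suffix, then take what follows the last '_'
    date_part = fname.rsplit('.7z', 1)[0].rsplit('_', 1)[1]  # '2015-06-01'
    fdate = datetime.datetime(*[int(x) for x in date_part.split('-')])
    print(fdate)  # 2015-06-01 00:00:00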
Example #4
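Exports every DataRecord to CSV and to an XLSX workbook with one sheet per indicator, then stores the record count in Metadata.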
    def handle(self, *args, **kwargs):

        # make sure we're at project root
        chdir_dmd()

        logger.info("Exporting all DataRecord to: {}"
                    .format(settings.ALL_EXPORT_PATH))
        qs = get_records()
        nb_records = qs.count()
        get_csv_for(qs, save_to=settings.ALL_EXPORT_PATH)

        logger.info("Exporting all DataRecord (XLS 1sheet/indicator) to {}"
                    .format(settings.ALL_EXPORT_XLSX_PATH))

        export_to_spreadsheet(qs, save_to=settings.ALL_EXPORT_XLSX_PATH)

        Metadata.update('nb_records', nb_records)

        logger.info("All done")
Example #5
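Generates an XLSX data-entry tool (dataentry.xlsx) for the DPS named on the command line.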
    def handle(self, *args, **options):

        # make sure we're at project root
        chdir_dmd()

        dps_name = options.get('dps')
        if not dps_name:
            logger.error("Missing DPS name.")
            return 1

        rdc = Entity.get_root()
        dps = Entity.lookup_at(parent=rdc, name=dps_name)[0]
        if dps is None:
            logger.error("Unable to match DPS with name `{}`".format(dps_name))
            return 1

        logger.info("Generating XLS dataentry tool for `{}`".format(dps_name))

        generate_dataentry_for(dps, 'dataentry.xlsx')
Example #6
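Reads a GeoJSON file and attaches each feature's geometry to the matching Entity: DPS features are matched by name, ZS features by their parent DPS and name.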
    def handle(self, *args, **options):

        # make sure we're at project root
        chdir_dmd()

        if not os.path.exists(options.get('file')):
            logger.error("GeoJSON file does not exit.")
            return False

        with open(options.get('file'), 'r') as f:
            gjson = json.load(f)

        rdc = Entity.get_root()

        for feature in gjson['features']:
            dps_name = feature['properties'].get('NOM_DPS')
            if dps_name:
                name = dps_name
                logger.debug(name)
                entity = Entity.objects.get(name=name)
            else:
                zs_name = feature['properties'].get('NAME')
                dps_name = feature['properties'].get('DPS')

                logger.debug("dps: {d} - zs: {z}".format(d=dps_name,
                                                         z=zs_name))

                parent = Entity.find_by_stdname(parent=rdc, std_name=dps_name)
                logger.debug("\tparent: {p}".format(p=parent))
                assert parent is not None

                entity, children = Entity.lookup_at(parent=parent,
                                                    name=zs_name.upper())

            assert entity is not None
            logger.info(entity)

            entity.geometry = json.dumps(feature['geometry'])
            entity.save()

        logger.info("done.")
Example #7
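Computes the centroid of each Entity's geometry and stores it as the entity's latitude and longitude.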
    def handle(self, *args, **options):

        # make sure we're at project root
        chdir_dmd()

        logger.info("Creating latitude and longitude data for Entities")

        for entity in Entity.objects.all():
            if not entity.geometry:
                logger.debug("Skipping {}, no geometry.".format(entity))
                continue

            feature_geom = shape(entity.geojson['geometry'])
            feature_centroid = feature_geom.centroid
            # GeoJSON positions are (longitude, latitude), so the centroid's
            # .x is the longitude and .y the latitude
            entity.latitude = feature_centroid.y
            entity.longitude = feature_centroid.x
            entity.save()
            logger.info("{name}: {lat}, {lng}".format(name=entity.short_name,
                                                      lat=entity.latitude,
                                                      lng=entity.longitude))

        logger.info("done.")
Example #8
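Auto-validates manual DataRecord entries once their validation period is over, stamping them with the current time.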
    def handle(self, *args, **options):

        # make sure we're at project root
        chdir_dmd()

        logger.info("Auto-validation started...")

        DEBUG = options.get('debug', False)

        now = timezone.now()

        # loop on non-validated DataRecord
        records = DataRecord.objects \
            .filter(validation_status=DataRecord.NOT_VALIDATED,
                    indicator__origin=Indicator.MANUAL)

        logger.info("On {on}, there are {nb} non-validated DataRecords".format(
            on=now, nb=records.count()))

        for dr in records:

            # continue if validation delay is not over
            if not dr.validation_period_is_over():
                if DEBUG:
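                    # Python 2: strftime('%c') returns bytes, hence the .decode('utf-8')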
                    logger.debug("Not validating {dr} until {date}".format(
                        dr=dr,
                        date=dr.validation_deadline.strftime('%c').decode(
                            'utf-8')))
                continue

            # auto-validate
            dr.auto_validate(on=now)

            logger.debug("Auto-validated {}".format(dr))

        logger.info("done.")
Example #9
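Sends each partner an email summarising how the records they created were validated during the previous day.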
    def handle(self, *args, **options):

        # make sure we're at project root
        chdir_dmd()

        yesterday = datetime.date.today() - datetime.timedelta(days=1)
        logger.info("Sending validation feedback for {}".format(yesterday))

        start = datetime.datetime(*yesterday.timetuple()[:6])
        end = datetime.datetime(*yesterday.timetuple()[:3],
                                hour=23,
                                minute=59,
                                second=59)

        dhis_bot = Partner.dhis_bot()

        records = DataRecord.objects \
            .exclude(created_by=dhis_bot) \
            .exclude(validated_by__isnull=True) \
            .filter(validated_on__gte=start,
                    validated_on__lte=end)

        def summary_for(partner):
            pqs = records.filter(created_by=partner)
            return {
                'partner': partner,
                'nb_validations': pqs.count(),
                'status': {
                    s['validation_status']: {
                        'name': text_type(DataRecord.VALIDATION_STATUSES.get(
                            s['validation_status'])),
                        'count': pqs.filter(
                            validation_status=s['validation_status']).count(),
                        'all': pqs.filter(
                            validation_status=s['validation_status']),
                    }
                    for s in pqs.values('validation_status')
                }
            }

        feedbacks = [
            summary_for(Partner.objects.get(id=pid))
            for pid in set(r['created_by']
                           for r in records.values('created_by'))
            if pid
        ]

        for ptnf in feedbacks:
            partner = ptnf['partner']

            if not partner.email:
                continue

            ptnf.update({'yesterday': yesterday})
            email_sent, x = send_validation_feedback_email(partner=partner,
                                                           summary=ptnf)
            if email_sent:
                logger.info("Sent feedback to {}".format(partner))
            else:
                logger.error("Unable to send feedback to {}".format(partner))

        logger.info("done.")
Example #10
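Pre-computes cached data for the dashboard completeness view and the section2 arrivals and points views, printing a progress counter to stdout.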
    def handle(self, *args, **options):

        # make sure we're at project root
        chdir_dmd()

        logger.info("Updating cache for dashboard completeness...")

        root = Entity.get_root()
        periods = MonthPeriod.all_till_now()
        all_dps = root.get_children()
        all_entities = list(all_dps) + [root]
        indicators = Indicator.objects.all()
        all_indicators = list(indicators) + [None]
        nb_items = len(periods) * len(all_dps) * len(all_indicators)

        nb_ran = 0
        for period in periods:
            # logger.debug("{}".format(period))
            for dps in all_dps:
                # logger.debug("== {}".format(dps))
                for indicator in all_indicators:
                    nb_ran += 1
                    # logger.debug("==== {}".format(indicator))

                    params = {
                        'dps': dps,
                        'period': period,
                        'indicator': indicator
                    }

                    # existing cache entries 4+ months old are not regenerated
                    if period <= periods[-4]:
                        if cache_exists_for('completeness', **params):
                            # logger.info("***** Skipping existing.")
                            continue

                    update_cached_data('completeness', **params)

                    # nb_ran * 100 / nb_items keeps the percentage correct
                    # under Python 2 integer division as well
                    sys.stdout.write("{}/{} - {}%\r".format(
                        nb_ran, nb_items, int(nb_ran * 100 / nb_items)))
                    sys.stdout.flush()

        logger.info("Updating cache for section2/arrivals...")

        nb_items = len(periods) * len(all_entities) * len(indicators)
        nb_ran = 0
        for period in periods:
            for entity in all_entities:
                for indicator in indicators:
                    nb_ran += 1
                    params = {
                        'entity': entity,
                        'period': period,
                        'indicator': indicator
                    }
                    # as above, skip existing cache entries 4+ months old
                    if period <= periods[-4]:
                        if cache_exists_for('section2-arrivals', **params):
                            continue

                    update_cached_data('section2-arrivals', **params)

                    sys.stdout.write("{}/{} - {}%\r".format(
                        nb_ran, nb_items, int(nb_ran * 100 / nb_items)))
                    sys.stdout.flush()

        logger.info("Updating cache for section2/points")

        nb_items = len(periods) * len(all_entities)
        nb_ran = 0
        for period in periods:
            for entity in all_entities:
                nb_ran += 1
                # rebuild params for this loop (the previous loop's dict
                # included an indicator key)
                params = {'entity': entity, 'period': period}
                if period <= periods[-4]:
                    if cache_exists_for('section2-points', **params):
                        continue

                update_cached_data('section2-points', **params)

                sys.stdout.write("{}/{} - {}%\r".format(
                    nb_ran, nb_items, int(nb_ran * 100 / nb_items)))
                sys.stdout.flush()

        logger.info("done.")
Example #11
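Fetches analytics values from DHIS for the requested period (optionally the NB_PREVIOUS_PERIODS before it as well) and records them down the entity tree: country, DPS, ZS, and health area, skipping branches with no data.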
    def handle(self, *args, **options):

        # make sure we're at project root
        chdir_dmd()

        # options parsing
        self.debug = options.get('debug')
        update = options.get('update')
        period = MonthPeriod.get_or_none(options.get('period'))
        if period is None:
            logger.error("Unable to match an actual period from `{}`"
                         .format(options.get('period')))
            return 1

        if options.get('previous', False):
            periods = []
            p = period
            while p > MonthPeriod.objects.all().first():
                periods.append(p)
                if len(periods) >= NB_PREVIOUS_PERIODS:
                    break
                p = p.previous()
        else:
            periods = [period]

        upath = '/analytics.json'

        indicators = {i.slug: (i.dhis_numerator_id, i.dhis_denominator_id)
                      for i in Indicator.get_all_dhis()}
        dhis_ids = list(set([v[0] for v in indicators.values()] +
                            [v[1] for v in indicators.values()]))

        drc = Entity.get_root()
        params = {
            'dimension': ['dx:{}'.format(";".join(dhis_ids)),
                          'pe:{}'.format(
                          ";".join([pe.dhis_strid for pe in periods]))],
            'filter': 'ou:{}'.format(drc.dhis_id),
            'displayProperty': 'NAME',
            'outputIdScheme': 'ID',
            'skipRounding': True,
        }

        logger.info(drc)
        if update or self.no_record_at(entity=drc, period=period):
            self.handle_record(get_dhis(path=upath, params=params),
                               entity=drc, periods=periods)

        for dps in drc.get_children():
            logger.info(dps)

            if not update and not self.no_record_at(entity=dps, period=period):
                continue

            dparams = copy.copy(params)
            dparams.update({'filter': 'ou:{}'.format(dps.dhis_id)})
            self.handle_record(get_dhis(path=upath, params=dparams),
                               entity=dps, periods=periods)

            # don't look for ZS if no data at DPS
            if self.no_record_at(entity=dps, period=period):
                continue

            for zs in dps.get_children():
                logger.info(zs)

                if not update and not self.no_record_at(entity=zs,
                                                        period=period):
                    continue

                zparams = copy.copy(params)
                zparams.update({'filter': 'ou:{}'.format(zs.dhis_id)})
                self.handle_record(get_dhis(path=upath, params=zparams),
                                   entity=zs, periods=periods)

                # don't look for ZS if no data at DPS
                if self.no_record_at(entity=zs, period=period):
                    continue

                for aire in zs.get_children():
                    logger.info(aire)

                    if not update and not self.no_record_at(entity=aire,
                                                            period=period):
                        continue

                    aparams = copy.copy(params)
                    aparams.update({'filter': 'ou:{}'.format(aire.dhis_id)})
                    self.handle_record(get_dhis(path=upath, params=aparams),
                                       entity=aire, periods=periods)