def test_dataset_issued_no_inference(self):
    """The stored dataset 'issued' date must equal the catalog's own value."""
    catalog = DataJson(os.path.join(SAMPLES_DIR, 'full_ts_data.json'))
    self.loader.run(catalog, self.catalog_id)
    expected = iso8601.parse_date(catalog.get_datasets()[0]['issued']).date()
    stored = Dataset.objects.first().issued
    self.assertEqual(stored.date(), expected)
def process_catalog(org, datajson):
    """Download and process the catalog for the given organization.

    Args:
        org: key into the ORGANISMS config identifying the organization;
            also the name of the working subdirectory.
        datajson: pydatajson-like helper used to generate reports.

    Side effects: chdir's into ``org``, writes ``data.json``, ``README.md``
    and ``datasets.csv`` there, and always returns to the parent directory.
    Errors are logged (with traceback) rather than propagated.
    """
    logger.info('=== Catálogo %s ===', org.upper())
    os.chdir(org)
    try:
        config = ORGANISMS[org]
        logger.info('- Lectura de catálogo')
        # For XLSX catalogs, create the corresponding JSON first.
        file_ext = config["formato"]
        if file_ext == 'xlsx':
            logger.info('- Transformación de XLSX a JSON')
            catalog = DataJson(read_xlsx_catalog(config['url']))
        elif file_ext == 'json':
            # NOTE(review): verify=False disables TLS certificate checking;
            # confirm this is intentional for these endpoints.
            catalog = DataJson(
                requests.get(config['url'], verify=False).json())
        elif file_ext == 'ckan':
            catalog = DataJson(read_ckan_catalog(config['url']))
        else:
            # BUG FIX: ValueError was being passed logger-style lazy args
            # ('%s ...', file_ext), so the message was never interpolated.
            raise ValueError(
                '{} no es una extension valida para un catalogo.'.format(
                    file_ext))

        # Called for their side effect: they populate dataset_identifier
        # and distribution_identifier on the catalog.
        catalog.get_datasets()
        catalog.get_distributions()
        catalog.get_time_series()

        logger.info('- Escritura de catálogo')
        if catalog:  # empty/falsy catalog means generation failed
            catalog.to_json('data.json')
        else:
            raise Exception("El catálogo {} no se pudo generar".format(org))

        # Create README and auxiliary reports.
        logger.info('- Generación de reportes')
        datajson.generate_catalog_readme(catalog, export_path='README.md')
        datajson.generate_datasets_summary(catalog, export_path='datasets.csv')
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallowed
        # SystemExit/KeyboardInterrupt. Narrowed to Exception; the error
        # is still logged with the full traceback and not re-raised.
        logger.error('Error al procesar el catálogo de %s', org, exc_info=True)
    finally:
        os.chdir('..')  # Always return to the parent directory.
def test_distribution_issued_no_inference(self):
    """An explicit distribution 'issued' value is stored without inference."""
    catalog = DataJson(os.path.join(SAMPLES_DIR, 'full_ts_data.json'))
    catalog.get_datasets()[0]['distribution'][0]['issued'] = '2016-04-14'
    self.loader.run(catalog, self.catalog_id)
    stored = Distribution.objects.first().issued
    self.assertEqual(stored.date(), datetime(2016, 4, 14).date())