def init_db(options):
    from tkp.config import initialize_pipeline_config, get_database_config

    cfgfile = os.path.join(os.getcwd(), "pipeline.cfg")
    if os.path.exists(cfgfile):
        pipe_config = initialize_pipeline_config(cfgfile, "notset")
        dbconfig = get_database_config(pipe_config['database'], apply=False)
    else:
        dbconfig = get_database_config(None, apply=False)

    if 'engine' not in dbconfig or not dbconfig['engine']:
        dbconfig['engine'] = 'postgresql'

    if 'port' not in dbconfig or not dbconfig['port']:
        if dbconfig['engine'] == 'monetdb':
            dbconfig['port'] = 50000
        else:
            dbconfig['port'] = 5432

    if 'database' not in dbconfig or not dbconfig['database']:
        dbconfig['database'] = getpass.getuser()

    if 'user' not in dbconfig or not dbconfig['user']:
        dbconfig['user'] = dbconfig['database']

    if 'password' not in dbconfig or not dbconfig['password']:
        dbconfig['password'] = dbconfig['user']

    if 'host' not in dbconfig or not dbconfig['host']:
        dbconfig['host'] = 'localhost'

    dbconfig['yes'] = options.yes
    dbconfig['destroy'] = options.destroy

    populate(dbconfig)
def get_db_config():
    cfgfile = os.path.join(os.getcwd(), "pipeline.cfg")
    if os.path.exists(cfgfile):
        pipe_config = initialize_pipeline_config(cfgfile, "notset")
        dbconfig = get_database_config(pipe_config['database'], apply=False)
    else:
        dbconfig = get_database_config(None, apply=False)
    return dbconfig
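# Usage sketch (not part of the original module): shows how the dictionary
# returned by get_db_config() above might be inspected before connecting.
# Assumes the same module context (os, initialize_pipeline_config and
# get_database_config already imported); the helper name is hypothetical.
def _print_db_config():
    dbconfig = get_db_config()
    for key in sorted(dbconfig):
        print("%s = %s" % (key, dbconfig[key]))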
def setUp(self):
    # Wipe out any pre-existing environment settings
    self.old_environment = os.environ.copy()
    os.environ.pop("TKP_DBENGINE", None)
    os.environ.pop("TKP_DBNAME", None)
    os.environ.pop("TKP_DBUSER", None)
    os.environ.pop("TKP_DBPASSWORD", None)
    os.environ.pop("TKP_DBHOST", None)
    os.environ.pop("TKP_DBPORT", None)
    self.pipeline_cfg = initialize_pipeline_config(default_pipeline_config,
                                                   'test')
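# Companion tearDown sketch (assumed, not taken from the original test case):
# restores the environment saved by setUp() above so that later tests see the
# original TKP_DB* settings again.
def tearDown(self):
    os.environ.clear()
    os.environ.update(self.old_environment)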
def setUpClass(cls):
    cls.dataset_id = db_subs.create_dataset_8images()
    cls.images = [testdata.fits_file]
    cls.extraction_radius = 256
    job_config = SafeConfigParser()
    job_config.read(default_job_config)
    job_config = parse_to_dict(job_config)
    cls.persistence_pars = job_config['persistence']
    pipe_config = initialize_pipeline_config(default_pipeline_config,
                                             job_name="test_persistence")
    cls.image_cache_pars = pipe_config['image_cache']
def init_db(options):
    from tkp.config import initialize_pipeline_config, get_database_config

    cfgfile = os.path.join(os.getcwd(), "pipeline.cfg")
    if os.path.exists(cfgfile):
        pipe_config = initialize_pipeline_config(cfgfile, "notset")
        dbconfig = get_database_config(pipe_config['database'], apply=False)
    else:
        dbconfig = get_database_config(None, apply=False)

    dbconfig['yes'] = options.yes
    dbconfig['destroy'] = options.destroy

    populate(dbconfig)
def setUpClass(cls):
    dataset = tkp.db.DataSet(data={'description': "Test persistence"})
    cls.dataset_id = dataset.id
    cls.images = [datafile]
    cls.extraction_radius = 256
    job_config = SafeConfigParser()
    job_config.read(default_job_config)
    job_config = parse_to_dict(job_config)
    cls.persistence_pars = job_config['persistence']
    pipe_config = initialize_pipeline_config(default_pipeline_config,
                                             job_name="test_persistence")
    cls.image_cache_pars = pipe_config['image_cache']
def setUpClass(cls): dataset = tkp.db.DataSet(data={"description": "Test persistence"}) cls.dataset_id = dataset.id cls.images = [datafile] cls.accessors = [tkp.accessors.open(datafile)] cls.extraction_radius = 256 job_config = SafeConfigParser() job_config.read(default_job_config) job_config = parse_to_dict(job_config) cls.persistence_pars = job_config["persistence"] pipe_config = initialize_pipeline_config(default_pipeline_config, job_name="test_persistence") cls.image_cache_pars = pipe_config["image_cache"]
def init_db(options):
    from tkp.config import initialize_pipeline_config, database_config

    pipe_config = initialize_pipeline_config(
        os.path.join(os.getcwd(), "pipeline.cfg"), "notset")
    dbconfig = database_config(pipe_config, apply=False)

    for field in ['engine', 'database', 'user', 'password', 'host', 'port',
                  'passphrase']:
        value = getattr(options, field)
        if value:
            dbconfig[field] = value

    if 'engine' not in dbconfig or not dbconfig['engine']:
        dbconfig['engine'] = 'postgresql'

    if 'port' not in dbconfig or not dbconfig['port']:
        if dbconfig['engine'] == 'monetdb':
            dbconfig['port'] = 50000
        else:
            dbconfig['port'] = 5432

    if 'database' not in dbconfig or not dbconfig['database']:
        dbconfig['database'] = getpass.getuser()

    if 'user' not in dbconfig or not dbconfig['user']:
        dbconfig['user'] = dbconfig['database']

    if 'password' not in dbconfig or not dbconfig['password']:
        dbconfig['password'] = dbconfig['user']

    if 'host' not in dbconfig or not dbconfig['host']:
        dbconfig['host'] = 'localhost'

    dbconfig['yes'] = options.yes

    if 'passphrase' not in dbconfig:
        dbconfig['passphrase'] = ""

    populate(dbconfig)
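# Hypothetical invocation sketch for init_db() above (illustration only, not
# from the original source): builds a minimal 'options' object carrying the
# attributes the function reads (engine, database, user, password, host, port,
# passphrase, yes). argparse.Namespace and all values are placeholders.
def _example_init_db():
    import argparse
    options = argparse.Namespace(
        engine='postgresql', database='trap', user='trap', password='trap',
        host='localhost', port=5432, passphrase='', yes=True)
    init_db(options)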
def test_default_pipeline_config(self):
    pipe_config = initialize_pipeline_config(default_pipeline_config, 'test')
def run(job_name, supplied_mon_coords=[]):
    pipe_config = initialize_pipeline_config(
        os.path.join(os.getcwd(), "pipeline.cfg"), job_name)

    # Get parallelise properties. Defaults to multiproc with an autodetected
    # number of cores.
    parallelise = pipe_config.get('parallelise', {})
    distributor = os.environ.get('TKP_PARALLELISE',
                                 parallelise.get('method', 'multiproc'))
    runner = Runner(distributor=distributor, cores=parallelise.get('cores', 0))

    debug = pipe_config.logging.debug
    # Set up the logfile before we do anything else.
    log_dir = pipe_config.logging.log_dir
    setup_log_file(log_dir, debug)

    job_dir = pipe_config.DEFAULT.job_directory
    if not os.access(job_dir, os.X_OK):
        msg = "can't access job folder %s" % job_dir
        logger.error(msg)
        raise IOError(msg)
    logger.info("Job dir: %s", job_dir)

    db_config = get_database_config(pipe_config.database, apply=True)
    dump_database_backup(db_config, job_dir)

    job_config = load_job_config(pipe_config)
    se_parset = job_config.source_extraction
    deruiter_radius = job_config.association.deruiter_radius
    beamwidths_limit = job_config.association.beamwidths_limit
    new_src_sigma = job_config.transient_search.new_source_sigma_margin

    all_images = imp.load_source(
        'images_to_process',
        os.path.join(job_dir, 'images_to_process.py')).images
    logger.info("dataset %s contains %s images" % (job_name, len(all_images)))

    logger.info("performing database consistency check")
    if not dbconsistency.check():
        logger.error("Inconsistent database found; aborting")
        return 1

    dataset_id = create_dataset(job_config.persistence.dataset_id,
                                job_config.persistence.description)

    if job_config.persistence.dataset_id == -1:
        store_config(job_config, dataset_id)  # new data set
        if supplied_mon_coords:
            dbgen.insert_monitor_positions(dataset_id, supplied_mon_coords)
    else:
        job_config_from_db = fetch_config(dataset_id)  # existing data set
        if check_job_configs_match(job_config, job_config_from_db):
            logger.debug("Job configs from file / database match OK.")
        else:
            logger.warn("Job config file has changed since dataset was "
                        "first loaded into database. ")
            logger.warn("Using job config settings loaded from database, see "
                        "log dir for details")
        job_config = job_config_from_db
        if supplied_mon_coords:
            logger.warn("Monitor positions supplied will be ignored. "
                        "(Previous dataset specified)")

    dump_configs_to_logdir(log_dir, job_config, pipe_config)

    logger.info("performing persistence step")
    image_cache_params = pipe_config.image_cache
    imgs = [[img] for img in all_images]
    rms_est_sigma = job_config.persistence.rms_est_sigma
    rms_est_fraction = job_config.persistence.rms_est_fraction
    metadatas = runner.map("persistence_node_step", imgs,
                           [image_cache_params, rms_est_sigma,
                            rms_est_fraction])
    metadatas = [m[0] for m in metadatas if m]

    logger.info("Storing images")
    image_ids = store_images(metadatas,
                             job_config.source_extraction.extraction_radius_pix,
                             dataset_id)
    db_images = [Image(id=image_id) for image_id in image_ids]

    logger.info("performing quality check")
    urls = [img.url for img in db_images]
    arguments = [job_config]
    rejecteds = runner.map("quality_reject_check", urls, arguments)

    good_images = []
    for image, rejected in zip(db_images, rejecteds):
        if rejected:
            reason, comment = rejected
            steps.quality.reject_image(image.id, reason, comment)
        else:
            good_images.append(image)

    if not good_images:
        logger.warn("No good images under these quality checking criteria")
        return

    grouped_images = group_per_timestep(good_images)
    timestep_num = len(grouped_images)
    for n, (timestep, images) in enumerate(grouped_images):
        msg = "processing %s images in timestep %s (%s/%s)"
        logger.info(msg % (len(images), timestep, n + 1, timestep_num))

        logger.info("performing source extraction")
        urls = [img.url for img in images]
        arguments = [se_parset]
        extraction_results = runner.map("extract_sources", urls, arguments)

        logger.info("storing extracted sources to database")
        # We also set the image max/min RMS values, which were calculated
        # during source extraction.
        for image, results in zip(images, extraction_results):
            image.update(rms_min=results.rms_min,
                         rms_max=results.rms_max,
                         detection_thresh=se_parset['detection_threshold'],
                         analysis_thresh=se_parset['analysis_threshold'])
            dbgen.insert_extracted_sources(image.id, results.sources, 'blind')

        logger.info("performing database operations")
        for image in images:
            logger.info("performing DB operations for image %s" % image.id)

            logger.info("performing source association")
            dbass.associate_extracted_sources(
                image.id, deRuiter_r=deruiter_radius,
                new_source_sigma_margin=new_src_sigma)

            all_fit_posns, all_fit_ids = steps_ff.get_forced_fit_requests(image)
            if all_fit_posns:
                successful_fits, successful_ids = steps_ff.perform_forced_fits(
                    all_fit_posns, all_fit_ids, image.url, se_parset)
                steps_ff.insert_and_associate_forced_fits(
                    image.id, successful_fits, successful_ids)

    dbgen.update_dataset_process_end_ts(dataset_id)
def get_pipe_config(job_name):
    return initialize_pipeline_config(os.path.join(os.getcwd(), "pipeline.cfg"),
                                      job_name)
def run(job_name, mon_coords, local=False):
    setup_event_listening(celery_app)
    pipe_config = initialize_pipeline_config(
        os.path.join(os.getcwd(), "pipeline.cfg"), job_name)

    debug = pipe_config.logging.debug
    # Set up the logfile before we do anything else.
    log_dir = pipe_config.logging.log_dir
    setup_log_file(log_dir, debug)

    job_dir = pipe_config.DEFAULT.job_directory
    if not os.access(job_dir, os.X_OK):
        msg = "can't access job folder %s" % job_dir
        logger.error(msg)
        raise IOError(msg)
    logger.info("Job dir: %s", job_dir)

    db_config = get_database_config(pipe_config.database, apply=True)
    dump_database_backup(db_config, job_dir)

    job_config = load_job_config(pipe_config)
    se_parset = job_config.source_extraction
    deruiter_radius = job_config.association.deruiter_radius

    all_images = imp.load_source(
        'images_to_process',
        os.path.join(job_dir, 'images_to_process.py')).images
    logger.info("dataset %s contains %s images" % (job_name, len(all_images)))

    logger.info("performing database consistency check")
    if not dbconsistency.check():
        logger.error("Inconsistent database found; aborting")
        return 1

    dataset_id = create_dataset(job_config.persistence.dataset_id,
                                job_config.persistence.description)

    if job_config.persistence.dataset_id == -1:
        store_config(job_config, dataset_id)  # new data set
    else:
        job_config_from_db = fetch_config(dataset_id)  # existing data set
        if check_job_configs_match(job_config, job_config_from_db):
            logger.debug("Job configs from file / database match OK.")
        else:
            logger.warn("Job config file has changed since dataset was "
                        "first loaded into database. ")
            logger.warn("Using job config settings loaded from database, see "
                        "log dir for details")
        job_config = job_config_from_db

    dump_configs_to_logdir(log_dir, job_config, pipe_config)

    logger.info("performing persistence step")
    image_cache_params = pipe_config.image_cache
    imgs = [[img] for img in all_images]
    metadatas = runner(tasks.persistence_node_step, imgs, [image_cache_params],
                       local)
    metadatas = [m[0] for m in metadatas]

    logger.info("Storing images")
    image_ids = store_images(metadatas,
                             job_config.source_extraction.extraction_radius_pix,
                             dataset_id)
    db_images = [Image(id=image_id) for image_id in image_ids]

    logger.info("performing quality check")
    urls = [img.url for img in db_images]
    arguments = [job_config]
    rejecteds = runner(tasks.quality_reject_check, urls, arguments, local)

    good_images = []
    for image, rejected in zip(db_images, rejecteds):
        if rejected:
            reason, comment = rejected
            steps.quality.reject_image(image.id, reason, comment)
        else:
            good_images.append(image)

    if not good_images:
        logger.warn("No good images under these quality checking criteria")
        return

    grouped_images = group_per_timestep(good_images)
    timestep_num = len(grouped_images)
    for n, (timestep, images) in enumerate(grouped_images):
        msg = "processing %s images in timestep %s (%s/%s)"
        logger.info(msg % (len(images), timestep, n + 1, timestep_num))

        logger.info("performing source extraction")
        urls = [img.url for img in images]
        arguments = [se_parset]
        extract_sources = runner(tasks.extract_sources, urls, arguments, local)

        logger.info("storing extracted sources to database")
        for image, sources in zip(images, extract_sources):
            dbgen.insert_extracted_sources(image.id, sources, 'blind')

        logger.info("performing database operations")
        for image in images:
            logger.info("performing DB operations for image %s" % image.id)

            logger.info("performing source association")
            dbass.associate_extracted_sources(image.id,
                                              deRuiter_r=deruiter_radius)

            logger.info("performing null detections")
            null_detections = dbnd.get_nulldetections(image.id)
            logger.info("Found %s null detections" % len(null_detections))

            # The following steps are only necessary if we found null
            # detections.
            if len(null_detections) > 0:
                logger.info("performing forced fits")
                ff_nd = forced_fits(image.url, null_detections, se_parset)
                dbgen.insert_extracted_sources(image.id, ff_nd, 'ff_nd')
                logger.info("adding null detections")
                dbnd.associate_nd(image.id)

            if len(mon_coords) > 0:
                logger.info("performing monitoringlist")
                ff_ms = forced_fits(image.url, mon_coords, se_parset)
                dbgen.insert_extracted_sources(image.id, ff_ms, 'ff_ms')
                logger.info("adding monitoring sources")
                dbmon.associate_ms(image.id)

            transients = search_transients(image.id,
                                           job_config['transient_search'])

    dbgen.update_dataset_process_end_ts(dataset_id)
def run(job_name, local=False):
    pipe_config = initialize_pipeline_config(
        os.path.join(os.getcwd(), "pipeline.cfg"), job_name)

    database_config(pipe_config)

    job_dir = pipe_config.get('layout', 'job_directory')
    if not os.access(job_dir, os.X_OK):
        msg = "can't access job folder %s" % job_dir
        logger.error(msg)
        raise IOError(msg)
    logger.info("Job dir: %s", job_dir)

    images = imp.load_source(
        'images_to_process',
        os.path.join(job_dir, 'images_to_process.py')).images
    logger.info("dataset %s contains %s images" % (job_name, len(images)))

    job_config = load_job_config(pipe_config)
    dump_job_config_to_logdir(pipe_config, job_config)

    p_parset = parset.load_section(job_config, 'persistence')
    se_parset = parset.load_section(job_config, 'source_extraction')
    nd_parset = parset.load_section(job_config, 'null_detections')
    tr_parset = parset.load_section(job_config, 'transient_search')

    # persistence
    imgs = [[img] for img in images]
    arguments = [p_parset]
    metadatas = runner(tasks.persistence_node_step, imgs, arguments, local)
    metadatas = [m[0] for m in metadatas]
    dataset_id, image_ids = steps.persistence.master_steps(metadatas,
                                                           se_parset['radius'],
                                                           p_parset)

    # manual monitoringlist entries
    if not add_manual_monitoringlist_entries(dataset_id, []):
        return 1

    images = [Image(id=image_id) for image_id in image_ids]

    # quality_check
    urls = [img.url for img in images]
    arguments = [job_config]
    rejecteds = runner(tasks.quality_reject_check, urls, arguments, local)

    good_images = []
    for image, rejected in zip(images, rejecteds):
        if rejected:
            reason, comment = rejected
            steps.quality.reject_image(image.id, reason, comment)
        else:
            good_images.append(image)

    if not good_images:
        logger.warn("No good images under these quality checking criteria")
        return

    # Sourcefinding
    urls = [img.url for img in good_images]
    arguments = [se_parset]
    extract_sources = runner(tasks.extract_sources, urls, arguments, local)
    for image, sources in zip(good_images, extract_sources):
        dbgen.insert_extracted_sources(image.id, sources, 'blind')

    # null_detections
    deRuiter_radius = nd_parset['deruiter_radius']
    null_detectionss = [dbmon.get_nulldetections(image.id, deRuiter_radius)
                        for image in good_images]
    iters = zip([i.url for i in good_images], null_detectionss)
    arguments = [nd_parset]
    ff_nds = runner(tasks.forced_fits, iters, arguments, local)
    for image, ff_nd in zip(good_images, ff_nds):
        dbgen.insert_extracted_sources(image.id, ff_nd, 'ff_nd')

    for image in good_images:
        logger.info("performing DB operations for image %s" % image.id)
        dbass.associate_extracted_sources(image.id, deRuiter_r=deRuiter_radius)
        dbmon.add_nulldetections(image.id)
        transients = steps.transient_search.search_transients(image.id,
                                                              tr_parset)
        dbmon.adjust_transients_in_monitoringlist(image.id, transients)

    for transient in transients:
        steps.feature_extraction.extract_features(transient)
        # ingred.classification.classify(transient, cl_parset)

    now = datetime.datetime.utcnow()
    dbgen.update_dataset_process_ts(dataset_id, now)
def run(job_name, supplied_mon_coords=[]):
    pipe_config = initialize_pipeline_config(
        os.path.join(os.getcwd(), "pipeline.cfg"), job_name)

    # Get parallelise properties. Defaults to multiproc with an autodetected
    # number of cores.
    parallelise = pipe_config.get('parallelise', {})
    distributor = os.environ.get('TKP_PARALLELISE',
                                 parallelise.get('method', 'multiproc'))
    runner = Runner(distributor=distributor, cores=parallelise.get('cores', 0))

    debug = pipe_config.logging.debug
    # Set up the logfile before we do anything else.
    log_dir = pipe_config.logging.log_dir
    setup_log_file(log_dir, debug)

    job_dir = pipe_config.DEFAULT.job_directory
    if not os.access(job_dir, os.X_OK):
        msg = "can't access job folder %s" % job_dir
        logger.error(msg)
        raise IOError(msg)
    logger.info("Job dir: %s", job_dir)

    db_config = get_database_config(pipe_config.database, apply=True)
    dump_database_backup(db_config, job_dir)

    job_config = load_job_config(pipe_config)
    se_parset = job_config.source_extraction
    deruiter_radius = job_config.association.deruiter_radius
    beamwidths_limit = job_config.association.beamwidths_limit
    new_src_sigma = job_config.transient_search.new_source_sigma_margin

    all_images = imp.load_source(
        'images_to_process',
        os.path.join(job_dir, 'images_to_process.py')).images
    logger.info("dataset %s contains %s images" % (job_name, len(all_images)))

    logger.info("performing database consistency check")
    if not dbconsistency.check():
        logger.error("Inconsistent database found; aborting")
        return 1

    dataset_id = create_dataset(job_config.persistence.dataset_id,
                                job_config.persistence.description)

    if job_config.persistence.dataset_id == -1:
        store_config(job_config, dataset_id)  # new data set
        if supplied_mon_coords:
            dbgen.insert_monitor_positions(dataset_id, supplied_mon_coords)
    else:
        job_config_from_db = fetch_config(dataset_id)  # existing data set
        if check_job_configs_match(job_config, job_config_from_db):
            logger.debug("Job configs from file / database match OK.")
        else:
            logger.warn("Job config file has changed since dataset was "
                        "first loaded into database. ")
            logger.warn("Using job config settings loaded from database, see "
                        "log dir for details")
        job_config = job_config_from_db
        if supplied_mon_coords:
            logger.warn("Monitor positions supplied will be ignored. "
                        "(Previous dataset specified)")

    dump_configs_to_logdir(log_dir, job_config, pipe_config)

    logger.info("performing persistence step")
    image_cache_params = pipe_config.image_cache
    imgs = [[img] for img in all_images]
    rms_est_sigma = job_config.persistence.rms_est_sigma
    rms_est_fraction = job_config.persistence.rms_est_fraction
    metadatas = runner.map("persistence_node_step", imgs,
                           [image_cache_params, rms_est_sigma,
                            rms_est_fraction])
    metadatas = [m[0] for m in metadatas if m]

    logger.info("Storing images")
    image_ids = store_images(metadatas,
                             job_config.source_extraction.extraction_radius_pix,
                             dataset_id)
    db_images = [Image(id=image_id) for image_id in image_ids]

    logger.info("performing quality check")
    urls = [img.url for img in db_images]
    arguments = [job_config]
    rejecteds = runner.map("quality_reject_check", urls, arguments)

    good_images = []
    for image, rejected in zip(db_images, rejecteds):
        if rejected:
            reason, comment = rejected
            steps.quality.reject_image(image.id, reason, comment)
        else:
            good_images.append(image)

    if not good_images:
        logger.warn("No good images under these quality checking criteria")
        return

    grouped_images = group_per_timestep(good_images)
    timestep_num = len(grouped_images)
    for n, (timestep, images) in enumerate(grouped_images):
        msg = "processing %s images in timestep %s (%s/%s)"
        logger.info(msg % (len(images), timestep, n + 1, timestep_num))

        logger.info("performing source extraction")
        urls = [img.url for img in images]
        arguments = [se_parset]
        extraction_results = runner.map("extract_sources", urls, arguments)

        logger.info("storing extracted sources to database")
        # We also set the image max/min RMS values, which were calculated
        # during source extraction.
        for image, results in zip(images, extraction_results):
            image.update(rms_min=results.rms_min,
                         rms_max=results.rms_max,
                         detection_thresh=se_parset['detection_threshold'],
                         analysis_thresh=se_parset['analysis_threshold'])
            dbgen.insert_extracted_sources(image.id, results.sources, 'blind')

        logger.info("performing database operations")
        for image in images:
            logger.info("performing DB operations for image %s" % image.id)

            logger.info("performing source association")
            dbass.associate_extracted_sources(
                image.id, deRuiter_r=deruiter_radius,
                new_source_sigma_margin=new_src_sigma)

            expiration = job_config.source_extraction.expiration
            all_fit_posns, all_fit_ids = steps_ff.get_forced_fit_requests(
                image, expiration)
            if all_fit_posns:
                successful_fits, successful_ids = steps_ff.perform_forced_fits(
                    all_fit_posns, all_fit_ids, image.url, se_parset)
                steps_ff.insert_and_associate_forced_fits(
                    image.id, successful_fits, successful_ids)

    dbgen.update_dataset_process_end_ts(dataset_id)

    logger.info("calculating variability metrics")
    execute_store_varmetric(dataset_id)
def get_pipe_config(job_name):
    return initialize_pipeline_config(
        os.path.join(os.getcwd(), "pipeline.cfg"), job_name)
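# Illustrative sketch only (not from the original source): combining
# get_pipe_config() above with the database helper used elsewhere in this
# collection. "my_job" is a placeholder job name, and get_database_config is
# assumed importable from tkp.config as in the other functions.
def _example_db_config_for_job():
    pipe_config = get_pipe_config("my_job")
    return get_database_config(pipe_config['database'], apply=False)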