def test_run_todo_file_data_source(
    caps_mock, ad_mock, data_client_mock, set_clients_mock, test_config
):
    set_clients_mock.side_effect = _clients_mock
    caps_mock.return_value = 'https://sc2.canfar.net/sc2repo'
    response = Mock()
    response.status_code = 200
    response.iter_content.return_value = [b'fileName\n']
    ad_mock.return_value.__enter__.return_value = response
    data_client_mock.return_value = SimpleObservation(
        collection=test_config.collection,
        observation_id='def',
        algorithm=Algorithm('test'),
    )

    if os.path.exists(test_config.success_fqn):
        os.unlink(test_config.success_fqn)

    test_config.work_fqn = f'{TEST_DIR}/todo.txt'
    test_config.task_types = [mc.TaskType.VISIT]
    test_config.log_to_file = True
    test_chooser = ec.OrganizeChooser()
    test_result = rc.run_by_todo(
        config=test_config, chooser=test_chooser, command_name=TEST_COMMAND
    )
    assert test_result is not None, 'expect a result'
    assert test_result == 0, 'expect success'
    assert os.path.exists(test_config.success_fqn), 'expect success file'
    with open(test_config.success_fqn) as f:
        content = f.read()
        # the obs id and file name
        assert 'def def.fits' in content, 'wrong success message'

def change_to_simple(observation):
    """For the case where a DerivedObservation needs to become a
    SimpleObservation."""
    temp = SimpleObservation(
        observation.collection,
        observation.observation_id,
        Algorithm('exposure'),
        observation.sequence_number,
        observation.intent,
        observation.type,
        observation.proposal,
        observation.telescope,
        observation.instrument,
        observation.target,
        observation.meta_release,
        observation.meta_read_groups,
        observation.planes,
        observation.environment,
        observation.target_position,
    )
    temp.last_modified = datetime.utcnow()
    temp._id = observation._id
    return temp

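# A minimal usage sketch for change_to_simple, assuming a caller that has
# just emptied the member list of a DerivedObservation and has a
# caom2repo-style client with an update() method. The function name and the
# repo_client parameter are illustrative only; they are not part of this
# module.
def _demote_if_memberless(observation, repo_client):
    # only a member-less derived observation should be demoted
    if (
        isinstance(observation, DerivedObservation)
        and len(observation.members) == 0
    ):
        observation = change_to_simple(observation)
    repo_client.update(observation)
    return observation
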
def _mock_read(ignore_fqn):
    return SimpleObservation(
        collection='test_collection',
        observation_id='ghi',
        algorithm=Algorithm('test'),
    )

def update(observation, **kwargs):
    """Called to fill multiple CAOM model elements and/or attributes (an n:n
    relationship between TDM attributes and CAOM attributes). Must have this
    signature for import_module loading and execution.

    :param observation A CAOM Observation model instance.
    :param **kwargs Everything else."""
    logging.debug('Begin update.')
    mc.check_param(observation, Observation)

    headers = kwargs.get('headers')
    fqn = kwargs.get('fqn')
    uri = kwargs.get('uri')

    gem_proc_name = None
    # ok not to use builder here, since the obs_id value is never used later
    if uri is not None:
        temp = mc.CaomName(uri).file_name
        gem_proc_name = builder.GemProcName(entry=temp)
    if fqn is not None:
        gem_proc_name = builder.GemProcName(entry=fqn)
    if gem_proc_name is None:
        raise mc.CadcException(
            f'Need one of fqn or uri defined for '
            f'{observation.observation_id}'
        )

    for plane in observation.planes.values():
        if plane.product_id != gem_proc_name.product_id:
            continue
        for artifact in plane.artifacts.values():
            for part in artifact.parts.values():
                idx = mc.to_int(part.name)
                header = headers[idx]
                extname = header.get('EXTNAME')
                # DB 22-07-20
                # There are a few other EXTNAME values to look at for
                # part.ProductType. MDF values would be ‘AUXILIARY’. The
                # ones currently called “CAL” are likely best set to ‘INFO’
                # since it contains info about datasets used to produce the
                # product.
                #
                # DB 07-08-20
                # EXTNAME in (‘DQ’, ‘VAR’) should both have
                # ProductType.NOISE. ‘CAL’ should no longer exist - it’s now
                # BPM. Default type is 'AUXILIARY', 'SCI' is type 'SCIENCE'
                if extname == 'SCI':
                    part.product_type = ProductType.SCIENCE
                elif extname in ['DQ', 'VAR']:
                    part.product_type = ProductType.NOISE
                else:
                    part.product_type = ProductType.AUXILIARY

                if part.product_type in [
                    ProductType.SCIENCE,
                    ProductType.INFO,
                ]:
                    for chunk in part.chunks:
                        filter_name = headers[0].get('FILTER').split('_')[0]
                        _update_energy(
                            chunk,
                            headers[idx],
                            filter_name,
                            observation.observation_id,
                        )
                        _update_time(
                            part, chunk, headers[0], observation.observation_id
                        )
                        if part.product_type == ProductType.SCIENCE:
                            _update_spatial_wcs(
                                part,
                                chunk,
                                headers,
                                observation.observation_id,
                            )
                            chunk.naxis = header.get('NAXIS')
                            if (
                                chunk.position is None
                                and chunk.naxis is not None
                            ):
                                chunk.naxis = None
                            if (
                                chunk.time is not None
                                and chunk.time.axis is not None
                                and chunk.time.axis.function is not None
                                and chunk.time.axis.function.delta == 1.0
                            ):
                                # these are the default values, and they make
                                # the time range start in 1858
                                chunk.time = None
                else:
                    # DB 21-07-20
                    # ignore WCS information unless product type == SCIENCE
                    while len(part.chunks) > 0:
                        del part.chunks[-1]

    if (
        observation.proposal is not None
        and observation.proposal.id is not None
        and observation.proposal.pi_name is None
    ):
        program = program_metadata.get_pi_metadata(observation.proposal.id)
        if program is not None:
            observation.proposal.pi_name = program.get('pi_name')
            observation.proposal.title = program.get('title')

    if isinstance(observation, SimpleObservation):
        # undo the observation-level metadata modifications for updated
        # Gemini records
        observation.algorithm = Algorithm(name='exposure')
    else:
        cc.update_observation_members(observation)

    logging.debug('Done update.')
    return observation

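# The EXTNAME handling in update() reduces to a small lookup. This is a
# minimal sketch of that mapping on its own, useful for eyeballing the rule;
# the helper name is illustrative and is not called elsewhere in this module.
def _extname_to_product_type(extname):
    # 'SCI' extensions hold the science pixels; 'DQ' and 'VAR' describe data
    # quality and variance, so they map to NOISE; everything else (MDF, BPM,
    # provenance tables, ...) defaults to AUXILIARY.
    if extname == 'SCI':
        return ProductType.SCIENCE
    if extname in ('DQ', 'VAR'):
        return ProductType.NOISE
    return ProductType.AUXILIARY
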
def _mock_repo_read_not_none(arg1, arg2):
    return SimpleObservation(
        observation_id='TEST_OBS_ID',
        collection='TEST',
        algorithm=Algorithm(name='exposure'),
        instrument=Instrument(name=metadata.Inst.MEGAPRIME.value),
    )

def _write_obs_mock():
    args = get_gen_proc_arg_parser().parse_args()
    obs = SimpleObservation(
        collection=args.observation[0],
        observation_id=args.observation[1],
        algorithm=Algorithm(name='exposure'),
    )
    mc.write_obs_to_file(obs, args.out_obs_xml)

def _build_obs():
    return SimpleObservation(
        collection='TEST',
        observation_id='C121212_domeflat_K_CALRED',
        algorithm=Algorithm(name='test'),
    )

def _read_obs(arg1):
    return SimpleObservation(
        collection='test_collection',
        observation_id='test_obs_id',
        algorithm=Algorithm('exposure'),
    )

def test_run_state_log_to_file_true(
    fits2caom2_mock, tap_mock, clients_mock, tap_mock2, test_config,
):
    # tap_mock2 is needed by the data_source_composable specialization
    # this test is about making sure the summary .txt files are copied
    # as expected when there is more than one time-box
    pattern = None
    try:
        clients_mock.return_value.metadata_client.read.side_effect = Mock(
            return_value=SimpleObservation(
                collection=test_config.collection,
                observation_id='def',
                algorithm=Algorithm('test'),
            )
        )
        fits2caom2_mock.side_effect = _mock_write
        tap_mock.side_effect = _mock_get_work

        test_end_time = datetime.fromtimestamp(1579740838)
        start_time = test_end_time - timedelta(seconds=900)
        _write_state(start_time)

        test_config.task_types = [mc.TaskType.INGEST]
        test_config.log_to_file = True
        test_config.state_fqn = STATE_FILE
        test_config.interval = 10
        pattern = f'{test_config.success_fqn.split(".")[0]}*'

        if os.path.exists(test_config.progress_fqn):
            os.unlink(test_config.progress_fqn)

        # preconditions for the success file:
        # - one file named pattern.txt
        #
        original_success_files = glob.glob(pattern)
        for entry in original_success_files:
            os.unlink(entry)
        if not os.path.exists(test_config.success_fqn):
            with open(test_config.success_fqn, 'w') as f:
                f.write('test content\n')

        test_chooser = ec.OrganizeChooser()
        test_result = rc.run_by_state(
            config=test_config,
            chooser=test_chooser,
            command_name='collection2caom2',
            bookmark_name=TEST_BOOKMARK,
            end_time=test_end_time,
        )
        assert test_result is not None, 'expect a result'
        assert test_result == 0, 'expect success'

        assert os.path.exists(test_config.progress_fqn), 'expect progress file'
        file_count = glob.glob(pattern)
        assert len(file_count) == 2, 'wrong number of success files'
    finally:
        if pattern is not None:
            original_success_files = glob.glob(pattern)
            for entry in original_success_files:
                os.unlink(entry)

def _build_obs(override, db_content, fqn, index, almaca_name, md_name):
    obs_date = db_content['Observation date'][index]
    if obs_date is None:
        raise mc.CadcException('No observation date for {}'.format(fqn))
    else:
        obs_date = time.Time(obs_date).to_datetime()

    # of_site('alma')
    # 2225015.30883296, -5440016.41799762, -2481631.27428014
    #
    size = db_content['Array'][index]
    telescope = Telescope(
        name="ALMA-{}".format(size),
        geo_location_x=2225142.18,
        geo_location_y=-5440307.37,
        geo_location_z=-2481029.852,
    )

    instrument = Instrument(name=_get_band_name(override))

    # HK - 14-08-19
    # target: this should be the science target / science fieldname and
    # not the calibrator fieldname, as it is presently (i.e., should be
    # J1851+0035, not J1924-2914). Or, we may need to continue to leave
    # that field blank at this level. A single calibrated measurement set
    # may contain multiple science target names, which would not be
    # properly captured at this level. [For the raw data, the target
    # field is left blank]
    target = Target(
        name=override.get('field'),
        standard=False,
        moving=False,
        target_type=TargetType.OBJECT,
    )

    # HK - 07-02-20
    # Since we've changed what the base level observation is and can now list
    # a target name in the 'Derived Observation' base plane, I believe that
    # means we can also include that target's position under targetPosition.
    # You've already pulled this info out for the subsequent levels of the
    # hierarchy under 'position', so it's presumably fairly straightforward to
    # include the same info here.
    result_ra, result_dec = _get_ra_dec(md_name)
    point = Point(result_ra, result_dec)
    target_position = TargetPosition(
        coordinates=point,
        coordsys='ICRS',  # from listobs output
        equinox=2000.0,  # a guess by google
    )

    # db_content as votable:
    # >>> t.colnames
    # ['Project_code', 'Source_name', 'RA', 'Dec', 'Galactic_longitude',
    #  'Galactic_latitude', 'Band', 'Spatial_resolution',
    #  'Frequency_resolution', 'Array', 'Mosaic', 'Integration',
    #  'Release_date', 'Frequency_support', 'Velocity_resolution',
    #  'Pol_products', 'Observation_date', 'PI_name', 'SB_name',
    #  'Proposal_authors', 'Line_sensitivity__10_km_s_',
    #  'Continuum_sensitivity', 'PWV', 'Group_ous_id', 'Member_ous_id',
    #  'Asdm_uid', 'Project_title', 'Project_type', 'Scan_intent',
    #  'Field_of_view', 'Largest_angular_scale', 'QA2_Status', 'COUNT',
    #  'Science_keyword', 'Scientific_category', 'ASA_PROJECT_CODE']
    #
    # db_content.colnames as html:
    #
    # ['Project code', 'Source name', 'RA', 'Dec', 'Galactic longitude',
    #  'Galactic latitude', 'Band', 'Spatial resolution',
    #  'Frequency resolution', 'Array', 'Mosaic', 'Integration',
    #  'Release date', 'Frequency support', 'Velocity resolution',
    #  'Pol products', 'Observation date', 'PI name', 'SB name',
    #  'Proposal authors', 'Line sensitivity (10 km/s)',
    #  'Continuum sensitivity', 'PWV', 'Group ous id', 'Member ous id',
    #  'Asdm uid', 'Project title', 'Project type', 'Scan intent',
    #  'Field of view', 'Largest angular scale', 'QA2 Status', 'COUNT',
    #  'Science keyword', 'Scientific category', 'ASA_PROJECT_CODE']

    # HK - 14-08-19
    # can we include the project code, and not just the observation UID
    # somewhere in here? For the raw data, it looks like the project
    # code was included as 'proposal: ID', whereas for the calibrated
    # measurement set, proposal: ID is now set to the UID and the project
    # code (2016.1.00010.S) is not captured anywhere. Could the 'project'
    # field, currently set as 'null' be used for this?
    proposal = Proposal(
        id=db_content['Project code'][index],
        project=override.get('project'),
        pi_name=db_content['PI name'][index],
        title=db_content['Project title'][index],
    )
    keywords = db_content['Science keyword'][index]
    if keywords is not None:
        proposal.keywords = set(keywords.split())

    environment = Environment()
    environment.tau = db_content['PWV'][index] / 0.935 + 0.35
    environment.wavelength_tau = 350 * units.um.to(units.meter)

    intent = (
        ObservationIntentType.SCIENCE
        if almaca_name.intent is ProductType.SCIENCE
        else ObservationIntentType.CALIBRATION
    )

    algorithm = Algorithm(name='single band split')
    #
    # PD, SG 15-08-19
    # make it a composite, algorithm name something like
    # 'target splitting'
    #
    observation = DerivedObservation(
        collection=ARCHIVE,
        observation_id=almaca_name.obs_id,
        sequence_number=None,
        intent=intent,
        type="OBJECT",
        proposal=proposal,
        telescope=telescope,
        instrument=instrument,
        target=target,
        meta_release=obs_date,
        algorithm=algorithm,
        environment=environment,
        target_position=target_position,
    )
    observation.members.add(
        ObservationURI(
            mc.CaomName.make_obs_uri_from_obs_id('ALMA', 'A001_X88b_X23')
        )
    )
    return observation

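# A minimal, self-contained sketch of the Environment arithmetic used in
# _build_obs above: the archive PWV value (mm) is converted to a zenith
# opacity with the same PWV / 0.935 + 0.35 relation, and wavelength_tau is
# the 350 micron reference wavelength expressed in metres. The sample_pwv
# default is an invented value for illustration; 'units' is the module's
# existing astropy.units import.
def _environment_sketch(sample_pwv=1.2):
    env = Environment()
    env.tau = sample_pwv / 0.935 + 0.35
    env.wavelength_tau = 350 * units.um.to(units.meter)
    return env
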