Example no. 1
def test_read_config():
    """Function to unit test the reading of the pipeline's JSON config files"""

    config_file_path = '../../Config/config.json'

    config = config_utils.read_config(config_file_path)

    assert isinstance(config, dict)
    assert 'proc_data' in config.keys()

    config_file_path = '../../Config/inst_config.json'

    config = config_utils.read_config(config_file_path)

    assert isinstance(config, dict)
    assert 'instrid' in config.keys()
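
Example 1 only asserts that config_utils.read_config returns a dict containing expected keys from a JSON file. A minimal sketch of such a helper, assuming the files are plain JSON (anything beyond what the test checks is an assumption):

import json

def read_config(config_file_path):
    """Load a JSON configuration file and return its contents as a dict.

    Minimal sketch only; the real config_utils.read_config may add path
    handling or validation not visible in the test above.
    """
    with open(config_file_path, 'r') as f:
        return json.load(f)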
Example no. 2
def main():
    print("Reading configuration")

    config = read_config()

    print("Fetching data")

    raw_epc_data = get_epc_data(config.epc_data_url, True)

    print("Cleaning data")

    data = clean_data(raw_epc_data, config.data_columns)

    print("Writing data")

    # Wait for postgres to start ... should be handled at the infra level
    time.sleep(5)

    pg = Postgres(config)

    pg.write_data(data, config.postgres_table_name)

    print("Verifying data")

    print(pg.read_data())

    print("Complete")
Example no. 3
def test_set_config_value():

    test_key = 'proc_data'
    new_value = '/Users/test/path'

    config_file_path = path.join(TEST_DATA, 'config.json')
    copyfile('../../Config/config.json', config_file_path)

    init_config = config_utils.read_config(config_file_path)

    status = config_utils.set_config_value(config_file_path, test_key,
                                           new_value)

    updated_config = config_utils.read_config(config_file_path)

    assert updated_config[test_key]['value'] == new_value

    remove(config_file_path)
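
The assertion updated_config[test_key]['value'] == new_value implies that each config entry is a nested mapping with a 'value' field, and that set_config_value returns a status. A sketch under those assumptions:

import json

def set_config_value(config_file_path, key, new_value):
    """Update config[key]['value'] in a JSON config file and return True on success.

    Sketch only; the nested {'value': ...} layout is inferred from the test above.
    """
    with open(config_file_path, 'r') as f:
        config = json.load(f)
    if key not in config:
        return False
    config[key]['value'] = new_value
    with open(config_file_path, 'w') as f:
        json.dump(config, f, indent=4)
    return True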
Example no. 4
    def setup_options(self):
        # Set up the list of COM ports
        self.set_listofcomports(get_list_ports())

        # Handle config
        try:
            config_file = config_utils.read_config()
        except Exception as e:
            print(e)
            config_file = False

        if config_file:
            # set up log based on config
            if 'log_dir' in config_file and config_file['log_dir']:
                if not os.path.exists(config_file['log_dir']):
                    os.makedirs(config_file['log_dir'])
                self.ui.LogDir.setText(config_file['log_dir'])
            else:
                self.ui.LogDir.setText(self.make_log_dir())
            # set up baudrate based on config
            if 'baudrate' in config_file and config_file['baudrate']:
                self.ui.Baudrate.setCurrentIndex(
                    config_utils.get_baudrate_index(config_file['baudrate']))

            # set up font based on config
            if 'font' in config_file and config_file['font']:
                self.ui.fontComboBox_2.setCurrentIndex(
                    self.ui.fontComboBox_2.findText(config_file['font']))
            if 'font_size' in config_file and config_file['font_size']:
                self.ui.FontSize.setValue(config_file['font_size'])
            # set up night mode and logging based on config
            # night mode:
            if 'nightmode' in config_file:
                if not config_file['nightmode']:
                    self.ui.checkBox_2.setChecked(False)
            # logging setting
            if 'logenabled' in config_file:
                if not config_file['logenabled']:
                    self.ui.checkBox.setChecked(False)

            if 'log_rotaion_enabled' in config_file:
                if not config_file['log_rotaion_enabled']:
                    self.ui.LogRotation.setChecked(False)

            if 'log_rotation_size' in config_file:
                if isinstance(config_file['log_rotation_size'], int):
                    self.ui.LogSize.setValue(config_file['log_rotation_size'])

        else:
            self.ui.LogDir.setText(self.make_log_dir())
            self.ui.Baudrate.setCurrentIndex(0)
            self.ui.checkBox_2.setChecked(True)
            self.ui.checkBox.setChecked(True)

        # Set up default log name
        self.ui.LogName.setText(self.make_log_name())
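
Example 4 maps a saved baudrate back to a combo-box index through config_utils.get_baudrate_index. A plausible sketch, assuming the Baudrate combo box lists the common serial rates in a fixed order (the exact list is an assumption):

# Assumed order of the Baudrate combo-box entries; the real UI may differ.
BAUDRATES = [9600, 19200, 38400, 57600, 115200]

def get_baudrate_index(baudrate):
    """Return the combo-box index for a baudrate, falling back to index 0."""
    try:
        return BAUDRATES.index(int(baudrate))
    except ValueError:
        return 0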
Example no. 5
    def show_config(self):
        msg = QMessageBox()
        msg.setStandardButtons(QMessageBox.Ok | QMessageBox.RestoreDefaults
                               | QMessageBox.Cancel)
        if config_utils.read_config():
            string_to_show = "Current Settings Saved:\n\n" + str(
                config_utils.read_config()).replace(",", "\n")
            msg.setDetailedText("Configuration File:\n{0}\n{1}".format(
                config_utils.get_path_config_file(), string_to_show))
        else:
            msg.setDetailedText(
                "No Config file found.\nSave Current config to start.")

        current_logpath = self.get_log_dir()
        current_baudrate = self.get_baud_rate()
        current_font = self.get_font()
        current_font_size = str(self.get_font_size())
        if self.get_night_mode():
            current_nightmode = "On"
        else:
            current_nightmode = "Off"
        if self.get_logenabled():
            current_logsetting = "On"
        else:
            current_logsetting = "Off"
        if self.get_logrotation():
            current_logrotation = "On"
            log_rotation_size = self.get_logrotationsize()
        else:
            current_logrotation = "off"
            log_rotation_size = "-"
        msg.setWindowTitle("Current Configuration:")
        msg.setText(
            "Log Path: {0}\nBaudrate: {1}\nFont: {2}-{5}\nNight Mode: {3}\nLogging: {4}\nLog Rotation: {6} at {7} MB"
            .format(current_logpath, current_baudrate, current_font,
                    current_nightmode, current_logsetting, current_font_size,
                    current_logrotation, log_rotation_size))
        #msg.buttonClicked.connect(save_config_handler)
        msg.buttonClicked.connect(self.save_config_handler)
        retval = msg.exec_()
Example no. 6
def generate_map():
    """Function to plot given pixel positions on a HEALpix map"""

    # Get user optional parameters
    options = get_args()

    # Fetch configuration:
    config = config_utils.read_config(options['config_file'])

    # Initialize the HEALpix map
    #ahp = HEALPix(nside=NSIDE, order='ring', frame=TETE())

    # Load data on the locations of regions of interest depending on user selections:
    if options['object_list'] == 'O':
        regions = load_open_cluster_data(config)
    elif options['object_list'] == 'G':
        regions = load_globular_cluster_data(config)
    elif options['object_list'] == 'M':
        regions = load_Magellenic_Cloud_data(config)
    elif options['object_list'] == 'GB':
        regions = load_Galactic_Bulge_data(config)
    elif options['object_list'] == 'GP':
        regions = load_Galactic_Plane_data(config)
    elif options['object_list'] == 'C':
        regions = load_Clementini_region_data(config)
    elif options['object_list'] == 'B':
        regions = load_Bonito_SFR_data(config)
    elif options['object_list'] == 'Z':
        regions = load_SFR_data(config)
    elif options['object_list'] == 'P':
        regions = load_optimized_pencilbeams(config)
    elif options['object_list'] == 'LP':
        regions = load_larger_pencilbeams(config)

    # Use the HEALpix map together with the regions of interest to calculate the
    # HEALpixels included within those regions:
    print('Generating sky map...')
    for r in regions:
        r.calc_hp_healpixels_for_region()

    # Build a map combining the pixel regions of all regions of interest:
    maps = build_sky_map(config, options, regions)

    # Output the map data:
    output_sky_map(config, maps, options)
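
The elif chain above leaves regions undefined when object_list holds an unrecognized code. A dictionary dispatch is one way to tighten it; this sketch assumes the loader functions from the example are in scope:

REGION_LOADERS = {
    'O': load_open_cluster_data,
    'G': load_globular_cluster_data,
    'M': load_Magellenic_Cloud_data,
    'GB': load_Galactic_Bulge_data,
    'GP': load_Galactic_Plane_data,
    'C': load_Clementini_region_data,
    'B': load_Bonito_SFR_data,
    'Z': load_SFR_data,
    'P': load_optimized_pencilbeams,
    'LP': load_larger_pencilbeams,
}

def load_regions(options, config):
    """Look up the loader for the selected object list, failing loudly on unknown codes."""
    try:
        loader = REGION_LOADERS[options['object_list']]
    except KeyError:
        raise ValueError('Unknown object_list code: {}'.format(options['object_list']))
    return loader(config)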
Example no. 7
def reset_module():
	options = config_utils.read_config("./monk_adapter.conf","MonkAccount")
	print(options)
	monk_adapter.remove_all_data(options['accountid'],options['groupid01'])
	options['groupid01'] = monk_adapter.add_group(options['accountid'],"group01","dataCollection01")
	monk_adapter.remove_all_data(options['accountid'],options['groupid02'])
	options['groupid02'] = monk_adapter.add_group(options['accountid'],"group02","dataCollection02")

	# Recreate categories
	like_category_id = monk_adapter.add_category(options['groupid01'],'like')
	unlike_category_id = monk_adapter.add_category(options['groupid01'],'unlike')

	s_like_category_id = monk_adapter.add_category(options['groupid02'],'like')
	s_unlike_category_id = monk_adapter.add_category(options['groupid02'],'unlike')


	pairs = {'groupid01':options['groupid01'],'category_like':like_category_id,'category_unlike':unlike_category_id,
		'groupid02':options['groupid02'],'s_category_like':s_like_category_id,'s_category_unlike':s_unlike_category_id}

	config_utils.write_config("./monk_adapter.conf","MonkAccount",pairs)
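
Example 7 passes both a file path and a section name ('MonkAccount') to read_config and write_config, which suggests an INI-style file handled with configparser. A sketch under that assumption:

import configparser

def read_config(config_file_path, section):
    """Return the options of one INI section as a dict (sketch)."""
    parser = configparser.ConfigParser()
    parser.read(config_file_path)
    return dict(parser[section])

def write_config(config_file_path, section, pairs):
    """Merge key/value pairs into one INI section and rewrite the file (sketch)."""
    parser = configparser.ConfigParser()
    parser.read(config_file_path)
    if not parser.has_section(section):
        parser.add_section(section)
    for key, value in pairs.items():
        parser.set(section, key, str(value))
    with open(config_file_path, 'w') as f:
        parser.write(f)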
Example no. 8
def end_of_level_test(best_cost, best_cost_global_step,
                      best_curriculum_coefficient, best_saver, sess,
                      test_trajectories_dir, trainer, level):
    print_and_log('end of level {} best: {} from step: {}'.format(
        level, best_cost, best_cost_global_step))
    restore_best(sess, best_saver, best_curriculum_coefficient, trainer)
    # test all
    test_trajectories_file = os.path.join(test_trajectories_dir,
                                          'level{}_all.txt'.format(level))
    endpoints_by_path = trainer.collect_test_data(level,
                                                  is_challenging=False)[-1]
    serialize_compress(endpoints_by_path, test_trajectories_file)
    print_and_log(os.linesep)
    # test hard
    test_trajectories_file = os.path.join(
        test_trajectories_dir, 'level{}_challenging.txt'.format(level))
    endpoints_by_path = trainer.collect_test_data(level,
                                                  is_challenging=True)[-1]
    serialize_compress(endpoints_by_path, test_trajectories_file)
    print_and_log(os.linesep)


def restore_best(sess, best_saver, best_curriculum_coefficient, trainer):
    best_saver.restore(sess)
    trainer.episode_runner.curriculum_coefficient = best_curriculum_coefficient


if __name__ == '__main__':
    # read the config
    config = read_config()
    run_for_config(config)
Example no. 9
import config_utils

data = config_utils.read_config()
print(data["SERVER_URL"])
Example no. 10
              config['wa_notebook']['notebook_fname'] + '\nreturn code: ' +
              str(proc.returncode))
        exit(1)

    # copy output files to results dir
    for fname in config['wa_notebook']['result_fnames']:
        src = os.path.join(config['wa_notebook']['dirname'], fname)
        dst = os.path.join(config['results_dirname'], fname)
        copyfile(src, dst)


def run(config):
    '''
    Execute the analysis: combine the WCSLP results, prepare the WA improve
    input file, and run the WA improve notebook.
    :param config: pipeline configuration (as returned by config_utils.read_config)
    :return: None
    '''
    df_stats, df_all_folds = combine_wcslp_results(config)
    prepare_wa_improve_input_file(df_all_folds, config)
    run_wa_improve_notebook(config)


if __name__ == '__main__':

    config_file_name = 'config.json'
    config = config_utils.read_config(config_file_name)

    run(config)
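
Example 10 indexes config['wa_notebook'] for dirname, notebook_fname, and result_fnames, plus a top-level results_dirname. An illustrative shape for that config.json, with every value a placeholder:

# Illustrative structure only; all names and paths below are hypothetical.
EXAMPLE_CONFIG = {
    "results_dirname": "results",
    "wa_notebook": {
        "dirname": "notebooks",
        "notebook_fname": "wa_improve.ipynb",
        "result_fnames": ["wa_improve_output.csv"],
    },
}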
Example no. 11
def build_priority_maps():

    # Fetch configuration:
    use_hp_maps = True
    if len(argv) < 2:
        config_file = input('Please enter the path to the configuration file: ')
    else:
        config_file = argv[1]
    config = config_utils.read_config(config_file)

    # Initialize the HEALpix map and storage array - this includes an extra
    # column for the combined_map and the maps summed over all filters
    #ahp = HEALPix(nside=config['NSIDE'], order='ring', frame=TETE())
    NPIX = hp.nside2npix(config['NSIDE'])

    # Build vote maps in each filter based on the stellar density of each HEALpix
    vote_maps = {}
    map_titles = ['combined_map']
    for code in config['map_codes']:
        map_title = config[code]['file_root_name']
        map_titles.append(map_title)

    for filter in config['filter_list']:
        map = PriorityMap(npix=NPIX,filter=filter)

        # Read in component maps for different science cases and add them
        # to the combined map for this filter
        for code in config['map_codes']:
            file_name = path.join(config['output_dir'],
                                    config[code]['file_root_name']+'_'+str(filter)+'.fits')

            # map.map_dict contains the original science maps with the
            # computed priority per-healpix, since this is calculated on
            # various different scientific grounds
            if use_hp_maps:
                pix_data = hp.read_map(file_name)
                map_title = config[code]['file_root_name']
            else:
                hdul = fits.open(file_name)
                pix_data = hdul[1].data['HEALpix_values']
                map_title = hdul[0].header['MAPTITLE']

            # vote_map contains the sum of all of the maps.
            # Note that no renormalization is performed at this stage because
            # this has been done for the maps generated by generate_sky_maps.
            map.vote_map += pix_data
            map.map_dict[map_title] = pix_data

        # The final vote_map for this filter is normalized to 1.
        map.vote_map = map.vote_map/map.vote_map.max()

        vote_maps[filter] = map

    # Sum the priority per HEALpix of each science map over all filters
    summed_maps = {}
    for map_title in map_titles:
        sum_map = np.zeros(NPIX)
        for filter in config['filter_list']:
            if map_title == 'combined_map':
                sum_map += vote_maps[filter].vote_map
            else:
                sum_map += vote_maps[filter].map_dict[map_title]
        summed_maps[map_title] = sum_map

    # Output the priority map plots in both PNG and FITS formats, and the data
    # as a FITS binary table

    for filter in config['filter_list']:
        map = vote_maps[filter]

        fig = plt.figure(3,(10,10))
        plot_max = map.vote_map.max()
        if np.isnan(plot_max):
            plot_max = 1.0
        hp.mollview(map.vote_map, title="Regions of Interest "+str(filter)+"-band",
                    min=0.0, max=plot_max)
        hp.graticule()
        plt.tight_layout()
        plt.savefig(path.join(config['output_dir'],config['output_file_name']+'_plot_'+str(filter)+'.png'))
        plt.close(3)
        hp.write_map(path.join(config['output_dir'],config['output_file_name']+'_plot_'+str(filter)+'.fits'), map.vote_map, overwrite=True)

        # Header
        hdr = fits.Header()
        hdr['NSIDE'] = config['NSIDE']
        hdr['NPIX'] = hp.nside2npix(config['NSIDE'])
        hdr['MAPTITLE'] = 'Combined priority survey footprint'
        for i,map_title in enumerate(map_titles):
            hdr['SCIMAP'+str(i)] = map_title
        hdr['VERSION'] = '1.1.0'
        phdu = fits.PrimaryHDU(header=hdr)

        hdu_list = [phdu]

        # First table extension: combined vote map
        col_list = []
        col_list.append( fits.Column(name='combined_map', array=map.vote_map, format='E') )

        # Subsequent extensions contain the component maps for different science cases:
        for map_title, data in map.map_dict.items():
            col_list.append( fits.Column(name=map_title, array=data, format='E') )

        hdu = fits.BinTableHDU.from_columns(col_list)
        hdu_list.append(hdu)
        hdul = fits.HDUList(hdu_list)

        hdul.writeto(path.join(config['output_dir'],
                                config['output_file_name']+'_data_'+str(filter)+'.fits'),
                                overwrite=True)

    # Output summed maps
    hdr = fits.Header()
    hdr['NSIDE'] = config['NSIDE']
    hdr['NPIX'] = hp.nside2npix(config['NSIDE'])
    hdr['MAPTITLE'] = 'Total priority survey footprint'
    hdr['VERSION'] = '1.1.0'
    phdu = fits.PrimaryHDU(header=hdr)
    hdu_list = [phdu]

    col_list = []
    for map_title in map_titles:
        col_list.append( fits.Column(name=map_title, array=summed_maps[map_title], format='E') )

    hdu = fits.BinTableHDU.from_columns(col_list)
    hdu_list.append(hdu)
    hdul = fits.HDUList(hdu_list)
    hdul.writeto(path.join(config['output_dir'],
                            config['output_file_name']+'_data_sum.fits'),
                            overwrite=True)
Example no. 12
        close_log()
        return best_cost


def end_of_level_test(best_cost, best_cost_global_step, best_curriculum_coefficient, best_saver, sess,
                      test_trajectories_dir, trainer):
    print_and_log('end of run. best: {} from step: {}'.format(best_cost, best_cost_global_step))
    restore_best(sess, best_saver, best_curriculum_coefficient, trainer)
    # test all
    test_trajectories_file = os.path.join(test_trajectories_dir, 'all.txt')
    endpoints_by_path = trainer.collect_test_data(is_challenging=False)[-1]
    serialize_compress(endpoints_by_path, test_trajectories_file)
    print_and_log(os.linesep)
    # test hard
    test_trajectories_file = os.path.join(test_trajectories_dir, 'challenging.txt')
    endpoints_by_path = trainer.collect_test_data(is_challenging=True)[-1]
    serialize_compress(endpoints_by_path, test_trajectories_file)
    print_and_log(os.linesep)


def restore_best(sess, best_saver, best_curriculum_coefficient, trainer):
    best_saver.restore(sess)
    trainer.episode_runner.curriculum_coefficient = best_curriculum_coefficient


if __name__ == '__main__':
    # read the config
    config_path = os.path.join(get_config_directory(), 'config_subgoal_sequential.yml')
    config = read_config(config_path)
    run_for_config(config)