Beispiel #1
0
# Observation geometry constants
sgrA_dist = 7861.2597 * 3.086e+18  # Distance to Sgr A* from pc to cm
beam_size = 0.37  # Beam size in arcsec

# Accumulators for pixel positions and mean best-fit physical quantities
x_pix, y_pix = [], []
mean_T, mean_n, mean_N_SIO, mean_N_SO = [], [], [], []

# Define the datafile
datafile = "{0}/data/tabula-sio_intratio.csv".format(DIREC)

# Read in the whole data file containing sources and source flux.
# NOTE: the with-statement closes the file on exit, so the explicit
# f.close() that used to follow was redundant and has been removed.
with open(datafile, newline='') as f:
    reader = csv.reader(f)
    data = list(reader)

# Read in the correct config file from the database
config_file = config.config_file(db_init_filename='database_archi.ini',
                                 section='radex_fit_results')
bestfit_config_file = config.config_file(db_init_filename='database_archi.ini',
                                         section='radex_bestfit_conditions')

# Set up the connection pools
db_pool = db.dbpool(config_file)
db_bestfit_pool = db.dbpool(bestfit_config_file)

# Parse the data to a dict list
observed_data = workerfunctions.parse_data(data, db_pool, db_bestfit_pool)

# Filter the observed data to contain only those species that we can use
# (normally limited by those with Radex data)
filtered_data = workerfunctions.filter_data(observed_data,
                                            ["SIO", "SO", "OCS", "H2CS"])

# Read the image data from Farhad
Beispiel #2
0
def main():
    """Load the JSON configuration, then hand control to the twisted runner."""
    cfg_path = config.config_file()
    config.read_json_config(cfg_path)
    twisted()
Beispiel #3
0
        'g_u': 15.0,
        'A_ul': 10**(-2.83461),
        'E_u': 58.34783,
        'Z': [72.3246, 2*480.8102]
    }

    # SO spectroscopic quantities (upper-state degeneracy, Einstein A,
    # upper-state energy, partition function samples) used downstream.
    so_data = {
        'T': [100., 1000.],
        'g_u': 17.0,
        'A_ul': 10**(-1.94660),
        'E_u': 62.14451,
        'Z': [197.515, 2*850.217] # From Splatalogue (https://www.cv.nrao.edu/php/splat/species_metadata_displayer.php?species_id=20)
    }

    # Declare the database connections
    config_file = config.config_file(db_init_filename='database.ini', section='postgresql')
    bestfit_config_file = config.config_file(db_init_filename='database.ini', section='bestfit_conditions')

    # Set up the connection pools
    db_pool = db.dbpool(config_file)
    db_bestfit_pool = db.dbpool(bestfit_config_file)

    # Read in the whole data file containing sources and source flux.
    # The with-statement closes the file automatically; the explicit
    # f.close() that used to follow was redundant and has been removed.
    with open(datafile, newline='') as f:
        reader = csv.reader(f)
        data = list(reader)

    observed_data = workerfunctions.parse_data(data, db_pool, db_bestfit_pool)
    
    '''
Beispiel #4
0
 def test_check_config_file(self):
         """config_file() must resolve to cnf.yml inside the config folder."""
         expected = os.path.join(config.config_folder(), "cnf.yml")
         assert config.config_file() == expected
Beispiel #5
0
import requests
from config import config_file

texts = []
languages = ['de', 'es', 'fr']
text_files = ['directories/DE.txt', 'directories/ES.txt', 'directories/FR.txt']

# Slurp every source file up front so the texts are in memory for translation.
for path in text_files:
    with open(path, 'r') as source:
        texts.append(source.read())

API_KEY = config_file()  # API Key
URL = 'https://translate.yandex.net/api/v1.5/tr.json/translate'


def translate_it(text, to_lang, timeout=10):
    """Translate Russian *text* into *to_lang* via the Yandex Translate API.

    Args:
        text: Source text (Russian).
        to_lang: Target language code, e.g. 'de', 'es', 'fr'.
        timeout: Seconds to wait for the HTTP response. A request without
            a timeout can hang indefinitely; default keeps old call sites
            working unchanged.

    Returns:
        The translated text joined into a single string.
    """
    params = {
        'key': API_KEY,
        'text': text,
        'lang': f'ru-{to_lang}',
    }

    # Bound the wait so a stalled connection cannot block the caller forever.
    response = requests.get(URL, params=params, timeout=timeout)

    # The API responds with {'text': [...]}; join the fragments.
    return ''.join(response.json()['text'])


# print(translate_it('В настоящее время доступна единственная опция — признак включения в ответ автоматически определенного языка переводимого текста. Этому соответствует значение 1 этого параметра.', 'no'))

if __name__ == '__main__':
    for j in languages:
        for g in texts:
Beispiel #6
0
    # SO spectroscopic quantities (upper-state degeneracy, Einstein A,
    # upper-state energy, partition function samples) used downstream.
    so_data = {
        'T': [100., 1000.],
        'g_u': 17.0,
        'A_ul': 10**(-1.94660),
        'E_u': 62.14451,
        'Z': [
            197.515, 2 * 850.217
        ]  # From Splatalogue (https://www.cv.nrao.edu/php/splat/species_metadata_displayer.php?species_id=20)
    }

    # Declare the species that we're interested in
    relevant_species = ["SIO", "SO", "OCS", "H2CS"]

    # Declare the database connections
    config_file = config.config_file(db_init_filename='database.ini',
                                     section='static_fit_results')
    bestfit_config_file = config.config_file(db_init_filename='database.ini',
                                             section='bestfit_conditions')

    # Set up the connection pools
    db_pool = db.dbpool(config_file)
    db_bestfit_pool = db.dbpool(bestfit_config_file)

    # Read in the whole data file containing sources and source flux.
    # The with-statement closes the file automatically; the explicit
    # f.close() that used to follow was redundant and has been removed.
    with open(datafile, newline='') as f:
        reader = csv.reader(f)
        data = list(reader)

    # Parse the data to a dict list
    observed_data = workerfunctions.parse_data(data, db_pool, db_bestfit_pool)