Пример #1
0
def set_language(language):
    """Persist the chosen language in the config file.

    Writes the raw code under [language]/code and the human-readable
    name (looked up via ``lang.LANG_DICT``) under [language]/name.

    :param language: language code; must be int-convertible for the
        ``LANG_DICT`` lookup.
    """
    config = ConfigParser.ConfigParser()
    config.read([utils.get_config_file()])
    # Guard the section creation: an unconditional add_section raises
    # ConfigParser.DuplicateSectionError the second time the language
    # is set (the other setters in this file already guard this way).
    if not config.has_section('language'):
        config.add_section('language')
    config.set('language', 'code', language)
    config.set('language', 'name', lang.LANG_DICT[int(language)])
    with open(utils.get_config_file(), 'wb') as configfile:
        config.write(configfile)
Пример #2
0
def set_editor(editor):
    """Record the user's preferred editor under [editor]/editor."""
    parser = ConfigParser.ConfigParser()
    parser.read([utils.get_config_file()])
    if not parser.has_section('editor'):
        parser.add_section('editor')
    parser.set('editor', 'editor', editor)
    with open(utils.get_config_file(), 'wb') as out:
        parser.write(out)
Пример #3
0
def set_extension(lang_code, extension):
    """Map a language code to its file extension in the [extension] section."""
    parser = ConfigParser.ConfigParser()
    parser.read([utils.get_config_file()])
    # Creating the section only when absent is equivalent to catching
    # DuplicateSectionError.
    try:
        parser.add_section('extension')
    except ConfigParser.DuplicateSectionError:
        pass
    parser.set('extension', lang_code, extension)
    with open(utils.get_config_file(), 'wb') as out:
        parser.write(out)
Пример #4
0
def set_run_cmd(lang_code, run_cmd):
    """Store the run command for *lang_code* in the [run_cmd] section."""
    parser = ConfigParser.ConfigParser()
    parser.read([utils.get_config_file()])
    if not parser.has_section('run_cmd'):
        parser.add_section('run_cmd')
    parser.set('run_cmd', lang_code, run_cmd)
    with open(utils.get_config_file(), 'wb') as out:
        parser.write(out)
Пример #5
0
def set_learning_rate(learning_rate):
    """Persist the learning rate under [learning_rate]/learning_rate."""
    parser = ConfigParser.ConfigParser()
    parser.read([utils.get_config_file()])
    if not parser.has_section('learning_rate'):
        parser.add_section('learning_rate')
    parser.set('learning_rate', 'learning_rate', learning_rate)
    with open(utils.get_config_file(), 'wb') as out:
        parser.write(out)
Пример #6
0
def set_language(language):
    """Write the language code and its display name to the [language] section.

    The display name comes from ``lang.LANG_DICT``, so *language* must be
    int-convertible.
    """
    parser = ConfigParser.ConfigParser()
    parser.read([utils.get_config_file()])
    try:
        parser.add_section('language')
    except ConfigParser.DuplicateSectionError:
        pass
    parser.set('language', 'code', language)
    parser.set('language', 'name', lang.LANG_DICT[int(language)])
    with open(utils.get_config_file(), 'wb') as out:
        parser.write(out)
Пример #7
0
def set_root(root):
    """Save the root directory path under [root]/dir."""
    parser = ConfigParser.ConfigParser()
    parser.read([utils.get_config_file()])
    if not parser.has_section('root'):
        parser.add_section('root')
    parser.set('root', 'dir', root)
    with open(utils.get_config_file(), 'wb') as out:
        parser.write(out)
Пример #8
0
def set_configured():
    """Flag the tool as configured by writing [configured]/configured = "1"."""
    parser = ConfigParser.ConfigParser()
    parser.read([utils.get_config_file()])
    # Equivalent to add_section inside a DuplicateSectionError handler.
    if not parser.has_section('configured'):
        parser.add_section('configured')
    parser.set('configured', 'configured', "1")
    with open(utils.get_config_file(), 'wb') as out:
        parser.write(out)
Пример #9
0
def set_credentials(username, password):
    """Store the username and an encoded password in the [user] section."""
    parser = ConfigParser.ConfigParser()
    parser.read([utils.get_config_file()])
    if not parser.has_section('user'):
        parser.add_section('user')
    parser.set('user', 'username', username)
    # Password is obfuscated via utils.encode before being written.
    parser.set('user', 'password', utils.encode(password))
    with open(utils.get_config_file(), 'wb') as out:
        parser.write(out)
Пример #10
0
def set_credentials(username, password):
    """Write the user's credentials (password encoded) to the config file."""
    cfg_path = utils.get_config_file()
    parser = ConfigParser.ConfigParser()
    parser.read([cfg_path])
    if not parser.has_section('user'):
        parser.add_section('user')
    parser.set('user', 'username', username)
    parser.set('user', 'password', utils.encode(password))
    with open(utils.get_config_file(), 'wb') as out:
        parser.write(out)
Пример #11
0
def get_editor():
    """Return the configured editor, or None when it was never set."""
    parser = ConfigParser.ConfigParser()
    parser.read([utils.get_config_file()])
    if not parser.has_option('editor', 'editor'):
        return None
    return parser.get('editor', 'editor')
Пример #12
0
def get_language():
    """Return the stored language code, or None if not configured."""
    parser = ConfigParser.ConfigParser()
    parser.read([utils.get_config_file()])
    if not parser.has_option('language', 'code'):
        return None
    return parser.get('language', 'code')
Пример #13
0
def get_root():
    """Return the configured root directory, or None if missing."""
    parser = ConfigParser.ConfigParser()
    parser.read([utils.get_config_file()])
    if not parser.has_option('root', 'dir'):
        return None
    return parser.get('root', 'dir')
Пример #14
0
def get_language():
    """Fetch the saved language code from the config; None when absent."""
    parser = ConfigParser.ConfigParser()
    parser.read([utils.get_config_file()])
    code = None
    if parser.has_option('language', 'code'):
        code = parser.get('language', 'code')
    return code
Пример #15
0
def create_config(book: str):
    """Interactively build and write a fresh YAML config for *book*."""
    settings: Dict[str, Any] = {}

    print("Creating new config for {}:".format(book))

    # Delegate website selection; mutates the dict in place.
    _get_website(settings)

    display_name = input("Name? (optional) ")
    if display_name:
        settings["name"] = display_name

    with open(utils.get_config_file(book), "w") as out:
        yaml.dump(settings, out, default_flow_style=False)

    print("Config created at:", utils.get_config_file(book))
    print()
Пример #16
0
def get_cmp_cmd(lang_code):
    """Return the compile command configured for *lang_code*, or None."""
    parser = ConfigParser.ConfigParser()
    parser.read([utils.get_config_file()])
    if not parser.has_option('cmp_cmd', lang_code):
        return None
    return parser.get('cmp_cmd', lang_code)
Пример #17
0
def get_learning_rate():
    """Return the stored learning rate, or None if never configured."""
    parser = ConfigParser.ConfigParser()
    parser.read([utils.get_config_file()])
    if not parser.has_option('learning_rate', 'learning_rate'):
        return None
    return parser.get('learning_rate', 'learning_rate')
Пример #18
0
def is_configured():
    """Return the stored 'configured' flag, or 0 when it was never set."""
    parser = ConfigParser.ConfigParser()
    parser.read([utils.get_config_file()])
    if not parser.has_option('configured', 'configured'):
        return 0
    return parser.get('configured', 'configured')
Пример #19
0
def create_app():
    """Build the Flask app: load config, attach Bootstrap with a
    CDN-hosted jQuery, and set the secret key from config."""
    application = Flask(__name__)
    application.config.from_pyfile(get_config_file(application.root_path))
    Bootstrap(application)
    jquery_cdn = WebCDN('//cdnjs.cloudflare.com/ajax/libs/jquery/2.1.1/')
    application.extensions['bootstrap']['cdns']['jquery'] = jquery_cdn
    application.secret_key = application.config['SECRET_KEY']
    return application
Пример #20
0
def get_credentials():
    """Return (username, decoded password); (None, None) if either is absent."""
    parser = ConfigParser.ConfigParser()
    parser.read([utils.get_config_file()])
    if parser.has_option('user', 'username') and parser.has_option('user', 'password'):
        return parser.get('user', 'username'), utils.decode(parser.get('user', 'password'))
    return None, None
Пример #21
0
def create_app():
    """Create and configure the Flask application.

    Reads config from a pyfile, registers Bootstrap, points the jQuery
    CDN at cloudflare, and installs the secret key.
    """
    app = Flask(__name__)
    app.config.from_pyfile(get_config_file(app.root_path))
    Bootstrap(app)
    cdn = WebCDN('//cdnjs.cloudflare.com/ajax/libs/jquery/2.1.1/')
    app.extensions['bootstrap']['cdns']['jquery'] = cdn
    app.secret_key = app.config['SECRET_KEY']
    return app
Пример #22
0
def get_credentials():
    """Look up stored credentials; (None, None) when incomplete."""
    parser = ConfigParser.ConfigParser()
    parser.read([utils.get_config_file()])
    for option in ('username', 'password'):
        if not parser.has_option('user', option):
            return None, None
    username = parser.get('user', 'username')
    password = utils.decode(parser.get('user', 'password'))
    return username, password
Пример #23
0
def generate_image():
    """Fetch MTA arrivals, render the two configured lines, and format
    the result for the Kindle display."""
    arrivals: Dict[str, ArrivalTimes] = mta.fetch_mta()

    cfg = utils.get_config_file()

    # The first two entries of cfg["lines"] are the top/bottom rows.
    top_line = build_line_from_config(cfg["lines"][0], arrivals)
    bottom_line = build_line_from_config(cfg["lines"][1], arrivals)

    drawing.create_image(top_line, bottom_line)
    format_for_kindle.format_for_kindle()
Пример #24
0
    def __init__(self):
        """Initialise Qt state, load persisted settings, and cache autostart info."""
        QObject.__init__(self)
        # Path to the persisted settings; load() reads it into self.config.
        self.config_file = get_config_file("config.ini")
        self.config = None
        self.load()

        # Pending option changes — populated elsewhere in the class.
        self.change_options = []

        # Per-user autostart .desktop entry.
        self.autostart_desktop = os.path.expanduser("~/.config/autostart/youdao-dict-autostart.desktop")
        # System-wide autostart .desktop entry.
        self.sys_autostart_desktop = "/etc/xdg/autostart/youdao-dict-autostart.desktop"

        # Cache the current autostart state at construction time.
        self._autostart = self.get_autostart()
Пример #25
0
    def __init__(self):
        """Set up the settings object: load config.ini and record autostart state."""
        QObject.__init__(self)
        # load() below populates self.config from this file.
        self.config_file = get_config_file("config.ini")
        self.config = None
        self.load()

        # Options changed since load; consumed elsewhere in the class.
        self.change_options = []

        # User-level vs system-level autostart .desktop files.
        self.autostart_desktop = os.path.expanduser(
            "~/.config/autostart/youdao-dict-autostart.desktop")
        self.sys_autostart_desktop = "/etc/xdg/autostart/youdao-dict-autostart.desktop"

        # Snapshot of the autostart state taken at construction.
        self._autostart = self.get_autostart()
Пример #26
0
def main():
    """
    Launch the program: map the network, start server-monitoring agents,
    and block until every agent thread finishes.

    Any uncaught exception tears down the DB session and engine and
    exits with status -1.
    :return: None
    """
    try:
        session = Session()
        config = get_config_file(CONFIG_FILE)
        map_network(config[NMAP_SECTION])

        agents = start_server_monitoring(config[AGENTS_SECTION], session)

        # NOTE(review): ping_handler is never used after construction —
        # confirm whether PingHandler() has required side effects before
        # removing it.
        ping_handler = PingHandler()

        # Block until all monitoring agents terminate.
        for agent in agents:
            agent.join()

    except Exception as error:
        logging.error("TERMINAL ERROR IN THE MAIN MODULE: %s", error)
        Session.remove()
        engine.dispose()
        sys.exit(-1)
Пример #27
0
def get_angle_config():
    """Return the display angle from the [kindle] section of the config."""
    settings = utils.get_config_file()
    return settings['kindle']['angle']
Пример #28
0
def run(file, id=None):
    """End-to-end evaluation pipeline for a poverty/indicator model.

    Loads a config (from *file*, or from the DB when *id* is given),
    assembles per-cluster features (Google/Sentinel imagery, nightlights,
    OSM distances, NDVI/NDBI/NDWI, ACLED), trains kNN/Ridge/ensemble
    models on the configured indicator, then writes scores, predictions
    and the fitted model to disk.

    :param file: path to a config file; used only when ``id`` is None.
    :param id: optional config id; when given, the config and a DB
        engine are loaded via ``get_config_db`` and scores go to the DB.
    """
    # ----------------- #
    # SETUP #############
    # ----------------- #
    if id is None:
        config = get_config_file(file)
    else:
        config, engine = get_config_db(id)

    # Ensure the output directory tree exists before any downloads.
    for d in [
            '../Data/Features', '../Data/Geofiles/OSM',
            '../Data/Geofiles/nightlights', '../Data/Results'
    ]:
        if not os.path.exists(d):
            os.makedirs(d)

    # --------------------- #
    # Setting up playground #
    # --------------------- #
    assert (
        os.path.exists(config['dataset_filename'])
    ), "ups, dataset specified not found: " + config['dataset_filename']
    data = pd.read_csv(config['dataset_filename'])
    print(str(np.datetime64('now')), 'INFO: original dataset lenght: ',
          data.shape[0])
    # Round coordinates to 5 decimals so near-identical points collapse.
    data['gpsLongitude'] = np.round(data['gpsLongitude'], 5)
    data['gpsLatitude'] = np.round(data['gpsLatitude'], 5)

    # avoid duplicates: average the indicator over identical coordinates
    data = data[['gpsLongitude', 'gpsLatitude',
                 config['indicator']]].groupby(['gpsLongitude',
                                                'gpsLatitude']).mean()

    # base layer
    assert (os.path.exists(config['base_raster'])
            ), "ups, raster specified not found: " + config['base_raster']
    GRID = BaseLayer(config['base_raster'],
                     data.index.get_level_values('gpsLongitude'),
                     data.index.get_level_values('gpsLatitude'))
    # TODO: we should enforce the most accurate i and j when training, i.e. aggregate = 1?

    # Get Polygon Geojson of the boundaries
    # TODO: maybe go into BaseLayer class?
    minlat, maxlat, minlon, maxlon = boundaries(GRID.lat,
                                                GRID.lon,
                                                buffer=0.05)
    area = points_to_polygon(minlon, minlat, maxlon, maxlat)

    print(str(np.datetime64('now')),
          "INFO: Number of clusters: {} ".format(len(data)))

    pipeline = 'evaluation'

    # ------------------------------- #
    # get features from Google images #
    # ------------------------------- #
    if config['satellite_config']['satellite_images'] in ['Y', 'G']:
        features_path = "../Data/Features/features_Google_id_{}_{}.csv".format(
            id, pipeline)
        data_path = "../Data/Satellite/"
        from google_images import GoogleImages

        if os.path.exists(features_path):
            print('INFO: already scored.')
            # NOTE(review): features_path is already fully formatted above,
            # so this extra .format(id, pipeline) is a no-op.
            features = pd.read_csv(features_path.format(id, pipeline),
                                   index_col=['gpsLongitude', 'gpsLatitude'],
                                   float_precision='round_trip')
        else:
            gimages = GoogleImages(data_path)
            # download the images from the relevant API
            gimages.download(GRID.lon,
                             GRID.lat,
                             step=config['satellite_config']['satellite_step'])
            # extract the features
            features = pd.DataFrame(gimages.featurize(
                GRID.lon,
                GRID.lat,
                step=config['satellite_config']['satellite_step']),
                                    index=data.index)

            # Suffix columns so Google features don't collide with others.
            features.columns = [
                str(col) + '_Google' for col in features.columns
            ]
            features.to_csv(features_path)

        data = data.join(features)
        print('INFO: features extracted from Google satellite images')

    # --------------------------------- #
    # get features from Sentinel images #
    # --------------------------------- #
    if config['satellite_config']['satellite_images'] in ['Y', 'S']:
        features_path = "../Data/Features/features_Sentinel_id_{}_{}.csv".format(
            id, pipeline)
        data_path = "../Data/Satellite/"
        start_date = config["satellite_config"]["start_date"]
        end_date = config["satellite_config"]["end_date"]

        from sentinel_images import SentinelImages

        if os.path.exists(features_path):
            print('INFO: already scored.')
            # NOTE(review): same no-op .format as in the Google branch.
            features = pd.read_csv(features_path.format(id, pipeline),
                                   index_col=['gpsLongitude', 'gpsLatitude'],
                                   float_precision='round_trip')
        else:
            simages = SentinelImages(data_path)
            # download the images from the relevant API
            simages.download(GRID.lon, GRID.lat, start_date, end_date)
            print('INFO: scoring ...')
            # extract the features
            print('INFO: extractor instantiated.')
            features = pd.DataFrame(simages.featurize(GRID.lon, GRID.lat,
                                                      start_date, end_date),
                                    index=data.index)

            features.columns = [
                str(col) + '_Sentinel' for col in features.columns
            ]
            features.to_csv(features_path)

        data = data.join(features)
        print('INFO: features extracted from Sentinel images')

    # --------------- #
    # add nightlights #
    # --------------- #
    from nightlights import Nightlights

    nlights = Nightlights('../Data/Geofiles/')
    nlights.download(area, config['nightlights_date']['start'],
                     config['nightlights_date']['end'])
    features = pd.DataFrame(nlights.featurize(GRID.lon, GRID.lat),
                            columns=['nightlights'],
                            index=data.index)
    # quantize nightlights into 5 bins (duplicate edges dropped)
    features['nightlights'] = pd.qcut(features['nightlights'],
                                      5,
                                      labels=False,
                                      duplicates='drop')
    data = data.join(features)

    # ---------------- #
    # add OSM features #
    # ---------------- #
    OSM = OSM_extractor(minlon, minlat, maxlon, maxlat)
    tags = {"amenity": ["school", "hospital"], "natural": ["tree"]}
    osm_gdf = {}

    for key, values in tags.items():
        for value in values:
            # NOTE(review): literal key "value" — likely meant osm_gdf[value].
            # Works only because dist is computed immediately below; the dict
            # retains just the last download.
            osm_gdf["value"] = OSM.download(key, value)
            dist = OSM.distance_to_nearest(GRID.lat, GRID.lon,
                                           osm_gdf["value"])
            # log-transform distances (epsilon avoids log(0))
            data['distance_{}'.format(value)] = [
                np.log(0.0001 + x) for x in dist
            ]

    # ---------------- #
    # NDBI, NDVI, NDWI #
    # ---------------- #
    print('INFO: getting NDBI, NDVI, NDWI ...')
    from rms_indexes import S2indexes

    S2 = S2indexes(area, '../Data/Geofiles/NDs/', config['NDs_date']['start'],
                   config['NDs_date']['end'], config['scope'])
    S2.download()
    data['max_NDVI'], data['max_NDBI'], data['max_NDWI'] = S2.rms_values(
        GRID.lon, GRID.lat)

    # --------------- #
    # add ACLED #
    # --------------- #
    from acled import ACLED

    acled = ACLED("../Data/Geofiles/ACLED/")
    acled.download(config['iso3'], config['nightlights_date']['start'],
                   config['nightlights_date']['end'])
    d = {}
    # Density features at two buffer radii (10km / 100km in map units).
    for property in ["fatalities", "n_events", "violence_civ"]:
        for k in [10000, 100000]:
            d[property + "_" + str(k)] = acled.featurize(GRID.lon,
                                                         GRID.lat,
                                                         property=property,
                                                         function='density',
                                                         buffer=k)

    d["weighted_sum_fatalities_by_dist"] = acled.featurize(
        GRID.lon, GRID.lat, property="fatalities", function='weighted_kNN')
    d["distance_to_acled_event"] = acled.featurize(GRID.lon,
                                                   GRID.lat,
                                                   function='distance')
    # quantize ACLED into 5 bins; NaNs (e.g. from dropped bins) become 0
    for c in d.keys():
        d[c] = np.nan_to_num(pd.qcut(d[c], 5, labels=False, duplicates='drop'))

    features = pd.DataFrame(d, index=data.index)
    data = data.join(features)

    # --------------- #
    # save features   #
    # --------------- #
    # drop columns with only 1 value
    print(
        'INFO: {} columns. Dropping features with unique values (if any) ...'.
        format(len(data.columns)))
    data = data[[col for col in data if not data[col].nunique() == 1]]
    print('INFO: {} columns.'.format(len(data.columns)))
    # features to be use in the linear model
    features_list = list(
        sorted(set(data.columns) - set(['i', 'j', config['indicator']])))

    #Save non-scaled features
    data.to_csv(
        "../Data/Features/features_all_id_{}_evaluation_nonscaled.csv".format(
            config['id']))

    # Scale Features: mean-centre, then divide by (max + epsilon)
    print("Normalizing : max")
    data[features_list] = (data[features_list] - data[features_list].mean()
                           ) / (data[features_list].max() + 0.001)
    data.to_csv("../Data/Features/features_all_id_{}_evaluation.csv".format(
        config['id']))

    # --------------- #
    # model indicator #
    # --------------- #
    # shuffle dataset (fixed seed for reproducibility)
    data = data.sample(frac=1, random_state=1783)  # shuffle data
    scores_dict = {}  # placeholder to save the scores
    from modeller import Modeller
    X, y = data[features_list].reset_index(), data[config['indicator']]
    modeller = Modeller(X,
                        rs_features=features_list,
                        spatial_features=["gpsLatitude", "gpsLongitude"],
                        scoring='r2',
                        cv_loops=20)

    kNN_pipeline = modeller.make_model_pipeline('kNN')
    kNN_scores = modeller.compute_scores(kNN_pipeline, y)
    scores_dict['kNN_R2_mean'] = round(kNN_scores.mean(), 2)
    scores_dict['kNN_R2_std'] = round(kNN_scores.std(), 2)
    print("kNN_R2_mean: ", scores_dict['kNN_R2_mean'], "kNN_R2_std: ",
          scores_dict['kNN_R2_std'])

    Ridge_pipeline = modeller.make_model_pipeline('Ridge')
    Ridge_scores = modeller.compute_scores(Ridge_pipeline, y)
    scores_dict['ridge_R2_mean'] = round(Ridge_scores.mean(), 2)
    scores_dict['ridge_R2_std'] = round(Ridge_scores.std(), 2)
    print("Ridge_R2_mean: ", scores_dict['ridge_R2_mean'], "Ridge_R2_std: ",
          scores_dict['ridge_R2_std'])

    Ensemble_pipeline = modeller.make_ensemble_pipeline(
        [kNN_pipeline, Ridge_pipeline])
    Ensemble_scores = modeller.compute_scores(Ensemble_pipeline, y)
    scores_dict['ensemble_R2_mean'] = round(Ensemble_scores.mean(), 2)
    scores_dict['ensemble_R2_std'] = round(Ensemble_scores.std(), 2)
    print("Ensemble_R2_mean: ", scores_dict['ensemble_R2_mean'],
          "Ensemble_R2_std: ", scores_dict['ensemble_R2_std'])

    # save results: file when run from config file, DB when run by id
    if id is None:
        write_scores_to_file(scores_dict, config['id'])
    else:
        write_scores_to_db(scores_dict, config['id'], engine)

    # ------------------------- #
    # write predictions to file #
    # ------------------------- #
    print('INFO: writing predictions to disk ...')

    from sklearn.model_selection import cross_val_predict
    results = pd.DataFrame(
        {
            'yhat': cross_val_predict(Ensemble_pipeline, X.values, y),
            'y': data[config['indicator']].values
        },
        index=data.index)
    results.to_csv('../Data/Results/config_{}.csv'.format(config['id']))

    # save model for production (refit on all data)
    Ensemble_pipeline.fit(X.values, y)

    # Best n_neighbors (kNN)
    print('INFO: number of neighbours chosen: ',
          Ensemble_pipeline.regr_[0].named_steps['gridsearchcv'].best_params_)
    # Best alpha (Ridge)
    print('INFO: regularization param chosen: ',
          Ensemble_pipeline.regr_[1].named_steps['gridsearchcv'].best_params_)

    # NOTE(review): sklearn.externals.joblib is removed in modern sklearn;
    # plain `import joblib` is the replacement — confirm the pinned version.
    from sklearn.externals import joblib
    joblib.dump(Ensemble_pipeline,
                '../Models/Ensemble_model_config_id_{}.pkl'.format(id))
    print(str(np.datetime64('now')), 'INFO: model saved.')
Пример #29
0
def load_config(book: str) -> Config:
    """Read the YAML config for *book* and wrap it in a Config object."""
    config_path = utils.get_config_file(book)
    with open(config_path) as handle:
        values = yaml.safe_load(handle)
    return Config(book, values)