Example #1
  def insert_data(self, booklist=None):
    ''' Directly use the database to get the book list.

    Parameters:
    booklist -- the gtk.ListStore into which the books will be added

    '''
    if booklist is None:
      booklist = []
    # Read the config file
    try:
      config = load_config.load_config()
      self.calibre_base = HOME_DIR + "/" + config.calibre_db
      logging.info(self.calibre_base)
    except Exception:
      return
    self.booklist = booklist
    conn = sqlite3.connect(self.calibre_base)
    c = conn.cursor()
    # Get author/title pairs.
    c.execute('select name, title from authors, books, books_authors_link \
    where  books.id=books_authors_link.book \
    AND authors.id=books_authors_link.author \
    ;')
    # Insert data into booklist as "Lastname, Firstname(s)" / title rows
    for row in c:
      parts = row[0].split()  # split the author name into words
      author = parts[-1] + ", " + ' '.join(parts[:-1])  # "Lastname, Firstname(s)"
      self.booklist.append(['', author, row[1],
        '', '', '', '0', 0, 0, 'e-book'])

    return self.booklist
Example #3
def main():
    """Client main function"""
    config = {'id': os.getpid()}
    config = load_config.load_config(config)
    for i in range(3):
        client = asyncClientClass.ClientTask(os.getpid(), config)
        client.start()
Example #4
def main():
    parser = argparse.ArgumentParser(
        description="""char-parrot: a character-level language model 
                        using a GRU- or LSTM-based RNN, implemented with PyTorch 
                        [Text generation script]""")
    parser.add_argument("project_dir",
                        help="""Path to the project directory containing the
                             relevant model.ini configuration file. See 
                             sample_project/model.ini for a commented example"""
                        )
    parser.add_argument("-l",
                        "--load-file",
                        help="""Load previously saved model state from 
                             project_dir/LOAD_FILE. The current configuration 
                             must be consistent with that of the model 
                             which generated this file""",
                        required=True)
    parser.add_argument("-s",
                        '--seed',
                        help="""Seed used to predict the first character.
                             Must be at least as long as the number of time steps
                             specified in the config file""",
                        required=True)
    parser.add_argument("-n",
                        "--length",
                        help="Length of sequence to predict and print.",
                        required=False,
                        default=250)
    parser.add_argument("-t",
                        "--temperature",
                        help="""Temperature to use when predicting the
                             next character. Lower is more greedy, higher is
                             more random""",
                        required=False,
                        default=1)

    args = parser.parse_args()

    os.chdir(args.project_dir)
    config = load_config()

    char_parrot = model.CharParrot(
        model_type=config['model_type'],
        dataset_file=config['dataset_file'],
        case_sensitive=bool(int(config['case_sensitive'])),
        time_steps=int(config['time_steps']),
        batch_size=int(config['batch_size']),
        hidden_size=int(config['hidden_size']),
        nb_layers=int(config['nb_layers']),
        dropout=float(config['dropout']),
        learning_rate=float(config['learning_rate']),
        zero_hidden=bool(int(config['zero_hidden'])),
        save_file=None)

    char_parrot.load(args.load_file, True)

    char_parrot.generate(args.seed, int(args.length),
                         int(config['time_steps']), float(args.temperature))
Example #5
def count_experiments(series: Union[dict, List[dict]]) -> int:
    if not isinstance(series, list):
        series = [series]
    n = 0
    for item in series:
        if isinstance(item, list):
            # Implicit series
            n += count_experiments(item)
        elif 'series' in item:
            # Composite experiment with explicit series
            n += 1 + sum(
                count_experiments(load_config(path))
                for path in item['series'])
        else:
            # Single experiment
            n += 1
            if 'base_experiment' in item:
                n += count_experiments(load_config(item['base_experiment']))
    return n
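count_experiments walks a series recursively: a plain dict counts as one experiment, a dict with a 'series' key counts itself plus every experiment reachable through the listed config files, and a 'base_experiment' reference is followed through load_config as well. A minimal sketch of how the counting plays out, using an in-memory stand-in for load_config and invented file names:

# Hypothetical configs; a dict stands in for load_config reading files from disk.
configs = {
    'base.yaml': {'exp_params': {}},                 # a single experiment
    'child.yaml': {'base_experiment': 'base.yaml'},  # single experiment + its base
}

def load_config(path):  # stand-in for the real loader used above
    return configs[path]

series = [
    {'exp_params': {}},                       # counts 1
    {'series': ['base.yaml', 'child.yaml']},  # counts 1 + 1 + 2 = 4
]
print(count_experiments(series))              # -> 5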
Example #6
    def __init__(
        self,
        box_roi_pool,
        box_head,
        box_predictor,
        # Faster R-CNN training
        fg_iou_thresh,
        bg_iou_thresh,
        batch_size_per_image,
        positive_fraction,
        bbox_reg_weights,
        # Faster R-CNN inference
        score_thresh,
        nms_thresh,
        detections_per_img,
        # Mask
        mask_roi_pool=None,
        mask_head=None,
        mask_predictor=None,
        keypoint_roi_pool=None,
        keypoint_head=None,
        keypoint_predictor=None,
    ):
        super(RoIHeads, self).__init__()

        self.box_similarity = box_ops.box_iou
        # assign ground-truth boxes for each proposal
        self.proposal_matcher = det_utils.Matcher(
            fg_iou_thresh, bg_iou_thresh, allow_low_quality_matches=False)

        self.fg_bg_sampler = det_utils.BalancedPositiveNegativeSampler(
            batch_size_per_image, positive_fraction)

        if bbox_reg_weights is None:
            bbox_reg_weights = (10., 10., 5., 5.)
        self.box_coder = det_utils.BoxCoder(bbox_reg_weights)

        self.box_roi_pool = box_roi_pool
        self.box_head = box_head
        self.box_predictor = box_predictor

        self.score_thresh = score_thresh
        self.nms_thresh = nms_thresh
        self.detections_per_img = detections_per_img

        self.mask_roi_pool = mask_roi_pool
        self.mask_head = mask_head
        self.mask_predictor = mask_predictor

        self.keypoint_roi_pool = keypoint_roi_pool
        self.keypoint_head = keypoint_head
        self.keypoint_predictor = keypoint_predictor

        self.boundaries, self.types_limit = load_config()
Example #7
def simulation_originale():
    CONFIG = load_config()
    simulation = SimulationOriginale(
        float(CONFIG['CONSTANTE']['X']), float(CONFIG['CONSTANTE']['Y']),
        float(CONFIG['CONSTANTE']['Z']), float(CONFIG['CONSTANTE']['W']),
        float(CONFIG['VARIABLE']['lambda']), float(CONFIG['VARIABLE']['mu']),
        float(CONFIG['VARIABLE']['alpha']), float(CONFIG['VARIABLE']['beta']),
        int(CONFIG['SIMULATION']['nombre_serveurs']),
        float(CONFIG['SIMULATION']['temps_simulation']))

    simulation.simulation_magasin()
Example #8
def init_db() -> MySQLDatabase:
    db_config = asyncio.run(load_config())['DataBase']

    # Define the database object
    db = MySQLDatabase(
        database=db_config['DB'],
        user=db_config['User'],
        password=db_config['Password'],
        host=db_config['Host'],
        port=db_config['Port']
    )
    return db
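A brief usage sketch for the object returned above (peewee's MySQLDatabase connects explicitly or on first query; the SELECT 1 check is only an assumption about how one might verify the credentials):

# Minimal usage sketch for the peewee database returned by init_db().
db = init_db()
db.connect(reuse_if_open=True)   # open the MySQL connection
try:
    db.execute_sql('SELECT 1;')  # sanity-check that the credentials work
finally:
    db.close()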
Example #9
def master(comm):

    size = comm.Get_size()

    cfg = load_config('sconfig')
    file_size = int(cfg['file_size'])
    mb_per_img = cfg['image_size']
    batch_size = int(cfg['batch_size'])

    files = glob.glob(cfg['path'] + '/swmr_file*')
    for fil in files:
        os.remove(fil)
    av_files = []
Example #10
def classification_embed2d(config: dict, run_args: dict):
    base_experiment, _ = experiment_main(
        load_config(config['base_experiment']), run_args)
    encoder = base_experiment.model.get_encoder()
    encoder.requires_grad_(False)  # freeze the encoder's parameters
    exp_params = config['exp_params']
    c, h, w = get_example_shape(exp_params['data'])
    model = create_model(**config['model_params'],
                         width=w,
                         height=h,
                         channels=c,
                         encoder=encoder)
    return ClassificationExperiment(model, params=exp_params)
Example #11
def create_viz_production(results_table_name='final_model_eval'):
    """
    Creates visualizations for models that are trained on the full training data set,
    and are used in production.

    Parameters
    ----------

    results_table_name : str
        The name of a SQL table which contains results (from the test set) about the final
        models.
    """

    # set up required variables
    local_paths_env = load_local_paths('../pipeline/local_paths.yaml')
    env = load_psql_env(local_paths_env['pgpass_path'])
    ignition = load_config(local_paths_env['ignition_path'] +
                           '_1_baseline_ignition.yaml')

    # open sql connection
    connection = SQLConn(env)
    connection.open()

    # pull data from table
    query = f"select * from results.{results_table_name};"
    results_df = pd.read_sql_query(query, connection.conn)
    results_df['label'] = results_df['review_group']

    # create directory for visualizations
    vis_dir = f'{local_paths_env["store_visualizations"]}/production_citations'

    # create folders to store visualizations
    if not os.path.exists(vis_dir):
        os.makedirs(vis_dir)

    # precision recall plots
    for rg in tqdm(results_df['review_group'].unique()):
        plot = plot_precision_recall_curve_best(results_df,
                                                rg,
                                                plot_baseline=False)
        plt.savefig(f'{vis_dir}/pr_curve-{rg}.png')
        plt.close()

    # stacked bar workload
    plot = workload_relative_stackedbar(results_df)
    plt.savefig(f'{vis_dir}/workload_relative.png')
    plt.close()

    plot_average_workload_reduction(results_df)
    plt.savefig(f'{vis_dir}/workload_average.png')
    plt.close()
Example #12
def main():
    parser = argparse.ArgumentParser(
        description="""char-parrot: a character-level language model 
                        using a GRU- or LSTM-based RNN, implemented with PyTorch 
                        [Training script]""")
    parser.add_argument("project_dir",
                        help="""Path to the project directory containing the
                             relevant model.ini configuration file. See 
                             sample_project/model.ini for a commented example"""
                        )
    parser.add_argument("-e",
                        "--epochs",
                        help="Number of training epochs",
                        required=False,
                        default=10)
    parser.add_argument(
        "-s",
        "--save-file",
        help="""Save model state to project_dir/SAVE_FILE after 
                             every epoch, overwriting any existing file""",
        required=False)
    parser.add_argument(
        "-l",
        "--load-file",
        help="""Load model state from project_dir/LOAD_FILE. The
                             current configuration must be consistent with
                             that of the model which generated this file""",
        required=False)

    args = parser.parse_args()

    os.chdir(args.project_dir)
    config = load_config()

    char_parrot = model.CharParrot(
        model_type=config['model_type'],
        dataset_file=config['dataset_file'],
        case_sensitive=bool(int(config['case_sensitive'])),
        time_steps=int(config['time_steps']),
        batch_size=int(config['batch_size']),
        hidden_size=int(config['hidden_size']),
        nb_layers=int(config['nb_layers']),
        dropout=float(config['dropout']),
        learning_rate=float(config['learning_rate']),
        zero_hidden=bool(int(config['zero_hidden'])),
        save_file=args.save_file)
    if args.load_file:
        char_parrot.load(args.load_file)

    char_parrot.train(int(args.epochs))
def run():
    __CONFIG__ = load_config()
    
    print('Preparing base folder ... ')
    if not os.path.exists(os.path.join(__CONFIG__['base-folder'], 'classificator')):
        os.makedirs(os.path.join(__CONFIG__['base-folder'], 'classificator'))
        
    print('Loading data ...')
    max_dataset_size = __CONFIG__['max-dataset-size'] if __CONFIG__['max-dataset-size'] > 0 else None
    positive_documents_train, positive_documents_test = load_documents(os.path.join(__CONFIG__['input-folder'],
                                                                                    'positive'), max_dataset_size, __CONFIG__)
    unlabeled_documents_train, unlabeled_documents_test = load_documents(os.path.join(__CONFIG__['input-folder'],
                                                                                      'negative'), max_dataset_size, __CONFIG__) 
        
    positive_documents_train, unlabeled_documents_train = unskew(positive_documents_train, unlabeled_documents_train)
    positive_documents_test, unlabeled_documents_test = unskew(positive_documents_test, unlabeled_documents_test)

    print("Training ...")
    tokenizer = SimpleGermanTokenizer()
    gt = []
    for document in positive_documents_train:
        gt.append('medical')
    for document in unlabeled_documents_train:
        gt.append('nonmedical')
    
    pipeline = Pipeline([
        ('vectorizer', TfidfVectorizer(tokenizer=tokenizer.tokenize)),
        ('classifier', MultinomialNB()),
    ])
    
    pipeline.fit(positive_documents_train + unlabeled_documents_train, gt)

    model_path = os.path.join(__CONFIG__['base-folder'], 'classificator', __CONFIG__['classifier-name'] + '.pickle')
    print("Saving model to %s ..." % model_path)    
    with open(model_path, 'wb') as file:
        pickle.dump(pipeline, file)

    print('Testing ...')
    gt_test = []
    for document in tqdm.tqdm(positive_documents_test):
        gt_test.append('medical')
    for document in tqdm.tqdm(unlabeled_documents_test):
        gt_test.append('nonmedical')
        
    predictions = pipeline.predict(positive_documents_test+unlabeled_documents_test)
    print(confusion_matrix(gt_test, predictions))
    print('F1: %s'  % f1_score(gt_test, predictions, pos_label='medical'))
    print('Accuracy: %s' % accuracy_score(gt_test, predictions))
    print('Precision: %s' % precision_score(gt_test, predictions, pos_label='medical'))
    print('Recall: %s' % recall_score(gt_test, predictions, pos_label='medical'))
Example #14
def init_celery() -> Celery:
    celery_config = asyncio.run(load_config())['CeleryRedis']

    # Build the Redis broker/backend URI
    broker_uri = backend_uri = "redis://{User}:{Password}@{Host}:{Port}/{DB}".format(
        User=celery_config['User'],
        Password=quote_plus(celery_config['Password']),
        Host=celery_config['Host'],
        Port=celery_config['Port'],
        DB=celery_config['DB'])

    # Create the Celery application
    celery_obj = Celery('celery_app', broker=broker_uri, backend=backend_uri)

    return celery_obj
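As a usage sketch, the returned Celery instance registers tasks in the usual decorator style; the task name below is purely hypothetical:

# Minimal sketch: register a task on the Celery app built above.
celery_app = init_celery()

@celery_app.task(name='demo.add')  # hypothetical task name
def add(x, y):
    return x + y

# add.delay(2, 3) would enqueue the task via the Redis broker;
# fetching the result requires a running worker.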
Example #15
def hparam_search(config: dict, run_args: dict):
    import ray
    from ray import tune
    run_config = load_config(config['experiment'])
    run_config = generate_run_config(run_config)
    run_config['trainer_params'] = deep_merge(
        run_config['trainer_params'].copy(),
        {
            'max_steps': config['num_train_steps'],
            #'val_check_interval': config['num_train_steps'],
            'limit_val_batches': config['num_val_steps'],
            #'log_every_n_steps': 1,
            #'max_epochs': config.get('num_epochs', 1),
        })
    if config.get('randomize_seed', False):
        print('Warning: randomizing seed for each trial')
        run_config['manual_seed'] = tune.sample_from(
            lambda spec: np.random.randint(0, 64_000))
    ray.init(num_gpus=1)
    print("ray.get_gpu_ids(): {}".format(ray.get_gpu_ids()))
    analysis = tune.run(
        tune.with_parameters(experiment_main,
                             run_args=dict(**run_args, enable_tune=True)),
        name=run_config['entrypoint'],
        config=run_config,
        local_dir=run_args['save_dir'],
        num_samples=config['num_samples'],
        resources_per_trial=deep_merge({
            'cpu': 6,
            'gpu': 1,
        }, config.get('resources_per_trial', {})),
    )
    metric = config.get('metric', 'val/loss')
    scope = config.get('scope', 'last')
    best_config = get_best_config(analysis=analysis,
                                  metric=metric,
                                  scope=scope)
    # Restore original trainer_params, which were overridden
    # so the hparam search is shorter than a full experiment.
    best_config['trainer_params'] = config['trainer_params']

    print('Best config:')
    print(best_config)

    experiment_main(best_config, run_args)
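For orientation, hparam_search expects its config dict to carry the keys referenced above; a hypothetical minimal example (key names taken from the function body, values invented):

# Hypothetical config for hparam_search; only keys referenced above are shown.
config = {
    'experiment': 'configs/vae.yaml',        # loaded via load_config()
    'num_train_steps': 500,                  # shortened run per trial
    'num_val_steps': 50,
    'num_samples': 20,                       # number of tune trials
    'randomize_seed': True,
    'metric': 'val/loss',
    'scope': 'last',
    'trainer_params': {'max_epochs': 100},   # restored for the final full run
}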
Example #16
 def __init__(self, comm, filt):
     self.cfg = load_config('sconfig')
     self.comm = comm
     self.rank = comm.Get_rank()
     self.size = comm.Get_size()
     self.filt = filt
     self.master_msg = msg(self.rank)
     self.total_read = np.zeros(self.size - 1)
     self.total_events = np.zeros(self.size - 1)
     self.total_file_size = 0
     self.cum_event_num = 0
     self.file0_length = 0
     self.final_length = -1
     self.sd_eof = False
     # self.master_dump = open('dump/master_dump.txt', 'w')
     self.eof_lock = False
     self.last_timestamps = []
     if self.filt:
         self.hit_probability = float(self.cfg['hit_probability'])
     else:
         self.hit_probability = 1
     self.diode_lp = np.zeros(self.size - 1)
Example #17
  def search_calibre(self, needle, booklist=None):
    ''' Search the calibre database on the parameters given.
    The DB is searched twice, once by title and once by author name.
    Adding an OR clause to the search made the query very slow to
    execute; it is much quicker to do two searches.
    '''
    if booklist is None:
      booklist = []
    mybooklist = booklist
    try:
      config = load_config.load_config()
      self.calibre_base = HOME_DIR + "/" + config.calibre_db
      #logging.info(self.calibre_base)
    except Exception:
      return
    self.booklist = booklist
    conn = sqlite3.connect(self.calibre_base)
    c = conn.cursor()
    # Get author/title pairs whose title matches the needle.
    query = "select name, title from authors, books, books_authors_link \
    where  books.id=books_authors_link.book \
    AND authors.id=books_authors_link.author \
    AND title LIKE ?;"
    c.execute(query, ('%' + needle + '%',))
    # Insert data into booklist
    for row in c:
      mybooklist.append(['', row[0], row[1],
        '', '', '', '0', 0, 0, 'e-book'])
    # Get author/title pairs whose author name matches the needle.
    query = "select name, title from authors, books, books_authors_link \
    where  books.id=books_authors_link.book \
    AND authors.id=books_authors_link.author \
    AND name LIKE ?;"
    c.execute(query, ('%' + needle + '%',))
    # Insert data into booklist
    for row in c:
      mybooklist.append(['', row[0], row[1],
        '', '', '', '0', 0, 0, 'e-book'])
    return mybooklist
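The two separate LIKE queries above are a deliberate workaround for a slow OR clause. Against the same calibre schema, the two round trips could also be merged with a UNION; the sketch below is an untested alternative, not the project's code:

import sqlite3

def search_calibre_union(db_path, needle):
    # Hypothetical alternative: one UNION query instead of two LIKE searches.
    conn = sqlite3.connect(db_path)
    c = conn.cursor()
    pattern = '%' + needle + '%'
    c.execute(
        "select name, title from authors, books, books_authors_link "
        "where books.id=books_authors_link.book "
        "AND authors.id=books_authors_link.author AND title LIKE ? "
        "UNION "
        "select name, title from authors, books, books_authors_link "
        "where books.id=books_authors_link.book "
        "AND authors.id=books_authors_link.author AND name LIKE ?;",
        (pattern, pattern))
    rows = c.fetchall()
    conn.close()
    return rows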
Example #19
def simulation_originale_masse():
    """
    Runs the simulation a large number of times and writes the results to a CSV file.
    """
    CONFIG = load_config()
    with open('simulation_originale1.csv', 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        for a in range(10, 36):
            recettes = []
            for i in range(100):
                simulation = SimulationOriginale(
                    float(CONFIG['CONSTANTE']['X']),
                    float(CONFIG['CONSTANTE']['Y']),
                    float(CONFIG['CONSTANTE']['Z']),
                    float(CONFIG['CONSTANTE']['W']),
                    float(CONFIG['VARIABLE']['lambda']),
                    float(CONFIG['VARIABLE']['mu']),
                    float(CONFIG['VARIABLE']['alpha']),
                    float(CONFIG['VARIABLE']['beta']), a,
                    float(CONFIG['SIMULATION']['temps_simulation']))

                simulation.simulation_magasin()
                recettes.append(simulation.recette)
            wr.writerow(recettes)
Example #20
def main():
    import argparse
    from load_config import load_config

    parser = argparse.ArgumentParser()
    parser.add_argument('pickle_dump')
    parser.add_argument('output_file')
    parser.add_argument('-c',
                        '--config_filename',
                        dest='config_filename',
                        help="Config file with private info",
                        default=None)
    parser.add_argument(
        '--max_happy_commute',
        default=45,
        type=float,
        help=
        "For plot that overlays all the commutes, what's the longest not colored red?"
    )
    parser.add_argument('--center_lat',
                        default=34.053695,
                        help="latitude to center on")
    parser.add_argument('--center_lng',
                        default=-118.430208,
                        help="longitutde to center on")
    parser.add_argument(
        '--zoom',
        default=11,
        type=int,
        help="initial zoom of maps.  goes 1 (least zoomed) to 20 (most zoomed)"
    )
    parser.add_argument(
        '--map_type',
        default='roadmap',
        help="initial zoom of maps.  goes 1 (least zoomed) to 20 (most zoomed)"
    )
    parser.add_argument(
        '--palette',
        default='Viridis',
        help="Palette to use.  Must be in bokeh.palettes.all_palettes")
    parser.add_argument(
        '--ncolors',
        type=int,
        default=256,
        help=
        "Number of colors to use.  Must be able to access bokeh.palettes.all_palettes[<palette>][<ncolors>]"
    )
    parser.add_argument('--cbar_min', default=15, type=float)
    parser.add_argument('--cbar_max', default=75, type=float)

    args = parser.parse_args()

    config, timezone = load_config(args.config_filename)
    api_key = config['api_key']

    with open(args.pickle_dump, 'rb') as f:
        print(f"Loading from {args.pickle_dump}")
        data = pickle.load(f)

    center_lats = np.array(data.pop('lat'))
    center_longs = np.array(data.pop('long'))
    names = set(k.split('_')[0] for k in data)

    dx = np.max(center_longs[1:] - center_longs[:-1]) / 2
    dy = np.max(center_lats[1:] - center_lats[:-1]) / 2

    xcoords = [[xc - dx, xc - dx, xc + dx, xc + dx] for xc in center_longs]
    ycoords = [[yc - dy, yc + dy, yc + dy, yc - dy] for yc in center_lats]
    print(f"Plotting {len(xcoords)} squares")

    try:
        args.center_lat = float(args.center_lat)
    except ValueError:
        args.center_lat = (min([yc[0] for yc in ycoords]) +
                           max([yc[1] for yc in ycoords])) / 2

    try:
        args.center_lng = float(args.center_lng)
    except ValueError:
        args.center_lng = (min([xc[0] for xc in xcoords]) +
                           max([xc[2] for xc in xcoords])) / 2

    plots = []
    bk.output_file(args.output_file, title="Commute times")  # mode="inline"
    moptions = GMapOptions(lat=args.center_lat,
                           lng=args.center_lng,
                           zoom=args.zoom,
                           map_type=args.map_type)

    allkeys = [
        f'{name}_{destkey}'
        for name, destkey in product(names, ['towork', 'tohome'])
    ]
    dsets = {restructure_key(key): data[key] for key in allkeys}

    color_mapper = LinearColorMapper(
        palette=all_palettes[args.palette][args.ncolors],
        low=args.cbar_min,
        high=args.cbar_max)

    nhappy = np.zeros(len(xcoords), dtype=int)
    for name in names:
        for destkey in ['towork', 'tohome']:
            key = f'{name}_{destkey}'
            colors = data[key]
            plots.append(
                plot_patches_on_gmap(xcoords,
                                     ycoords,
                                     api_key,
                                     values=colors,
                                     map_options=moptions,
                                     title=restructure_key(key),
                                     color_mapper=color_mapper))

            msk = np.array(colors) <= args.max_happy_commute
            nhappy[msk] += 1

    ## now overlap all the commutes:
    title = f'Areas where all commutes are < {args.max_happy_commute} minutes'
    plot = plot_patches_on_gmap(
        list(np.array(xcoords)[nhappy < len(allkeys) - 1]),
        list(np.array(ycoords)[nhappy < len(allkeys) - 1]),
        api_key,
        map_options=moptions,
        title=title,
        solid_fill='red')

    data = dict(xs=list(np.array(xcoords)[nhappy == len(allkeys) - 1]),
                ys=list(np.array(ycoords)[nhappy == len(allkeys) - 1]))

    source_patches = bk.ColumnDataSource(data=data)
    patches_glyph = plot.patches('xs',
                                 'ys',
                                 fill_alpha=0.25,
                                 fill_color='orange',
                                 source=source_patches,
                                 line_width=0)

    plots.append(plot)

    ## now show
    grid = gridplot(plots, ncols=2)
    bk.show(grid)
Example #21
        worker.start()
        self.proxy_check_workers.append(worker)

    # Start a download worker thread
    def _start_new_worker(self):
        worker = DownloadWorker(self)
        # worker.setDaemon(True)
        worker.start()
        self.download_workers.append(worker)

    # Start an extractor worker thread
    def _start_new_extractor(self):
        worker = ExtractorWorker(self)
        # worker.setDaemon(True)
        worker.start()
        self.extractor_workers.append(worker)


if __name__ == "__main__":
    fileConfig("logger_config.ini")
    xq_config = load_config("xq_user_article")
    services = []
    xq_article_service = XQUserArticleService(xq_config)
    xq_cube_service = XQUserCubeService(xq_config)
    xq_stock_service = XQUserStockService(xq_config)
    services.append(xq_article_service)
    services.append(xq_cube_service)
    services.append(xq_stock_service)
    crawler = Crawler(xq_config, services)
    crawler.start()
import logging, load_config, pprint
from cryptorclient import CryptorClient

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('Cryptor Daemon')


settings = load_config.load_config(r'E:\Cryptor\.config\cryptor.cfg')
logger.info('Loaded Config')

client = CryptorClient(settings)
logger.info('Client created successfully')

if client.verify_environment():
	logger.info('Environment is verified')
else:
	logger.info('EXITING: Reason - Environment is not verified')
	exit()

old_delta = client.db.delta(path_prefix='/Cryptor')

count = 0
flag = True
while(flag):
	count+=1
	delta = client.db.delta(old_delta['cursor'], client.settings['db_home'])




	diff = delta['entries']
Example #23
from trackbot import TrackBot
from load_config import load_config

config = load_config('config.json')

imei = "442283480893012"
tracker = TrackBot(
    config['host'],
    config['port'],
    config['buffer_size'],
    imei
)

# while True:
#     tracker.ping_tcp_random()

while True:
    print(tracker.ping_api_random('123', 'Shaphil'))
def main():
    """Server main function"""
    config = load_config.load_config({})
    server = asyncServerClass.ServerTask(config)
    server.start()
    server.join()
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('outname')
    parser.add_argument('-c',
                        '--config_filename',
                        dest='config_filename',
                        help="Config file with private info",
                        default=None)
    parser.add_argument(
        '--state_name',
        default="California",
        help=
        "State to use for the boundary to distinguish land from water.  Points outside the state are discarded"
    )
    parser.add_argument(
        '--npts',
        default=25,
        type=int,
        help=
        "Number of points on each side of the grid (i.e. will use npts**2 points total)"
    )
    parser.add_argument('--northern_limit', default=34.219498, type=float)
    parser.add_argument('--southern_limit', default=33.816168, type=float)
    parser.add_argument('--eastern_limit', default=-118.254013, type=float)
    parser.add_argument('--western_limit', default=-118.606110, type=float)

    args = parser.parse_args()

    config, timezone = load_config(args.config_filename)
    CommuteTimes = CommuteTimesClass(key=config['api_key'])
    commutes = config['commutes']

    xv = np.linspace(args.western_limit, args.eastern_limit, args.npts)
    yv = np.linspace(args.southern_limit, args.northern_limit, args.npts)
    pairs = np.array(np.meshgrid(xv, yv)).T.reshape(-1, 2)
    xvals, yvals = pairs.T

    if args.state_name.lower() != 'none':
        import shapely.vectorized
        import shapely.geometry as sgeom
        import cartopy.io.shapereader as shpreader

        shpfilename = shpreader.natural_earth(resolution='10m',
                                              category='cultural',
                                              name='admin_1_states_provinces')
        reader = shpreader.Reader(shpfilename)
        states = list(reader.records())

        state, = [
            state for state in states
            if state.attributes['name'] == args.state_name
        ]
        geom = state.geometry

        mask = shapely.vectorized.contains(geom, xvals, yvals)
    else:
        mask = np.ones(xvals.size, dtype=bool)

    result = defaultdict(list)
    names = commutes.keys()

    for ii, (ll, la) in enumerate(tqdm(pairs)):
        if mask[ii]:
            try:
                address = f'{la},{ll}'

                res = CommuteTimes.get_commute_times(address,
                                                     commutes,
                                                     2019,
                                                     8,
                                                     7,
                                                     2,
                                                     timezone,
                                                     models=['best_guess'],
                                                     do_print=False,
                                                     do_pbar=False)

                result['lat'].append(la)
                result['long'].append(ll)
                for key in res:
                    result[key].append(res[key])
            except ValueError:
                pass

    print(f"Writing output to {args.outname}...")
    with open(args.outname, 'wb') as out:
        pickle.dump(result, out)
    print("Done!")
Example #26
logging.basicConfig(
    format='%(module)s: LINE %(lineno)d: %(levelname)s: %(message)s',
    level=logging.DEBUG)
#logging.disable(logging.INFO)
DEBUG = logging.debug
INFO = logging.info

# Get system platform
plat = sys.platform

# Get the real location of this script
iamhere = os.path.dirname(os.path.realpath(__file__))
#print "I am here",iamhere

# Read the config file
config = config.load_config()  # For file based config

try:
    db_user = config.db_user
    db_pass = config.db_pass
    db_base = config.db_base
    db_host = config.db_host
    db_lite = config.lite_db
    use = config.use  # What DB type to use
except:
    print "\nThere There is some error in the config file.\nCannot continue!\n\n "
    quit()


########################################################################
class sqlite:
def pull_results(ignition_ids,
                 table_name='results.evaluate_rg',
                 metric_col='metrics',
                 metrics=['precision_at_recall'],
                 other_cols=[
                     'ignition_id', 'hash_id', 'algorithm', 'hyperparameters',
                     'fold', 'recall'
                 ]):
    """
    Pull results from PSQL table into long dataframe.

    Parameters
    ==========
    ignition_ids : list
        List of ignition_ids to pull into table.
    table_name : str
        Name of PSQL table with results.
    metric_col : str
        Name of column where metrics exist.
    metrics : list
        Metrics to be included in table. Will be parsed from jsonb.
    other_cols : list
        List of other columns to included in table as is.

    Returns
    =======
    results_df : pd.DataFrame
        Long dataframe with results from specified ignition files, metrics, and labels.
    """

    local_paths_env = load_local_paths('../pipeline/local_paths.yaml')
    env = load_psql_env(local_paths_env['pgpass_path'])
    ignition = load_config(local_paths_env['ignition_path'] +
                           '_1_baseline_ignition.yaml')

    # establish SQL Connection
    connection = SQLConn(env)
    connection.open()

    ### Set up ###
    results = {}
    ignition_ids_sql = "('" + "','".join(ignition_ids) + "')"
    other_cols_sql = ",".join(other_cols)

    ## Make one query for each label and store resulting df in a dict ###
    i = 0
    for label in ignition['classes']:
        metrics_sql = f"'{label}' as label"
        for metric in metrics:
            metrics_sql += f",{metric_col} -> '{metric}' -> '{label.lower()}' as {metric}"

        qy = f"""
        select {other_cols_sql},
        {metrics_sql}
        from
        {table_name}
        where ignition_id in {ignition_ids_sql};
        """

        results[label] = pd.read_sql_query(qy, connection.conn)

    ## Concatenate all dfs into one long df ###
    results_df = pd.concat(results.values(), ignore_index=True)

    connection.close()

    return results_df
from pymongo import MongoClient
from urllib.parse import quote_plus

def connect_db(mongo_data: dict):
    """
Takes a loaded yaml file and returns a url to connect to. 
THIS DOESN'T ESTABLISH A CONNECTION
"""
    database  = mongo_data['DATABASE']
    database_url = mongo_data['DATABASE_URL']
    port = mongo_data.get('PORT', 27017)
    
    if all(key in mongo_data for key in ("USERNAME", "PASSWORD")): # I think this is ugly
        username = quote_plus(mongo_data['USERNAME'])
        password = quote_plus(mongo_data['PASSWORD'])
        return MongoClient(host=[f'{database_url}:{port}'], username=username, password=password)

    else: 
        print("NO USERNAME OR PASSWORD DETECTED. I WOULDN'T RECOMMEND RUNNING THIS IN PROD")
        return MongoClient(host=[f'{database_url}:{port}'])


content_db_config = load_config('contentdb.yml')
user_db_config = load_config('userdb.yml')

content_connection = connect_db(content_db_config)
CONTENT_DB = content_connection[content_db_config['DATABASE']]

user_connection = connect_db(user_db_config)
USER_DB = user_connection[user_db_config['DATABASE']]
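Because MongoClient connects lazily, nothing above has actually contacted either server yet. A small check that forces a round trip (the ping admin command is standard in pymongo):

# Force a round trip to verify the lazily-created MongoClient connections.
for name, client in (('content', content_connection), ('user', user_connection)):
    try:
        client.admin.command('ping')
        print(f'{name} database reachable')
    except Exception as exc:
        print(f'{name} database NOT reachable: {exc}')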
from pymongo import MongoClient
from load_config import load_config 
from urllib.parse import quote_plus

cfg = load_config('blueprints/users/config.yml')
db = cfg['db']

PASSWORD = quote_plus(db['PASSWORD'])
DATABASE = db['DATABASE']
USERNAME = db['USERNAME']
DATABASE_URL = db['DATABASE_URL']
PORT = db['PORT']

conn = MongoClient(DATABASE_URL, PORT)
db = conn[DATABASE]
#auth = db.authenticate(USERNAME, PASSWORD)
        server.start()
    except KeyboardInterrupt:
        server.stop()


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("type", nargs="?", default="redfish")
    args = parser.parse_args()

    if (args.type == "redfish"):
        import load_config
        from redfish import app as redfish_app

        # Load configuration settings
        load_config.load_config()
        run_rack_manager_default(app=redfish_app,
                                 ssl_enabled=True,
                                 host=load_config.service_host,
                                 port=load_config.service_port,
                                 number_threads=load_config.thread_pool)
    elif (args.type == "legacy"):
        from legacy import app as legacy_app

        run_rack_manager_default(app=legacy_app, ssl_enabled=True, port=8000)
    elif (args.type == "compile"):
        from redfish import app as redfish_app
        from legacy import app as legacy_app

        # We just want to force the python files to compile to avoid multiple processes compiling
        # the code at the same time.
from app import app
from flask import (
    render_template,
    redirect,
    url_for,
    session,
    request,
    Blueprint,
    )
from load_config import load_config

cfg = load_config('blueprints/vault/config.yml')

vault = Blueprint(
    'vault',
    __name__, 
    template_folder='templates',
    )

@vault.route('/')
def hello():
  return 'Hello World'
import functools
import threading
from queue import Queue

from load_config import load_config, create_config
from output import run_txt_updater
from twitch_chat import run_twitch_chat
from vote_exec import run_vote

if __name__ == "__main__":
    config = load_config("config.json")
    c = create_config(config)
    queue = Queue()
    response_queue = Queue()
    txt_update_queue = Queue()
    twitch_chat_thread = threading.Thread(target=functools.partial(
        run_twitch_chat, queue, response_queue, c, config))
    voting_thread = threading.Thread(target=functools.partial(
        run_vote, queue, response_queue, txt_update_queue, config["customize"],
        config["credentials"]["nickname"]))
    text_thread = threading.Thread(
        target=functools.partial(run_txt_updater, txt_update_queue))
    threads = [twitch_chat_thread, voting_thread, text_thread]

    print("Starting!")

    for t in threads:
        t.start()

    for t in threads:
        t.join()
import random
import requests
import string
import stripe
from load_config import load_config
from mailchimp_config import mailchimp_client, mailing_list_id
from mongo import USER_DB

users = Blueprint(
    'users',
    __name__, 
    template_folder='templates',
    )

users_collection = USER_DB['users']
cfg = load_config('config.yml')
STRIPE = cfg['stripe']
stripe.api_key = STRIPE['API_KEY']
SLACK = cfg['SLACK_TOKEN']

@users.route('/payment/<plan>/<coupon>', methods=['POST'])
@users.route('/payment/<plan>', methods=['POST'])
def payment_successful(plan, coupon=None):
    email = request.form['stripeEmail']
    session['email'] = email
   
    if  users_collection.find_one({'email': email, 'password':{'$exists': True}}):
        return 'This email address is already registered.'
    
    # Create Customer Account in Stripe
    customer = stripe.Customer.create(
Example #34
gettext.textdomain(APP)
_ = gettext.gettext

#logger = logging.getLogger("librarian")
logging.basicConfig(format='%(module)s: LINE %(lineno)d: %(levelname)s: %(message)s', level=logging.DEBUG)
#logging.disable(logging.INFO)

# Get system platform
plat = sys.platform

# Get the real location of this script
iamhere = os.path.dirname( os.path.realpath( __file__ ) )
#print "I am here",iamhere

# Read the config file
config = config.load_config() # For file based config

try:
  db_user = config.db_user
  db_pass = config.db_pass
  db_base = config.db_base
  db_host = config.db_host
  db_lite = config.lite_db
  use = config.use # What DB type to use
except: 
  print "\nThere There is some error in the config file.\nCannot continue!\n\n "
  quit()


########################################################################
class sqlite:
          '      |- Home.styles.tsx\n'\
          '\n'\
          '- You can also provide an "-rn" flag at the and of command input.\n'\
          '  This option will generate the .styles file with "styled.Text``" instead of "styled.div``".\n'\
          '\n'\
          '- You can also run "rcs_config help" for more informations about changing defaults.'\
          '\n\n'\
          'https://github.com/erickvieira/react-styled-components-cli'
        )
    else:
        if len(argv) > 2:
            c_name = argv[2].lower()
        if len(argv) > 3:
            is_native = argv[3].lower() == '-rn'
        else:
            is_native = load_config()['is_native']
        if len(argv) > 4:
            is_jsx = argv[4].lower() == '-jsx'

        if not c_name:
            c_name = type
            print('Generating: %s component' % c_name)
        else:
            print('Generating: %s %s' % (c_name, {
                'p': 'page',
                'c': 'component',
                'ctx': 'context'
            }[type]))

        def loading():
            dots = ['.' for i in range(0, 5)]
import os
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from load_config import load_config

def plot_scores(params, scores):
    plt.plot(params, scores)
    plt.title('validation on the size of the word2vec dimensionality')
    plt.ylabel('accuracy')
    plt.xlabel('vector size')
    plt.grid(True)
    plt.savefig(base_folder + 'word2vec_dimensions_validation.pdf')

if __name__ == '__main__':
    config = load_config()
    base_folder = config['base_folder']

    if not os.path.exists(base_folder):
        os.makedirs(base_folder)
    
    json.dump(config, open(base_folder + 'configuration.json','w'), indent=4, sort_keys=True)    
    
    scores = []
    params = []
    for i in range(1, 250, 10):
        print("Validate parameter " + str(i))
        config['lstm-layers'][0]['output-size'] = i
        score = run(config)
        scores.append(score)
        params.append(i)
Example #37
from pathlib import Path
from templates import component, index, styles, context
from load_config import load_config

ext = 'jsx' if load_config()['is_jsx'] else 'tsx'


def create_dir(dir_name='', super_dir=None):
    root_path = '.' if ext == 'jsx' else './src'
    if not dir_name:
        raise Exception('cant find or create folder without dir_name')
    else:
        output_dir = None
        if super_dir:
            main_dir = Path(root_path) / f'{super_dir}'
            main_dir.mkdir(exist_ok=True)
            output_dir = main_dir / f'{dir_name.capitalize()}'
        else:
            output_dir = Path(root_path) / f'{dir_name.capitalize()}'
        output_dir.mkdir(exist_ok=True)
        return output_dir


def create_components(c_name='',
                      dir_path=Path('.') / f'file',
                      is_native=False):
    if not dir_path:
        raise Exception('failed to find a path to create components')
    else:
        component_filename = '%s.%s' % (c_name.capitalize(), ext)
        index_filename = 'index.%s' % (ext)
Example #38
from mpi4py import MPI
import h5py, subprocess, glob, os
import numpy as np
import time
import os, random

from load_config import load_config

cfg = load_config('sconfig')
hit_prob = float(cfg['hit_probability'])
write_limit = int(cfg['file_size'])
mb_per_img = int(cfg['image_size'])
batch_size = int(cfg['batch_size'])
bytes_per_batch = mb_per_img * batch_size * 10**6

disk_num = int(cfg['disk_num'])
path = cfg['path'] % disk_num


def read_client(comm, filt=0):

    rank = comm.Get_rank()
    size = comm.Get_size()

    # mb_per_img = 1
    # batches = 16
    arr_size = mb_per_img * 500000
    # path = '/drpffb/eliseo/data/xtc_lite/'
    # file_name = path % rank + '/xtc_lite_%i.xtc' % rank

    file_name = path + '/xtc_lite_%i.xtc' % rank
import data_utils as dutils
from load_config import load_config, device
cf = load_config()
from logger import logger
import torch, json, sys
from data_utils import batch_to_wordpieces, wordpieces_to_bert_embs, build_token_to_wp_mapping, load_embeddings

from sklearn.metrics import f1_score, classification_report, accuracy_score

from colorama import Fore, Back, Style

import random

import nfgec_evaluate

torch.manual_seed(123)
torch.backends.cudnn.deterministic=True

class ModelEvaluator():

	def __init__(self, model, data_loader, word_vocab, wordpiece_vocab, hierarchy, bc, mode="train"):
		self.model = model 
		self.data_loader = data_loader 
		self.word_vocab = word_vocab 
		self.wordpiece_vocab = wordpiece_vocab
		self.hierarchy = hierarchy
		self.bc = bc
		self.mode = mode

		self.best_f1_and_epoch = [0.0, -1]
import json
from os import listdir
from os.path import isfile, join
from load_config import load_config

if __name__ == "__main__":
  DATA_PATH = './json_data/'
  VECT_PATH = './vectors/'
  files = [ f for f in listdir(DATA_PATH) if isfile(join(DATA_PATH, f)) ]
  """
    Load base files with entities, ignored files and categories for this example.
  """
  lc = load_config()
  lc.read_config()
  tokens = lc.get_entities()
  ignored_files = lc.get_ignored()
  CATEGORIES = lc.get_categories()

  files = [f for f in files if f not in ignored_files and '-key' not in f]
  for _file in files:
    keys = {}
    sfile = _file.split('.')
    json_key_file = open(DATA_PATH+sfile[0]+'-key.'+sfile[1], 'r')
    keys = json.loads(json_key_file.read())
    json_key_file.close()
    json_feature_file = open(DATA_PATH+sfile[0]+'.'+sfile[1], 'r')
    features = json.loads(json_feature_file.read())
    json_feature_file.close()

    end_file = open(VECT_PATH+sfile[0]+'-vec.txt', 'a')
    for feat_id in features:
Example #41
'''
Add new borrowers to the list.  Remove too I guess.
'''
import MySQLdb
import MySQLdb.cursors
import sys
import load_config
import logging
import book
import locale
import gettext
import popen2

locale.setlocale(locale.LC_ALL, '')
APP = 'librarian'
gettext.textdomain(APP)
_ = gettext.gettext

logger = logging.getLogger("librarian")
logging.basicConfig(
    format='%(module)s: LINE %(lineno)d: %(levelname)s:%(message)s',
    level=logging.DEBUG)

# Read the config file

config = load_config.load_config()
db_user = config.db_user
db_pass = config.db_pass
db_base = config.db_base
db_host = config.db_host
Example #42
    parser.add_argument('--validate',
                        dest="validate",
                        metavar='VALIDATE',
                        help='validation mode',
                        default=False)
    args = parser.parse_args()

    if args.smoke_test:
        print(
            'Executing smoke test - training will stop after a couple steps.')

    cudnn.deterministic = True
    cudnn.benchmark = True
    decord.bridge.set_bridge('torch')

    config = load_config(args.config)
    total_experiments = count_experiments(config)

    num_samples = args.num_samples or 1

    deltas = []
    for i in range(num_samples):
        print(f'Running sample {i+1}/{num_samples} on cuda:{args.gpu}')
        start = time.time()
        run_series(config,
                   save_dir=args.save_dir,
                   num_threads=args.num_threads,
                   gpu=args.gpu,
                   exp_no=0,
                   total_experiments=total_experiments,
                   smoke_test=args.smoke_test,
Example #43
def deploy():
    load_config()
    install_machine()
    deploy_geoserver()
    supervisor.update_config()
    restart_services()
    restart_geotaxi()
    deploy_api()