Example #1
import logging
from multiprocessing import Pool

import utils  # project-local module; get_soliton and simulate_cw are defined in the same project


def exec_pool(args):
    log_level = logging.DEBUG if args.debug else logging.INFO
    if args.console:
        utils.setup_console_logger(log_level)
    else:
        utils.setup_file_logger(args.data_dir, 'luby', log_level)

    # Build a run identifier from the key parameters
    id_keys = ['k', 'n', 'c', 'delta']
    id_val = [str(vars(args)[key]) for key in id_keys]
    saver = utils.Saver(args.data_dir,
                        list(zip(['type'] + id_keys, ['luby'] + id_val)))
    log = logging.getLogger('.'.join(id_val))

    k, n, arr = args.k, args.n, []
    omega = get_soliton(k, args.c, args.delta)  # soliton degree distribution for the LT ('luby') code

    def callback(cb_args):
        # Runs in the pool's result-handler thread as each simulation finishes.
        sim_id, num_sym = cb_args
        log.info('sim_id=%d, num_sym=%d', sim_id, num_sym)
        arr.append(num_sym)
        saver.add_all({'arr': arr})

    pool = Pool(processes=args.pool)
    results = [
        pool.apply_async(simulate_cw, (x, omega, n), callback=callback)
        for x in range(args.count)
    ]
    for r in results:
        r.wait()
    pool.close()
    pool.join()
    log.info('Finished all!')
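
Example #1 fans the simulate_cw runs out across a multiprocessing.Pool and streams every result back through the callback, which logs it and checkpoints the growing list via Saver. A minimal driver sketch, assuming an argparse-style namespace with the fields the function reads (the field names come from the code above; the concrete values and the in-line construction are illustrative only):

import argparse

# Hypothetical values: k, c, delta parameterize the soliton distribution,
# n is passed to simulate_cw, pool sets the worker count, count the number of runs.
args = argparse.Namespace(
    k=1000, n=1100, c=0.03, delta=0.5,
    pool=4, count=100,
    debug=False, console=True, data_dir='data')
exec_pool(args)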
Example #2
import logging

# utils, codes, and models come from the surrounding project.
def main():
    args = utils.setup_parser(codes.get_code_names(), models.keys(),
                              utils.decoder_names).parse_args()
    log_level = logging.DEBUG if args.debug else logging.INFO
    if args.console:
        utils.setup_console_logger(log_level)
    else:
        utils.setup_file_logger(args.data_dir, 'test', log_level)

    test(args)
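
Example #2 only wires the parsed arguments into the logger setup before delegating to test(). The utils module is project-specific and not shown; as a rough sketch (an assumption, not the project's actual code), the two logger helpers could be built on the standard logging module like this:

import logging
import os

def setup_console_logger(level):
    # Sketch: send root logging to the console at the given level.
    logging.basicConfig(
        level=level,
        format='%(asctime)s %(name)s %(levelname)s: %(message)s')

def setup_file_logger(data_dir, name, level):
    # Sketch: write to <data_dir>/<name>.log instead of the console.
    logging.basicConfig(
        filename=os.path.join(data_dir, '%s.log' % name),
        level=level,
        format='%(asctime)s %(name)s %(levelname)s: %(message)s')

Note that Example #3 below uses a different utils whose setup_file_logger takes a single path and returns a logger object, so the helpers are not interchangeable across projects.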
Example #3
    # Fragment of a train/evaluate script; assumes os, json, pandas as pd,
    # torch, the argparse parser, and the project-local utils are already imported.
    # Load the parameters
    args = parser.parse_args()
    json_path = os.path.join(args.model_dir, 'params.json')
    assert os.path.isfile(json_path), \
        "No json configuration file found at {}".format(json_path)
    params = utils.Params(json_path)

    # use GPU if available
    params.cuda = torch.cuda.is_available()

    # Set the random seed for reproducible experiments
    torch.manual_seed(230)
    if params.cuda:
        torch.cuda.manual_seed(230)

    # Get the logger
    logger = utils.setup_file_logger(
        os.path.join(args.model_dir, 'evaluate.log'))

    # Create the input data pipeline
    utils.log("Creating the dataset...", logger)

    # load data and shift ratings from 1-based values to 0-based class labels
    data_test = pd.read_pickle(args.data_dir + "/test.pkl")
    data_test['rating'] = pd.to_numeric(data_test['rating'])
    data_test['rating'] = data_test['rating'] - 1
    # specify the test set size
    params.test_size = len(data_test)

    utils.log("- done.", logger)
    with open("data/vocab_to_index.json") as f:
        vocab_to_index = json.load(f)
    vocab_size = len(vocab_to_index)
    # use GPU if available
    params.cuda = torch.cuda.is_available()

    print('*** Fetching vocab indexing ***')
    with open("data/vocab_to_index.json") as f:
        vocab_to_index = json.load(f)
    vocab_size = len(vocab_to_index)


    # Set the random seed for reproducible experiments
    torch.manual_seed(560)
    if params.cuda:
        torch.cuda.manual_seed(560)

    # Set up logging
    logger = utils.setup_file_logger(os.path.join(args.model_dir, 'train.log'))

    # Create the input data pipeline
    utils.log("Loading the datasets...", logger )
    
    # load data and shift ratings from 1-based values to 0-based class labels
    data_train = pd.read_pickle(args.data_dir + "/train.pkl")
    data_train['rating'] = pd.to_numeric(data_train['rating'])
    data_train['rating'] = data_train['rating'] - 1
    data_val = pd.read_pickle(args.data_dir + "/dev.pkl")
    data_val['rating'] = pd.to_numeric(data_val['rating'])
    data_val['rating'] = data_val['rating'] - 1
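
The same three-line pattern (read a pickle, coerce rating to numeric, shift from 1-based ratings to 0-based labels) appears for the test, train, and dev splits in Example #3. A purely illustrative helper (load_split is a hypothetical name, not part of the original code) would remove the repetition:

import pandas as pd

def load_split(data_dir, split):
    # Load <data_dir>/<split>.pkl and convert 1-based ratings to 0-based labels.
    df = pd.read_pickle(data_dir + "/" + split + ".pkl")
    df['rating'] = pd.to_numeric(df['rating']) - 1
    return df

# Usage: data_train = load_split(args.data_dir, 'train')
#        data_val = load_split(args.data_dir, 'dev')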