Example no. 1
0
def main():
    """Authenticate against Gmail, build the API client, and start the listener."""
    credentials = None
    # token.pickle caches the user's access and refresh tokens; it is written
    # automatically the first time the authorization flow completes.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token_file:
            credentials = pickle.load(token_file)
    # No usable credentials: refresh if possible, otherwise run the OAuth flow.
    if not credentials or not credentials.valid:
        if credentials and credentials.expired and credentials.refresh_token:
            credentials.refresh(Request())
        else:
            oauth_flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            credentials = oauth_flow.run_local_server(port=0)
        # Persist the fresh credentials for the next run.
        with open('token.pickle', 'wb') as token_file:
            pickle.dump(credentials, token_file)

    gmail_service = build('gmail', 'v1', credentials=credentials)

    print('{} Establishing params'.format(getTime()))
    params = set_params()
    params['service'] = gmail_service
    params['user_id'] = 'me'
    printGreen('{} Successfully established params'.format(getTime()))

    listener = HaroListener(params)
    listener.messageLoop()
Example no. 2
0
File: train.py Project: yyht/daga
def main():
    """Training workflow.

    Builds args/fields/datasets, optionally resumes from a checkpoint,
    constructs the model and optimizer, and hands off to try_train_val.
    """
    args = utils.build_args(argparse.ArgumentParser())

    utils.init_logger(args.model_file)

    # Raise instead of assert: asserts are stripped under `python -O`.
    if not torch.cuda.is_available():
        raise RuntimeError("CUDA is required but not available")
    torch.cuda.set_device(args.gpuid)

    utils.init_random(args.seed)

    utils.set_params(args)
    logger.info("Config:\n%s", pformat(vars(args)))

    fields = utils.build_fields()
    logger.info("Fields: %s", fields.keys())

    logger.info("Load %s", args.train_file)
    train_data = LMDataset(fields, args.train_file, args.sent_length_trunc)
    logger.info("Training sentences: %d", len(train_data))
    logger.info("Load %s", args.valid_file)
    val_data = LMDataset(fields, args.valid_file, args.sent_length_trunc)
    logger.info("Validation sentences: %d", len(val_data))

    # Vocabulary is built from the training split only.
    fields["sent"].build_vocab(train_data)

    train_iter = utils.build_dataset_iter(train_data, args)
    val_iter = utils.build_dataset_iter(val_data, args, train=False)

    if args.resume and os.path.isfile(args.checkpoint_file):
        logger.info("Resume training")
        logger.info("Load checkpoint %s", args.checkpoint_file)
        # map_location keeps checkpoint tensors on CPU at load time.
        checkpoint = torch.load(args.checkpoint_file,
                                map_location=lambda storage, loc: storage)
        es_stats = checkpoint["es_stats"]
        # NOTE(review): set_args presumably merges the checkpoint's recorded
        # args over the CLI args — confirm against utils.set_args.
        args = utils.set_args(args, checkpoint)
    else:
        checkpoint = None
        es_stats = ESStatistics(args)

    model = utils.build_model(fields, args, checkpoint)
    logger.info("Model:\n%s", model)

    optimizer = utils.build_optimizer(model, args, checkpoint)

    try_train_val(fields, model, optimizer, train_iter, val_iter, es_stats,
                  args)
def main() -> None:
    """Run one federated-learning cycle against the grid server.

    Authenticates, joins a cycle, trains a local Q-learning agent on the
    downloaded global parameters, reports the weight diff, then evaluates
    the refreshed global model.
    """
    # Register this worker with the grid.
    auth = send_auth_request(GRID_ADDRESS, NAME, VERSION)
    worker_id = auth["data"]["worker_id"]

    # Join a training cycle and unpack the per-client hyperparameters.
    cycle = send_cycle_request(GRID_ADDRESS, NAME, VERSION, worker_id)
    cycle_data = cycle["data"]
    request_key = cycle_data["request_key"]
    model_id = cycle_data["model_id"]
    cfg = cycle_data["client_config"]
    alpha = cfg["alpha"]
    gamma = cfg["gamma"]
    min_epsilon = cfg["min_epsilon"]
    epsilon_reduction = cfg["epsilon_reduction"]
    n_train_iterations = cfg["n_train_iterations"]
    n_test_iterations = cfg["n_test_iterations"]

    downloaded_params = get_model_params(GRID_ADDRESS, worker_id, request_key,
                                         model_id)

    local_agent = QLearningAgent(
        input_width=INPUT_WIDTH,
        output_width=OUTPUT_WIDTH,
        hidden_width=HIDDEN_WIDTH,
        alpha=alpha,
        gamma=gamma,
        min_epsilon=min_epsilon,
        epsilon_reduction=epsilon_reduction,
    )
    set_params(local_agent, downloaded_params)

    # Baseline performance before any local training.
    _, pre_rets = run_epoch(n_test_iterations, local_agent, train=False)
    print(f"Pre-training performance: {sum(pre_rets) / n_test_iterations}")

    trained_params, _ = run_epoch(n_train_iterations, local_agent, train=True)

    _, post_rets = run_epoch(n_test_iterations, local_agent, train=False)
    print(f"Post-training performance: {sum(post_rets) / n_test_iterations}")

    # Upload only the delta between downloaded and locally trained weights.
    diff = calculate_diff(downloaded_params, trained_params)
    send_diff_report(GRID_ADDRESS, worker_id, request_key, diff)

    # Pull the aggregated global model and evaluate it locally.
    new_model_params = retrieve_model_params(GRID_ADDRESS, NAME, VERSION)
    set_params(local_agent, new_model_params)

    _, updated_rets = run_epoch(n_test_iterations, local_agent, train=False)
    print(
        f"Updated model performance: {sum(updated_rets) / n_test_iterations}")
Example no. 4
0
def main():
    """Generation workflow.

    Loads a trained model and samples args.num_sentences sentences in
    batches of args.batch_size, saving the result to args.out_file.
    """
    args = utils.build_gen_args(argparse.ArgumentParser())

    utils.init_logger(args.out_file)
    logger.info("Config:\n%s", pformat(vars(args)))

    # Raise instead of assert: asserts are stripped under `python -O`.
    if not torch.cuda.is_available():
        raise RuntimeError("CUDA is required but not available")
    torch.cuda.set_device(args.gpuid)

    utils.init_random(args.seed)

    logger.info("Load parameters from '%s'", args.model_file)
    # map_location keeps checkpoint tensors on CPU at load time.
    params = torch.load(args.model_file, map_location=lambda storage, loc: storage)

    utils.set_params(params["args"])

    fields = utils.load_fields_from_vocab(params["vocab"])
    logger.info("Fields: %s", fields.keys())

    model = utils.build_test_model(fields, params)

    # True division already yields a float in Python 3; no float() cast needed.
    num_batches = math.ceil(args.num_sentences / args.batch_size)
    samples = []
    with torch.no_grad():
        for i in range(num_batches):
            # Size of this batch; the last one may be smaller than batch_size.
            # (Replaces the original throwaway index list sliced per batch.)
            running_batch_size = min(
                args.batch_size, args.num_sentences - i * args.batch_size
            )
            samples.append(
                model.generate(
                    running_batch_size, args.max_sent_length, args.temperature
                )
            )
    samples = torch.cat(samples, 0)
    save(samples, fields, args.out_file)
Example no. 5
0
def run(mode):
    """Load config.yaml, apply per-mode params, and launch the worker(s).

    mode: one of 'dqn', 'dqn+1', 'hra', 'hra+1', or 'all' (runs every mode).
    """
    # Mode selection, e.g. 'hra+1'.
    valid_modes = ['dqn', 'dqn+1', 'hra', 'hra+1', 'all']
    if mode not in valid_modes:
        raise ValueError('invalid mode: {}'.format(mode))
    modes = valid_modes[:-1] if mode == 'all' else [mode]

    dir_path = os.path.dirname(os.path.realpath(__file__))
    cfg_file = os.path.join(dir_path, 'config.yaml')
    # Context manager closes the config file instead of leaking the handle.
    with open(cfg_file, 'r') as f:
        params = yaml.safe_load(f)

    for m in modes:
        params = set_params(params, m)
        worker(params)


# if __name__ == '__main__':
#     run(mode='hra+1')
Example no. 6
0
def run(mode, options):
    """Load config.yaml, apply CLI overrides, and launch the worker(s).

    mode: one of 'dqn', 'dqn+1', 'hra', 'hra+1', or 'all' (runs every mode).
    options: iterable of (key, value-string) pairs overriding config entries;
        each value is coerced to the type of the existing config value.
    """
    valid_modes = ['dqn', 'dqn+1', 'hra', 'hra+1', 'all']
    if mode not in valid_modes:
        raise ValueError('invalid mode: {}'.format(mode))
    modes = valid_modes[:-1] if mode == 'all' else [mode]

    dir_path = os.path.dirname(os.path.realpath(__file__))
    cfg_file = os.path.join(dir_path, 'config.yaml')
    # Context manager closes the config file instead of leaking the handle.
    with open(cfg_file, 'r') as f:
        params = yaml.safe_load(f)
    # Replacing params with command line options.
    for opt in options:
        if opt[0] not in params:
            raise KeyError('unknown config option: {}'.format(opt[0]))
        dtype = type(params[opt[0]])
        if dtype is bool:
            # bool('False') is truthy, so compare against the literal string.
            new_opt = opt[1] == 'True'
        else:
            new_opt = dtype(opt[1])
        params[opt[0]] = new_opt

    for m in modes:
        params = set_params(params, m)
        worker(params)
Example no. 7
0
create_metric_holders, wg_eval_acc_metrics_update_i, \
sg_eval_acc_metrics_update_i, save_eval_metrics

# Seed every RNG in play from wall-clock time: runs differ between launches,
# but the seed is printed so a run can be reproduced.
randseed = int(time.time())
print("random seed: ", randseed)
random.seed(randseed)
npr.seed(randseed)  # presumably numpy.random imported as npr — verify imports
ed.set_seed(randseed)  # presumably the Edward library — verify imports
tf.set_random_seed(randseed)  # TF1-style global graph-level seed

if __name__ == '__main__':

    parser = create_argparser()
    args = parser.parse_args()

    all_params = set_params(args)
    DATA_DIR, CAUSEFIT_DIR, OUT_DATA_DIR, \
        outdim, caudim, thold, M, n_iter, binary, \
        pri_U, pri_V, alpha = all_params

    print("setting params....")
    print("data/cause/out directories", DATA_DIR, CAUSEFIT_DIR, OUT_DATA_DIR)
    print("relevance thold", thold)
    print("batch size", M, "n_iter", n_iter)
    print("outdim", outdim)
    print("caudim", caudim)
    print("prior sd on U", pri_U, "prior sd on V", pri_V)
    print("alpha", alpha)

    unique_uid = list()
    with open(os.path.join(DATA_DIR, 'unique_uid.txt'), 'r') as f:
Example no. 8
0
 def set_params(self):
     """Persist self.p to the params file, then reload it back into self.p."""
     # NOTE(review): the reload presumably lets self.p pick up whatever
     # normalization utils.get_params applies on read — confirm.
     utils.set_params(self.params_file, self.p)
     self.p = utils.get_params(self.params_file)
Example no. 9
0
File: test.py Project: yyht/daga
def main():
    """Test workflow: evaluate a trained model on the test set.

    Either reports the standard NLL/KL/PPL via validate(), or an
    importance-weighted NLL via report_iw_nll() when args.report_iw_nll
    is set.
    """
    args = utils.build_test_args(argparse.ArgumentParser())

    # Log-file suffix encodes the evaluation mode.
    suff = ".test"
    if args.report_iw_nll:
        # IW batches must tile num_iw_samples exactly (see the split below).
        if (
            args.num_iw_samples > args.iw_batch_size
            and args.num_iw_samples % args.iw_batch_size != 0
        ):
            raise RuntimeError("Expected num_iw_samples divisible by iw_batch_size")
        suff = ".test.iw" + str(args.num_iw_samples)

    utils.init_logger(args.model_file + suff)
    logger.info("Config:\n%s", pformat(vars(args)))

    assert torch.cuda.is_available()
    torch.cuda.set_device(args.gpuid)

    utils.init_random(args.seed)

    logger.info("Load parameters from '%s'", args.model_file)
    # map_location keeps checkpoint tensors on CPU at load time.
    params = torch.load(args.model_file, map_location=lambda storage, loc: storage)

    utils.set_params(params["args"])

    fields = utils.load_fields_from_vocab(params["vocab"])
    logger.info("Fields: %s", fields.keys())

    model = utils.build_test_model(fields, params)
    logger.info("Model:\n%s", model)

    logger.info("Load %s", args.test_file)
    test_data = LMDataset(fields, args.test_file, args.sent_length_trunc)
    logger.info("Test sentences: %d", len(test_data))

    # Deterministic, non-shuffled iteration; sorting only within a batch.
    test_iter = utils.OrderedIterator(
        dataset=test_data,
        batch_size=args.batch_size,
        device=params["args"].device,
        train=False,
        shuffle=False,
        repeat=False,
        sort=False,
        sort_within_batch=True,
    )

    # IW-NLL needs an encoder; fall back to plain validation without one.
    if model.encoder is None:
        args.report_iw_nll = False
        logger.info("Force report_iw_nll to False")

    start_time = time.time()
    logger.info("Start testing")
    if args.report_iw_nll:
        if args.num_iw_samples <= args.iw_batch_size:
            n_iw_iter = 1
        else:
            # Split the samples into n_iw_iter chunks of iw_batch_size each.
            # NOTE: args.num_iw_samples is deliberately overwritten here so
            # the call below receives the per-iteration sample count.
            n_iw_iter = args.num_iw_samples // args.iw_batch_size
            args.num_iw_samples = args.iw_batch_size

        test_stats = report_iw_nll(model, test_iter, n_iw_iter, args.num_iw_samples)
        logger.info(
            "Results: test nll %.2f | test ppl %.2f", test_stats.nll(), test_stats.ppl()
        )
    else:
        test_stats = validate(model, test_iter)
        logger.info(
            "Results: test nll %.2f | test kl %.2f | test ppl %.2f",
            test_stats.nll(),
            test_stats.kl(),
            test_stats.ppl(),
        )

    logger.info("End of testing: time %.1f min", (time.time() - start_time) / 60)
Example no. 10
0
import argparse
from utils import set_params, set_download_dir, format_datetime, scrape_images, extract_average_color, classify_images, df_to_csv, df_to_excel, label_images
    
if __name__ == "__main__":
    cli = argparse.ArgumentParser()
    profile, num, startdate, enddate, mode, season = set_params(cli)

    # Destination folder for the scraped images.
    output = set_download_dir(startdate, enddate)

    # Normalise the date range strings to datetime objects.
    date_fmt = '%Y-%m-%d'
    startdate, enddate = format_datetime(date_fmt, startdate, enddate)

    if mode == '0':
        # Scrape images and save them in the output folder.
        print("starting download.....")
        scrape_images(profile, num, startdate, enddate, output)
        print("Download Complete!")
    elif mode == '1':
        # Detect colours and export the dataframe as .csv.
        classify_images(output, '.jpg','color')
    elif mode == '2':
        # Hand-label the scraped images.
        label_images(output, '.jpg', season)
    #elif mode == '3':