# Example #1
# 0
def main():
    """Entry point: import usage (and optionally operative) event data,
    then write the user, application, and prediction summaries and report
    run statistics."""
    args = parse_args()
    usage_path = args['input_file_usage']
    operative_path = args['input_file_operative']
    applications_out = args['output_file_applications']
    users_out = args['output_file_users']
    prediction_out = args['output_file_applications_prediction']

    utils.log_config()
    logger = logging.getLogger(__name__)

    start_time = datetime.datetime.now()

    # Dataframes are exported to module scope for interactive debugging.
    global df
    df = data_helper.import_data(usage_path)

    global odf
    # Operative data is optional; leave it unset when no file was given.
    odf = data_helper.import_operative_data(operative_path) if operative_path else None

    logger.info("N of events: {}, from {} to {} ".format(
        len(df), df['datetime'].min(), df['datetime'].max()))

    create_user_summary(users_out)
    create_application_summary(applications_out)
    create_prediction_summary(prediction_out)

    print_stats(start_time)
# Example #2
# 0
def main():
    """Parse CLI options, configure hyper-parameters from ``--hp_file``,
    and launch training — distributed when more than one GPU is visible,
    otherwise as a single-process run (rank 0, no process group)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--hp_file', type=str, default='hparams.py')
    args = parser.parse_args()
    hp.configure(args.hp_file)
    fill_variables(hp)
    log_config(hp)

    os.makedirs(hp.save_dir, exist_ok=True)

    n_gpus = torch.cuda.device_count()
    # Plain attribute assignment is the idiomatic way to extend an
    # argparse.Namespace (was: args.__setattr__('n_gpus', n_gpus)).
    args.n_gpus = n_gpus

    if n_gpus > 1:
        run_distributed(run_training, args, hp)
    else:
        run_training(0, args, hp, None)
# Example #3
# 0
# Script entry point: configure hyper-parameters, build a language model,
# optionally wrap it for multi-GPU, and set up the optimizer.
# NOTE(review): this example is truncated — the body of the final
# `if hp.load_checkpoints:` is not visible here.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--hp_file', metavar='FILE', default='hparams.py')
    args = parser.parse_args()

    #overwrite_hparams(args)
    hp.configure(args.hp_file)
    fill_variables()
    # All outputs for this run go under a dedicated 'LM' subdirectory.
    hp.save_dir = os.path.join(hp.save_dir, 'LM')
    os.makedirs(hp.save_dir, exist_ok=True)

    # TensorBoard logging is only enabled in this specific debug mode.
    if hp.debug_mode == 'tensorboard':
        writer = SummaryWriter(f'{hp.save_dir}/logs/{hp.comment}')

    log_config()
    model = Model_lm(hp)

    # Apply the project's weight-initialization scheme to every submodule.
    model.apply(init_weight)

    if torch.cuda.device_count() > 1:
        # multi-gpu configuration
        ngpu = torch.cuda.device_count()
        device_ids = list(range(ngpu))
        model = torch.nn.DataParallel(model, device_ids)
    model.to(DEVICE)

    optimizer = torch.optim.Adam(model.parameters(), weight_decay=1e-5)

    # Resume bookkeeping; checkpoint loading follows (body not shown here).
    load_epoch = 0
    if hp.load_checkpoints:
# Example #4
# 0
    # NOTE(review): fragment — the matching `if` (presumably a
    # checkpoint-resume branch) is outside this view.
    else:
        # Fresh run: start from epoch 0 at the first optimization step.
        start_epoch = 0
        step = 1
    
    # Report total parameter count in millions before training starts.
    pytorch_total_params = sum(p.numel() for p in model.parameters())
    print('params = {0:.2f}M'.format(pytorch_total_params / 1000 / 1000))
    train_epoch(model, optimizer, args, hp, step=step, start_epoch=start_epoch, rank=rank)

# Script entry point: parse CLI options, configure hyper-parameters, and
# record the visible GPU count on the args namespace.
# NOTE(review): this example may continue beyond the visible lines.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--hp_file', type=str, default='hparams.py')
    parser.add_argument('--debug', action='store_true')
    args = parser.parse_args()
    hp.configure(args.hp_file)
    fill_variables(hp)
    log_config(hp)

    os.makedirs(hp.save_dir, exist_ok=True)

    # # multi-gpu setup
    # if torch.cuda.device_count() > 1:
    #     # multi-gpu configuration
    #     ngpu = torch.cuda.device_count()
    #     device_ids = list(range(ngpu))
    #     model = torch.nn.DataParallel(model, device_ids)
    #     model.cuda()
    # else:
    #     model.to(DEVICE)
    
    # Stash the GPU count on args so downstream code can branch on it.
    n_gpus = torch.cuda.device_count()
    args.__setattr__('n_gpus', n_gpus)