Example #1
    def execute(self) -> None:
        parser = create_parser()
        args = parser.parse_args()

        if not args.repository:
            parser.print_help()
            sys.exit(0)

        if not os.path.exists('sandbox'):
            os.makedirs('sandbox')

        logger.info("Downloading repository")

        subprocess.Popen(cwd='./sandbox',
                         args=['git', 'clone', args.repository],
                         stderr=subprocess.STDOUT,
                         stdout=subprocess.DEVNULL).communicate()

        logger.info("Done")
Example #2
def parse_event(str: str, on_syntax_error: Any = None):
    parser = create_parser(EventsLexer, EventsParser, InputStream(str), on_syntax_error)
    return parser.event().value
Example #3
def parse_events_from_file(filepath: str, on_syntax_error: Any = None):
    parser = create_parser(EventsLexer, EventsParser, FileStream(filepath), on_syntax_error)
    return parser.events().value
Example #4
def parse_command(str, on_syntax_error: Any = None):
    parser = create_parser(CommandsLexer, CommandsParser, InputStream(str),
                           on_syntax_error)
    return parser.command().value
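
Examples #2–#4 all rely on the same helper, create_parser(LexerClass, ParserClass, stream, on_syntax_error), whose body is not included here. A minimal sketch of how such a helper is typically written with the antlr4 Python runtime follows; the (line, column, msg) callback signature for on_syntax_error is an assumption:

from antlr4 import CommonTokenStream
from antlr4.error.ErrorListener import ErrorListener


class _CallbackErrorListener(ErrorListener):
    # Forwards ANTLR syntax errors to a user-supplied callback
    # (the (line, column, msg) signature is assumed).
    def __init__(self, on_syntax_error):
        self._on_syntax_error = on_syntax_error

    def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
        self._on_syntax_error(line, column, msg)


def create_parser(lexer_cls, parser_cls, stream, on_syntax_error=None):
    # Run the character stream through the generated lexer and wrap the
    # resulting tokens in the generated parser.
    lexer = lexer_cls(stream)
    parser = parser_cls(CommonTokenStream(lexer))
    if on_syntax_error is not None:
        # Replace the default console error listener with the callback wrapper.
        parser.removeErrorListeners()
        parser.addErrorListener(_CallbackErrorListener(on_syntax_error))
    return parser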
Example #5
    log('Hyperparameters: ' + str(parser_args), parser_args)
    
    best_metric_score, earlystop_cnt = float('-inf'), 0
    for epoch in range(1, parser_args.n_epochs+1):
        log('Starting Epoch: {}'.format(epoch), parser_args)
        train_losses = train(parser_args, unet, dataloader_train, epoch, optimizer, device)
        _ = validate(parser_args, unet, dataloader_train, epoch, device, name='Train')
        scores = validate(parser_args, unet, dataloader_val, epoch, device, name='Val')
        #scheduler.step(scores['avg_pearson'])
        
        if parser_args.earlystopping_patience <= 0:  # No early stopping
            if not (epoch % 10):
                log('Saving model', parser_args)
                save_checkpoint(unet, epoch, optimizer, wss_mean_std, os.path.join(parser_args.model_save_path, 'unet_best.pt'))
        else:  # Early stopping
            if scores['avg_pearson'] > best_metric_score:
                best_metric_score = scores['avg_pearson']
                earlystop_cnt = 0
                log('Saving best model', parser_args)
                save_checkpoint(unet, epoch, optimizer, wss_mean_std, os.path.join(parser_args.model_save_path, 'unet_best.pt'))
            else:
                earlystop_cnt += 1
                if earlystop_cnt >= parser_args.earlystopping_patience:
                    earlystop_cnt = 0
                    log('Early stopping at epoch %d' % epoch, parser_args)
                    break

if __name__ == "__main__":
    PARSER_ARGS = parse_config(create_parser())
    main(PARSER_ARGS)
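
Example #5 uses a save_checkpoint helper that is defined elsewhere. As a hedged sketch, a typical PyTorch implementation bundles the model and optimizer state together with the epoch and the wss_mean_std statistics into a single file (the exact keys stored by the original helper are not known):

import torch


def save_checkpoint(model, epoch, optimizer, wss_mean_std, path):
    # Collect everything needed to resume or evaluate the run in one dict.
    torch.save({
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'wss_mean_std': wss_mean_std,
    }, path)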
Example #6
        # Create the agent.
        dqn = DQN(model,
                  target_model_change,
                  gamma,
                  batch_size,
                  game.observation_space_shape,
                  game.action_space_size,
                  policy,
                  memory_size=replay_memory_size)

    return dqn


if __name__ == '__main__':
    # Get arguments.
    args = create_parser().parse_args()
    agent_name_prefix = args.filename_prefix
    results_name_prefix = args.results_name_prefix
    recording_name_prefix = args.recording_name_prefix
    results_save_interval = args.results_save_interval
    agent_save_interval = args.save_interval
    info_interval_current = args.info_interval_current
    info_interval_mean = args.info_interval_mean
    target_model_change = args.target_interval
    agent_path = args.agent
    agent_frame_history = args.agent_history
    plot_train_results = not args.no_plot
    save_plots = not args.no_save_plots
    plots_name_prefix = args.plot_name
    render = not args.no_render
    record = args.record
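
As in Example #1, create_parser() here builds an argparse parser; the attribute names read from args above suggest the set of options below. This is only a partial sketch, assuming flag spellings, types and defaults that the snippet does not confirm:

import argparse


def create_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(description='Train and evaluate a DQN agent')
    # Filename prefixes for saved artifacts (names taken from the attributes above).
    parser.add_argument('--filename_prefix', type=str, default='agent')
    parser.add_argument('--results_name_prefix', type=str, default='results')
    parser.add_argument('--recording_name_prefix', type=str, default='recording')
    parser.add_argument('--plot_name', type=str, default='plot')
    # Intervals controlling saving, logging and target-network updates.
    parser.add_argument('--results_save_interval', type=int, default=100)
    parser.add_argument('--save_interval', type=int, default=100)
    parser.add_argument('--info_interval_current', type=int, default=10)
    parser.add_argument('--info_interval_mean', type=int, default=100)
    parser.add_argument('--target_interval', type=int, default=1000)
    # Optional pre-trained agent to load and its frame-history length.
    parser.add_argument('--agent', type=str, default=None)
    parser.add_argument('--agent_history', type=int, default=4)
    # Boolean switches; the script negates the no_* flags when reading them.
    parser.add_argument('--no_plot', action='store_true')
    parser.add_argument('--no_save_plots', action='store_true')
    parser.add_argument('--no_render', action='store_true')
    parser.add_argument('--record', action='store_true')
    return parser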