def get(self, request, format=None):
    info = {utils.get_resource_name(name):
            utils.get_serializer_class(name)(
                utils.get_model_class(name).objects.all(),
                many=True).data
            for name in utils.get_info_names()}

    return Response(info)
Example #2
def test(cfg):
    """Test a model printing batch flow in call() method."""
    print(cfg)
    Model = get_model_class(cfg.model)
    print(Model.__name__)
    batch = [
        np.zeros(cfg.batch, dtype=np.float32) for _ in range(len(cfg._dss))
    ]
    model = Model(cfg, verbose=True)
    model(batch)
    model.summary()
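The test() helpers above and below expect an attribute-style config; a purely illustrative way to drive one of them, where the config fields and the registered model name are assumptions and not taken from the original project:

# Hypothetical driver for test(); 'dense' and every cfg field below are assumptions.
from types import SimpleNamespace

cfg = SimpleNamespace(
    model='dense',      # name resolved via get_model_class()
    batch=(4, 16),      # shape handed to np.zeros() for each dataset
    _dss=[None, None],  # one zero batch is built per entry
)
# test(cfg)  # would print the resolved class name and the batch flow through call()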
Example #3
def test(cfg):
  """Test a model printing batch flow in call() method."""
  print(cfg)
  Model = get_model_class(cfg.model)
  print(Model.__name__)
  xs = [np.zeros(cfg.batch, dtype=np.float32) for _ in range(len(cfg._dss))]
  model = Model(cfg, verbose=True)
  ys = model(xs)
  for i, y in enumerate(ys):
    print(
      f"  ys[{i}] {y.dtype} {y.shape}\n"
      f"  ys[{i}] {y.numpy().flatten()[:5]}"
    )
  model.summary()
Example #4
def setup_model(args, env):
    "Return the model for the given `args` in the Gym `env`."
    model_class = utils.get_model_class(args.model_class)
    policy_class = utils.get_policy_class(args.policy_class, args.model_class)

    utils.check_num_envs(args, policy_class)

    policy_kwargs = args.policy_kwargs
    if args.activation_fn is not None:
        policy_kwargs['activation_fn'] = utils.get_activation_fn(
            args.activation_fn)

    # Learning rate to try for PPO2: 1E-05
    # Learning rate to try for ACKTR: 1E-03
    learning_rate = utils.compute_learning_rate(args)

    model_kwargs = {
        'verbose': 1,
        'tensorboard_log': str(
            Path(f'./sdc_tensorboard/'
                 f'{args.model_class.lower()}_{args.policy_class.lower()}_'
                 f'{args.script_start}/')),
    }
    model_kwargs.update(args.model_kwargs)
    model_kwargs.update({
        'learning_rate': learning_rate,
        'policy_kwargs': policy_kwargs,
        'seed': args.seed,
    })

    utils.maybe_fix_nminibatches(model_kwargs, args, policy_class)

    if args.model_path is None:
        model = model_class(policy_class, env, **model_kwargs)
        return model

    if args.change_loaded_model:
        model_kwargs.pop('policy_kwargs', None)
        model = model_class.load(str(Path(args.model_path)), env,
                                 **model_kwargs)
    else:
        model = model_class.load(str(Path(args.model_path)), env)
    return model
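As a usage sketch only: setup_model() returns a Stable Baselines model, so a caller would typically build the environment first and then train. utils.make_env is the helper used elsewhere in these examples; the timestep budget below is an assumption.

# Hypothetical invocation sketch; the timestep budget is a placeholder value.
env = utils.make_env(args, num_envs=args.num_envs, seed=args.seed)
model = setup_model(args, env)
model.learn(total_timesteps=100_000)  # Stable Baselines training entry point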
Example #5
    template_name = 'index.html'

    def get_context_data(self, **kwargs):
        context = super(MainHomePage, self).get_context_data(**kwargs)

        forms_list = utils.get_info_forms()
        forms = list((".".join([utils.get_form_id(form), "html"]), form())
                     for form in forms_list)
        partials = utils.get_partials()
        context.update({'forms': forms, 'partials': partials})
        return context


for name in utils.get_info_names():
    gen_class = type(utils.get_viewset_name(name), (
        viewsets.ModelViewSet, ), {'model': utils.get_model_class(name)})
    setattr(sys.modules[__name__], gen_class.__name__, gen_class)


class InfoView(APIView):

    """
    View for getting a bulk of models information at once.
    """

    def get(self, request, format=None):

        info = {utils.get_resource_name(name):
                utils.get_serializer_class(name)(
                    utils.get_model_class(name).objects.all(),
                    many=True).data
                for name in utils.get_info_names()}

        return Response(info)
Example #6
                        help='Text file with embeddings vocabulary')
    parser.add_argument('-v',
                        help='Verbose',
                        action='store_true',
                        dest='verbose')
    parser.add_argument('-e',
                        help='Print pairs and labels that got a wrong answer',
                        action='store_true',
                        dest='errors')
    args = parser.parse_args()

    utils.config_logger(verbose=args.verbose)
    params = ioutils.load_params(args.model)
    sess = tf.InteractiveSession()

    model_class = utils.get_model_class(params)
    model = model_class.load(args.model, sess)
    word_dict, embeddings = ioutils.load_embeddings(args.embeddings,
                                                    args.vocabulary,
                                                    generate=False,
                                                    load_extra_from=args.model,
                                                    normalize=True)
    model.initialize_embeddings(sess, embeddings)
    label_dict = ioutils.load_label_dict(args.model)
    print('Label dict[Y] : ', label_dict['Y'])
    #    pairs = ioutils.read_corpus(args.dataset, params['lowercase'],
    #           params['language'])
    #dataset = utils.create_dataset(pairs, word_dict, label_dict)
    dataset, labels, sents1, sents2 = utils.create_dataset_SSQA(
        args.dataset, word_dict, label_dict)
    #for pair in pairs:
Example #7
def run_tests(model, args, seed=None, fig_path=None, stats_path=None, heat=True):
    """Run tests for the given `model` and `args`, using `seed` as the
    random seed.

    `fig_path` is an optional path to store result plots at.
    `stats_path` is an optional path where to save statistics about the
    reinforcement learning test.
    """
    # Load the trained agent for testing
    if isinstance(model, (Path, str)):
        model_class = utils.get_model_class(args.model_class)
        model = model_class.load(str(model))

    # Not vectorizing is faster for testing for some reason.
    num_test_envs = args.num_envs \
        if not args.use_sb3 and model.policy.recurrent else 1

    #fig_path = Path(f'results_{script_start}.pdf')
    #args.run_heat=False
    #if (args.test_heat):
    #    args.run_heat=True #Test with HeatEquation else test with TestEquation
    #run_tests(model, args, seed=eval_seed, fig_path=fig_path) 

    ntests = int(args.tests)
    ntests = utils.maybe_fix_ntests(ntests, num_test_envs)

    start_time = time.perf_counter()

    # Plot all three iteration counts over the lambda values
    plt.xlabel('re(λ)')
    plt.ylabel('iterations')

    # Test the trained model.
    env = utils.make_env(args, num_envs=num_test_envs, seed=seed,
                         lambda_real_interpolation_interval=None)
    results_RL = test_model(model, env, ntests, 'RL', stats_path=stats_path)
    plot_results(results_RL, color='b', label='RL')


    if (args.show_LU):
        # Restart the whole thing, but now using the LU preconditioner (no RL here)
        # LU is serial and the de-facto standard. Beat this (or at least be on par)
        # and we win!
        env = utils.make_env(
            args,
            num_envs=num_test_envs,
            prec='LU',
            seed=seed,
            lambda_real_interpolation_interval=None,
        )
        results_LU = test_model(model, env, ntests, 'LU')
        plot_results(results_LU, color='r', label='LU')



    if (args.show_MIN):
        # Restart the whole thing, but now using the minimization preconditioner
        # (no RL here)
        # This minimization approach just uses magic numbers we found using
        # indiesolver.com; parallel and proof-of-concept
        env = utils.make_env(
            args,
            num_envs=num_test_envs,
            prec='min',
            seed=seed,
            lambda_real_interpolation_interval=None,
        )
        results_min = test_model(model, env, ntests, 'MIN')
        plot_results(results_min, color='g', label='MIN')

    if (args.show_optMIN):
        # Restart the whole thing, but now using the optimized minimization preconditioner
        # (no RL here)
        env = utils.make_env(
            args,
            num_envs=num_test_envs,
            prec='optMIN',
            seed=seed,
            lambda_real_interpolation_interval=None,
        )
        results_optmin = test_model(model, env, ntests, 'optMIN')
        plot_results(results_optmin, color='y', label='optMIN')


    if (args.show_trivial0):
        # Restart the whole thing, but now using the trivial preconditioner (Q_d=0)
        # (no RL here)
        env = utils.make_env(
            args,
            num_envs=num_test_envs,
            prec='trivial0',
            seed=seed,
            lambda_real_interpolation_interval=None,
        )
        results_triv0 = test_model(model, env, ntests, 'trivial0')
        plot_results(results_triv0, color='c', label='trivial0')

    if (args.show_trivial1):
        # Restart the whole thing, but now using the trivial preconditioner (Q_d=I)
        # (no RL here)
        env = utils.make_env(
            args,
            num_envs=num_test_envs,
            prec='trivial1',
            seed=seed,
            lambda_real_interpolation_interval=None,
        )
        results_triv1 = test_model(model, env, ntests, 'trivial1')
        plot_results(results_triv1, color='m', label='trivial1')

    duration = time.perf_counter() - start_time
    print(f'Testing took {duration} seconds.')

    #plt.legend()

    if fig_path is not None:
        plt.savefig(fig_path, bbox_inches='tight')
    plt.show()
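For completeness, a usage sketch of run_tests(): because of the isinstance check at the top, a saved agent can be passed by path. The file names, seed, and output paths here are placeholders, not values from the project.

# Hypothetical call; 'sdc_model.zip' and both output paths are placeholders.
run_tests(Path('sdc_model.zip'), args, seed=0,
          fig_path=Path('results.pdf'), stats_path=Path('stats.pkl'))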
Example #8
def main():
    TokenizerClass = utils.get_tokenizer_class(args.model_name)
    ModelClass = utils.get_model_class(args.model_name)
    ConfigClass = utils.get_config_class(args.model_name)
    tokenizer = TokenizerClass.from_pretrained(args.model_path)
    config = ConfigClass.from_pretrained(args.model_path)
    args.model_config = config
    DATA = {}
    if args.train:
        data_trans = Data_Transformer(args, tokenizer)
        DATA['train'] = Dataset(data_trans, tokenizer, args.data_paths_train,
                                args, '{}.train'.format(args.exp_name))
        DATA['valid'] = Dataset(data_trans, tokenizer, args.data_paths_valid,
                                args, '{}.valid'.format(args.exp_name))
        train_dataloader = torch.utils.data.DataLoader(
            DATA['train'],
            batch_size=args.batch_size,
            collate_fn=data_trans.collate_fn,
            shuffle=True)
        valid_dataloader = torch.utils.data.DataLoader(
            DATA['valid'],
            batch_size=args.batch_size,
            collate_fn=data_trans.collate_fn,
            shuffle=True)
    if args.test:
        data_trans = Data_Transformer(args, tokenizer, test=True)
        DATA['test'] = Dataset(data_trans,
                               tokenizer,
                               args.data_paths_test,
                               args,
                               '{}.test'.format(args.exp_name),
                               test=True)

        test_dataloader = torch.utils.data.DataLoader(
            DATA['test'],
            batch_size=args.batch_size,
            collate_fn=data_trans.collate_fn,
            shuffle=False)

    result_path = os.path.join(args.tmp_path, 'result.pkl')
    if result_path not in args.tmp_files:
        args.tmp_files.append(result_path)
    if not os.path.isfile(result_path):
        results = {mode: {} for mode in ['train', 'valid']}
        results['test'] = {
            key: []
            for key in [
                'loss', 'pearson', 'pred', 'true', 'vector',
                'raw_src', 'raw_ref', 'raw_hyp'
            ]
        }
    else:
        with open(result_path, mode='rb') as r:
            results = pickle.load(r)
    if args.optimizer not in results['valid']:
        results['train'][args.optimizer] = {}
        results['valid'][args.optimizer] = {}
    if 'batch={}'.format(
            args.batch_size) not in results['valid'][args.optimizer]:
        results['train'][args.optimizer]['batch={}'.format(
            args.batch_size)] = [{
                key: []
                for key in [
                    'loss', 'pearson', 'pred', 'true', 'raw_src', 'raw_ref',
                    'raw_hyp'
                ]
            } for _ in range(args.trial_times)]
        results['valid'][args.optimizer]['batch={}'.format(
            args.batch_size)] = [{
                key: []
                for key in [
                    'loss', 'pearson', 'pred', 'true', 'raw_src', 'raw_ref',
                    'raw_hyp'
                ]
            } for _ in range(args.trial_times)]

    best_valid_pearson_path = os.path.join(args.tmp_path,
                                           'best_valid_pearson.pkl')
    if best_valid_pearson_path not in args.tmp_files:
        args.tmp_files.append(best_valid_pearson_path)
    if not os.path.isfile(best_valid_pearson_path):
        best_valid_pearson = {
            'optimizer': '',
            'batch_size': 0,
            'n_trial': 1,
            'epoch': 1,
            'pearson': -1.0
        }
    else:
        with open(best_valid_pearson_path, mode='rb') as r:
            best_valid_pearson = pickle.load(r)

    lang_availables = []  # only for test

    model = build_model(ModelClass, config, args)
    if args.train:
        if len(results['valid'][args.optimizer]['batch={}'.format(
                args.batch_size)][args.n_trial -
                                  1]['pearson']) != args.epoch_size:
            best_valid_pearson, results = _run_train(
                best_valid_pearson, train_dataloader, valid_dataloader, args,
                results, ModelClass, ConfigClass, model, config)
            with open(best_valid_pearson_path, mode='wb') as w:
                pickle.dump(best_valid_pearson, w)

        args.logger.info('Best Valid Pearson : {}'.format(
            best_valid_pearson['pearson']))
        args.logger.info(
            'Best Hyper-parameter : {}, batch={}, n_trial={}, epoch={}'.format(
                best_valid_pearson['optimizer'],
                best_valid_pearson['batch_size'],
                best_valid_pearson['n_trial'], best_valid_pearson['epoch']))

    if args.test:
        args.logger.info('running test')
        results, lang_availables = _run_test(test_dataloader, ModelClass,
                                             config, results, args,
                                             lang_availables, model)
        with open(result_path, mode='wb') as w:
            pickle.dump(results, w)
        args.logger.info('finished running test')
        if not args.darr:
            args.logger.info('--- Final Performance in Pearson---')
            txt = ""
            for lang in lang_availables:
                txt += '{} : {:.3f}'.format(
                    lang, results['test']['{}_pearson'.format(lang)]) + str(
                        os.linesep)
            txt += 'ave : {:.3f}'.format(
                np.mean([
                    results['test']['{}_pearson'.format(lang)]
                    for lang in lang_availables
                ])) + str(os.linesep)
            txt += 'all : {:.3f}'.format(results['test']['pearson']) + str(
                os.linesep)
            args.logger.info(txt)
            performance_summary_filepath = os.path.join(
                args.tmp_path, 'final_pearformance.txt')
            with open(performance_summary_filepath, mode='w',
                      encoding='utf-8') as w:
                w.write(txt)
            if performance_summary_filepath not in args.tmp_files:
                args.tmp_files.append(performance_summary_filepath)

        args.logger.info('moving tmp files to dump dir')
        for f in args.tmp_files:
            shutil.move(f, args.dump_path)
Example #9
def test(class_name, classes, cfg):
  """Tests a model printing batch flow in call() method."""
  ModelClass = get_model_class(class_name)
  model = ModelClass(cfg, classes, verbose=True)
  model.build(input_shape=tuple(cfg.batch_shape))
  model.summary()
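All of the examples above lean on a project-specific get_model_class helper. The actual implementations differ per project, but a minimal sketch of the common registry-lookup pattern, with MODEL_REGISTRY and register_model invented here for illustration, looks like this:

# Illustrative sketch only; not the implementation used by any project above.
MODEL_REGISTRY = {}

def register_model(cls):
    """Class decorator that records a model class under its own name."""
    MODEL_REGISTRY[cls.__name__] = cls
    return cls

def get_model_class(name):
    """Resolve a previously registered model class from its name."""
    try:
        return MODEL_REGISTRY[name]
    except KeyError:
        raise ValueError(f'unknown model class: {name!r}') from None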