Exemple #1
0
 def test_run(ind, matchup_type):
     """Run one training battle (Q-strategy vs. random) and report the result.

     Returns 1 when the Q-strategy trainer (index 0) wins, else 0.
     """
     pokemon_left, pokemon_right = generator.generate(2, matchup_type)
     challenger = Trainer("Trainer A", pokemon_left, q_strat)
     opponent = Trainer("Trainer B", pokemon_right, FullyRandomStrategy())
     outcome = Battle(challenger, opponent, training_mode=True).run()
     return 1 if outcome == 0 else 0
Exemple #2
0
 def _new_battle(self, pokemon_generator: PokemonGenerator) -> Battle:
     """Build a training battle pitting this strategy against a random one."""
     own_team, rival_team = pokemon_generator.generate(2)
     own_trainer = Trainer("self", own_team, self)
     rival_trainer = Trainer("sparring partner", rival_team,
                             FullyRandomStrategy())
     return Battle(own_trainer, rival_trainer, training_mode=True)
Exemple #3
0
    def add_trainer():
        """Create a trainer from the JSON request body.

        Expects exactly the keys name, gender, and age. Aborts with 400 on
        validation failure; returns the inserted trainer as JSON on success.
        """
        request_data = request.get_json()

        try:
            # Reject a missing/unparseable JSON body explicitly instead of
            # crashing with a TypeError on len(None) below.
            if request_data is None:
                raise AuthError({'description': 'Request body is missing.'},
                                400)
            if len(request_data) > 3:
                raise AuthError(
                    {
                        'description':
                        'Please include only the name, gender, and age of trainer.'
                    }, 400)
            # Use .get() so an absent key produces the intended 400 AuthError
            # rather than an uncaught KeyError (HTTP 500).
            if not request_data.get('name'):
                raise AuthError({'description': 'Trainer name is missing.'},
                                400)
            if not request_data.get('gender'):
                raise AuthError({'description': 'Trainer gender is missing.'},
                                400)
            if not request_data.get('age'):
                raise AuthError({'description': 'Trainer age is missing.'},
                                400)
        except AuthError as e:
            abort(e.status_code, e.error)

        new_trainer = Trainer()
        new_trainer.name = request_data['name']
        new_trainer.gender = request_data['gender']
        new_trainer.age = request_data['age']

        new_trainer.insert()

        return jsonify({'success': True, 'new_trainer': new_trainer.format()})
Exemple #4
0
 def select_by_id(self, code):
     """Fetch the trainer row matching `code`; return a Trainer or None."""
     cursor = self._db.connection.cursor()
     cursor.execute(SQL_SELECT_BY_ID, [code])
     row = cursor.fetchone()
     if row is None:
         return None
     return Trainer(row[0], row[1], row[2], row[3])
    def test_get_sessions_fail(self):
        """GET /sessions with a JSON body must be rejected with 405."""
        trainer = Trainer(name='Mostafa', gender='male', age=28)
        trainer.insert()

        client = Client(name='Yehia', gender='male', age=102)
        client.insert()

        session = Session(name='Mostafa-Yehia training session',
                          trainer_id=trainer.id,
                          client_id=client.id)
        session.insert()

        res = self.client().get(
            '/sessions',
            json={'age': 75},
            headers={'Authorization': 'Bearer {}'.format(manager_jwt_token)})
        data = json.loads(res.data)

        self.assertEqual(res.status_code, 405)
        self.assertEqual(data['success'], False)
        self.assertEqual(data['message'], 'method not allowed')

        # Clean up child row first: the session holds foreign keys to the
        # trainer and client, so deleting the parents first can violate FK
        # constraints on databases that enforce them.
        session.delete()
        trainer.delete()
        client.delete()
Exemple #6
0
def train_and_predict():
    """Cross-validate an MLP regressor over cached per-dataset features.

    Features are generated once from `datasets[2:]` and cached to Xy.pkl;
    subsequent runs load the cache. Each KFold split concatenates per-dataset
    feature groups, fits an MLPRegressor on a fixed feature subset, and prints
    the Pearson correlation between predictions and targets.
    """
    from scipy.stats import pearsonr  # hoisted out of the CV loop

    model = Trainer()
    filename = "Xy.pkl"
    if not os.path.exists(filename):
        Xs, ys = model.generate_features_from_multiple_files(datasets[2:])
        # Context managers close the pickle files promptly (the originals
        # leaked the handles returned by bare open() calls).
        with open(filename, "wb") as fh:
            pickle.dump([Xs, ys], fh)
    else:
        with open(filename, "rb") as fh:
            Xs, ys = pickle.load(fh)
    Xs, ys = np.array(Xs), np.array(ys)

    kf = KFold(n_splits=4)
    print(Xs.shape)
    # NOTE(review): range(4) assumes exactly four feature groups in
    # datasets[2:] — confirm against the `datasets` definition.
    for train_index, test_index in kf.split(range(4)):
        train_X = np.concatenate(Xs[train_index], 0)
        test_X = np.concatenate(Xs[test_index], 0)
        train_y = np.concatenate(ys[train_index], 0)
        test_y = np.concatenate(ys[test_index], 0)
        print(train_X.shape)
        print(datasets[2:][test_index[0]].name)

        # Fixed hand-picked feature subset: columns 55 and 59 first, then 0-54.
        feature_index = [55, 59] + list(range(55))
        model = MLPRegressor(max_iter=50,
                             hidden_layer_sizes=100,
                             activation="logistic")
        # (Removed the no-op `model.n_layers_ = 2`: fit() recomputes that
        # attribute, so pre-setting it had no effect.)
        print(train_X[:2, feature_index])
        reg = model.fit(train_X[:, feature_index], train_y)

        predicted = reg.predict(test_X[:, feature_index])
        print(test_X[:, feature_index].shape)
        print(pearsonr(predicted, test_y))
    def test_client_get_trainers_success(self):
        """A client token may list trainers: expect 200 and a non-empty list."""
        trainer = Trainer(name='Mostafa', gender='male', age=28)
        trainer.insert()

        auth_header = {'Authorization': 'Bearer {}'.format(client_jwt_token)}
        res = self.client().get('/trainers', headers=auth_header)
        data = json.loads(res.data)

        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertTrue(data['trainers'])

        trainer.delete()
def get_metadata(db_session, betfair_api, event):
    """Print jockey/trainer metadata for runners already tracked for `event`.

    Pulls up to 25 market catalogues (with RUNNER_METADATA) from Betfair and,
    for each market/runner that exists in the local DB, prints the runner's
    JOCKEY_NAME and TRAINER_NAME metadata fields.

    NOTE(review): the persistence branch below is disabled — `market_runner`
    is hard-coded to None, so no Jockey/Trainer rows are ever created here
    and the final commit persists nothing. Confirm whether this is intended.
    """
    bfl_markets = betfair_api.betting.list_market_catalogue(
        filter=bfl.filters.market_filter(event_ids=[event.betfair_id], ),
        max_results=25,
        market_projection=['RUNNER_METADATA'])
    for bfl_market in bfl_markets:
        # Only report markets we already track for this event.
        market = db_session.query(Market).filter(
            Market.event_id == event.id,
            Market.betfair_id == bfl_market.market_id).one_or_none()
        if market:
            print(f"  Market: {market.name}")
            for bfl_runner in bfl_market.runners:
                # Match Betfair runner to the locally stored Runner row.
                runner = db_session.query(Runner).filter(
                    Runner.betfair_id ==
                    bfl_runner.selection_id).one_or_none()
                if runner:
                    print(f"    Runner: {runner.name}")
                    print(
                        f"      Jockey: {bfl_runner.metadata['JOCKEY_NAME']}")
                    print(
                        f"      Trainer: {bfl_runner.metadata['TRAINER_NAME']}"
                    )
                    # Disabled lookup kept for reference; see NOTE in docstring.
                    market_runner = None  #db_session.query(MarketRunner).filter(MarketRunner.market_id == market.id, MarketRunner.runner_id == runner.id).one_or_none()
                    if market_runner:
                        jockey_name = bfl_runner.metadata['JOCKEY_NAME']
                        if jockey_name:
                            jockey = db_session.query(Jockey).filter(
                                Jockey.name == jockey_name).one_or_none()
                            if not jockey:
                                jockey = Jockey(name=jockey_name, )
                                db_session.add(jockey)
                            market_runner.jockey_id = jockey.id
                        trainer_name = bfl_runner.metadata['TRAINER_NAME']
                        if trainer_name:
                            trainer = db_session.query(Trainer).filter(
                                Trainer.name == trainer_name).one_or_none()
                            if not trainer:
                                trainer = Trainer(name=trainer_name, )
                                db_session.add(trainer)
                            market_runner.trainer_id = trainer.id
    db_session.commit()
Exemple #9
0
def addTrainer():
    """Validate and store a submitted trainer code, then redirect to index.

    Rejects codes that are not exactly 12 digits, and codes re-submitted
    within 24 hours of their previous insertion.
    """
    from datetime import timedelta

    code = request.form['trainerCode'].replace(' ', '')
    ok = True
    if len(code) != 12:
        flash('trainer code must contain 12 numbers')
        ok = False
    if not code.isdecimal():
        flash('trainer code must be only made of numbers')
        ok = False

    t = trainerDAO.select_by_id(code)
    # Bug fix: the old test `t.dateInserted - datetime.now()` was truthy for
    # ANY nonzero time difference, so it never expressed the intended
    # "must wait 24h" rule. Compare the elapsed time against 24 hours.
    if t is not None and datetime.now() - t.dateInserted < timedelta(hours=24):
        flash('trainer must wait 24h before sending their code again')
        ok = False
    if ok:
        team = request.form['team']
        country = request.form['country']
        trainer = Trainer(code, team, country)
        trainerDAO.insert(trainer)
    return redirect(url_for('index'))
    def test_get_trainers_fail(self):
        """GET /trainers with a JSON body must be rejected with 405."""
        trainer = Trainer(name='Mostafa', gender='male', age=28)
        trainer.insert()

        auth_header = {'Authorization': 'Bearer {}'.format(manager_jwt_token)}
        res = self.client().get('/trainers',
                                headers=auth_header,
                                json={'age': 75})
        data = json.loads(res.data)

        self.assertEqual(res.status_code, 405)
        self.assertEqual(data['success'], False)
        self.assertEqual(data['message'], 'method not allowed')

        trainer.delete()
Exemple #11
0
 def train_basic(self, dataset, modelname, setting, lr, n_epochs,
                 batch_size):
     """Train a mention-pair scorer on `dataset`, then evaluate on "test"."""
     ids, X_data, Y_data, ents, gold_mentions = utils.load_data(
         dataset, setting)
     # Score mention pairs; evaluation later surfaces the top-scoring mention.
     scorer = MentionPairScore(X_data, Y_data, model_name=modelname)
     runner = Trainer(scorer,
                      lr=lr,
                      num_epochs=n_epochs,
                      batch_size=batch_size)
     runner.train()
     (ids_val, X_data_val, Y_data_val, ents_val,
      gold_mentions_val) = utils.load_data("test", setting)
     runner.evaluate(X_data_val, Y_data_val, ents_val, gold_mentions_val,
                     ids_val)
    def test_get_sessions_success(self):
        """GET /sessions with a manager token returns 200 and the sessions."""
        trainer = Trainer(name='Mostafa', gender='male', age=28)
        trainer.insert()

        client = Client(name='Yehia', gender='male', age=102)
        client.insert()

        session = Session(name='Mostafa-Yehia training session',
                          trainer_id=trainer.id,
                          client_id=client.id)
        session.insert()

        res = self.client().get(
            '/sessions',
            headers={'Authorization': 'Bearer {}'.format(manager_jwt_token)})
        data = json.loads(res.data)

        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertTrue(data['sessions'])

        # Clean up child row first: the session holds foreign keys to the
        # trainer and client, so deleting the parents first can violate FK
        # constraints on databases that enforce them.
        session.delete()
        trainer.delete()
        client.delete()
Exemple #13
0
def main():
    """Parse test-time arguments, configure device/logging, and run testing.

    Builds the referenceSR test configuration, resolves GPU ids, optionally
    initializes distributed mode, prepares the save folder and log file, then
    runs Trainer(args).test().
    """
    parser = argparse.ArgumentParser(description='referenceSR Testing')
    parser.add_argument('--random_seed', default=0, type=int)
    parser.add_argument('--name',
                        default='test_masa_rec_TestSet_multi',
                        type=str)
    parser.add_argument('--phase', default='test', type=str)

    ## device setting
    parser.add_argument('--gpu_ids',
                        type=str,
                        default='0',
                        help='gpu ids: e.g. 0  0,1,2, 0,2. use -1 for CPU')
    parser.add_argument('--launcher',
                        choices=['none', 'pytorch'],
                        default='none',
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)

    ## network setting
    parser.add_argument('--net_name', default='MASA', type=str, help='')
    parser.add_argument('--sr_scale', default=4, type=int)
    parser.add_argument('--input_nc', default=3, type=int)
    parser.add_argument('--output_nc', default=3, type=int)
    parser.add_argument('--nf', default=64, type=int)
    parser.add_argument('--n_blks', default='4, 4, 4', type=str)
    parser.add_argument('--nf_ctt', default=32, type=int)
    parser.add_argument('--n_blks_ctt', default='2, 2, 2', type=str)
    parser.add_argument('--num_nbr', default=1, type=int)
    parser.add_argument('--n_blks_dec', default=10, type=int)
    parser.add_argument('--ref_level', default=1, type=int)

    ## dataloader setting
    parser.add_argument('--data_root',
                        default='/home/liyinglu/newData/datasets/SR/',
                        type=str)
    parser.add_argument('--dataset', default='CUFED', type=str, help='CUFED')
    parser.add_argument('--crop_size', default=256, type=int)
    parser.add_argument('--batch_size', default=1, type=int)
    parser.add_argument('--num_workers', default=4, type=int)
    # NOTE(review): argparse `type=bool` treats any non-empty string as True
    # ("--data_augmentation False" yields True). Kept as-is for CLI
    # compatibility; consider action='store_true' like the training script.
    parser.add_argument('--data_augmentation', default=False, type=bool)

    parser.add_argument('--resume',
                        default='./pretrained_weights/masa_rec.pth',
                        type=str)
    parser.add_argument('--testset',
                        default='TestSet_multi',
                        type=str,
                        help='Sun80 | Urban100 | TestSet_multi')
    parser.add_argument('--save_folder', default='./test_results/', type=str)

    ## setup training environment
    args = parser.parse_args()

    ## setup training device: keep non-negative ids (-1 means CPU); avoid
    ## shadowing the builtin `id` used by the original loop.
    parsed_ids = [int(tok) for tok in args.gpu_ids.split(',')]
    args.gpu_ids = [gid for gid in parsed_ids if gid >= 0]
    if len(args.gpu_ids) > 0:
        torch.cuda.set_device(args.gpu_ids[0])

    #### distributed training settings
    if args.launcher == 'none':  # disabled distributed training
        args.dist = False
        args.rank = -1
        print('Disabled distributed training.')
    else:
        args.dist = True
        init_dist()
        args.world_size = torch.distributed.get_world_size()
        args.rank = torch.distributed.get_rank()

    args.save_folder = os.path.join(args.save_folder, args.testset, args.name)
    # exist_ok avoids the exists()/makedirs race of the original code.
    os.makedirs(args.save_folder, exist_ok=True)
    log_file_path = os.path.join(args.save_folder,
                                 time.strftime('%Y%m%d_%H%M%S') + '.log')
    setup_logger(log_file_path)

    print_args(args)
    cudnn.benchmark = True

    ## test model
    trainer = Trainer(args)
    trainer.test()
Exemple #14
0
# NOTE(review): flat top-level script — it runs the whole pipeline on import.
# Consider wrapping the statements below in `if __name__ == "__main__":`.
from argument_parser import argument_parser
from models import Trainer
from utils import table_printer

# Parse CLI arguments and pretty-print the configuration for the run log.
args = argument_parser()
table_printer(args)

# Initialize a trainer
trainer = Trainer(args)

# load edgelist and split into training, test and validation set
trainer.setup_features()

# Setup a Biological Network Embedding model
trainer.setup_model()
# Prepare training batches, then fit and evaluate the embedding model.
trainer.setup_training_data()
trainer.fit()
trainer.evaluate()
Exemple #15
0
 def create_trainer_from_tuple(trainers):
     """Build a Trainer from the first four fields of `trainers`."""
     first, second, third, fourth = (trainers[0], trainers[1], trainers[2],
                                     trainers[3])
     return Trainer(first, second, third, fourth)
Exemple #16
0
def get_markets(db_session, betfair_api, event):
    """Sync WIN markets and their runners for `event` from Betfair to the DB.

    Creates missing Market, Runner, MarketRunner, Jockey and Trainer rows,
    commits once at the end, and returns the list of Market rows (existing or
    newly created) for the event.
    """
    markets = []
    bfl_markets = betfair_api.betting.list_market_catalogue(
        filter=bfl.filters.market_filter(
            event_ids=[event.betfair_id],
            market_type_codes=['WIN'],
        ),
        max_results=25,
        market_projection=['MARKET_START_TIME', 'RUNNER_METADATA'])
    for bfl_market in bfl_markets:
        market = db_session.query(Market).filter(
            Market.event_id == event.id,
            Market.betfair_id == bfl_market.market_id).one_or_none()
        if not market:
            print(
                f"New market: {bfl_market.market_start_time}: {bfl_market.market_name} ({len(bfl_market.runners)} runners)"
            )
            market = Market(event_id=event.id,
                            betfair_id=bfl_market.market_id,
                            name=bfl_market.market_name,
                            start_time=bfl_market.market_start_time,
                            total_matched=bfl_market.total_matched)
            db_session.add(market)
            # Flush so the autogenerated market.id is populated before it is
            # used below, instead of relying on session autoflush settings.
            db_session.flush()
        markets.append(market)
        for bfl_runner in bfl_market.runners:
            runner = db_session.query(Runner).filter(
                Runner.betfair_id == bfl_runner.selection_id).one_or_none()
            if not runner:
                runner = Runner(
                    betfair_id=bfl_runner.selection_id,
                    name=bfl_runner.runner_name,
                )
                db_session.add(runner)
                db_session.flush()  # populate runner.id for the query below
            market_runner = db_session.query(MarketRunner).filter(
                MarketRunner.market_id == market.id,
                MarketRunner.runner_id == runner.id).one_or_none()
            if not market_runner:
                market_runner = MarketRunner(
                    market_id=market.id,
                    runner_id=runner.id,
                    sort_priority=bfl_runner.sort_priority)
                jockey_name = bfl_runner.metadata['JOCKEY_NAME']
                if jockey_name:
                    jockey = db_session.query(Jockey).filter(
                        Jockey.name == jockey_name).one_or_none()
                    if not jockey:
                        jockey = Jockey(name=jockey_name)
                        db_session.add(jockey)
                        # Bug fix: without a flush here, jockey.id is still
                        # None (no query intervenes to trigger autoflush), so
                        # the link row was built with a NULL foreign key.
                        db_session.flush()
                    market_runner.jockey_id = jockey.id
                trainer_name = bfl_runner.metadata['TRAINER_NAME']
                if trainer_name:
                    trainer = db_session.query(Trainer).filter(
                        Trainer.name == trainer_name).one_or_none()
                    if not trainer:
                        trainer = Trainer(name=trainer_name)
                        db_session.add(trainer)
                        db_session.flush()  # same NULL-FK fix as for jockeys
                    market_runner.trainer_id = trainer.id
                db_session.add(market_runner)
    db_session.commit()
    return markets
Exemple #17
0
def main():
    """Parse training arguments, set up device/dirs/logging, and train.

    Builds the referenceSR training configuration, seeds RNGs, resolves GPU
    ids, optionally initializes distributed mode, prepares save/vis/snapshot
    directories and the log file, then runs Trainer(args).train().
    """
    warnings.filterwarnings("ignore")
    parser = argparse.ArgumentParser(description='referenceSR Training')
    parser.add_argument('--random_seed', default=0, type=int)
    parser.add_argument('--name', default='train_masa', type=str)
    parser.add_argument('--phase', default='train', type=str)

    ## device setting
    parser.add_argument('--gpu_ids',
                        type=str,
                        default='0',
                        help='gpu ids: e.g. 0  0,1,2, 0,2. use -1 for CPU')
    parser.add_argument('--launcher',
                        choices=['none', 'pytorch'],
                        default='none',
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)

    ## network setting
    parser.add_argument('--net_name',
                        default='MASA',
                        type=str,
                        help='RefNet | Baseline')
    parser.add_argument('--sr_scale', default=4, type=int)
    parser.add_argument('--input_nc', default=3, type=int)
    parser.add_argument('--output_nc', default=3, type=int)
    parser.add_argument('--nf', default=64, type=int)
    parser.add_argument('--n_blks', default='4, 4, 4', type=str)
    parser.add_argument('--nf_ctt', default=32, type=int)
    parser.add_argument('--n_blks_ctt', default='2, 2, 2', type=str)
    parser.add_argument('--num_nbr', default=1, type=int)
    parser.add_argument('--n_blks_dec', default=10, type=int)

    ## dataloader setting
    parser.add_argument('--data_root',
                        default='/home/liyinglu/newData/datasets/SR/',
                        type=str)
    parser.add_argument('--dataset', default='CUFED', type=str, help='CUFED')
    parser.add_argument('--testset',
                        default='TestSet',
                        type=str,
                        help='TestSet')
    parser.add_argument('--save_test_root', default='generated', type=str)
    parser.add_argument('--crop_size', default=256, type=int)
    parser.add_argument('--batch_size', default=9, type=int)
    parser.add_argument('--num_workers', default=4, type=int)
    parser.add_argument('--multi_scale', action='store_true')
    parser.add_argument('--data_augmentation', action='store_true')

    ## optim setting
    parser.add_argument('--lr', default=1e-4, type=float)
    parser.add_argument('--lr_D', default=1e-4, type=float)
    parser.add_argument('--weight_decay', default=0, type=float)
    parser.add_argument('--start_iter', default=0, type=int)
    parser.add_argument('--max_iter', default=500, type=int)

    parser.add_argument('--loss_l1', action='store_true')
    parser.add_argument('--loss_mse', action='store_true')
    parser.add_argument('--loss_perceptual', action='store_true')
    parser.add_argument('--loss_adv', action='store_true')
    parser.add_argument('--gan_type', default='WGAN_GP', type=str)

    parser.add_argument('--lambda_l1', default=1, type=float)
    parser.add_argument('--lambda_mse', default=1, type=float)
    parser.add_argument('--lambda_perceptual', default=1, type=float)
    parser.add_argument('--lambda_adv', default=5e-3, type=float)

    parser.add_argument('--resume', default='', type=str)
    parser.add_argument('--resume_optim', default='', type=str)
    parser.add_argument('--resume_scheduler', default='', type=str)

    ## log setting
    parser.add_argument('--log_freq', default=10, type=int)
    parser.add_argument('--vis_freq', default=50000, type=int)  #50000
    parser.add_argument('--save_epoch_freq', default=10, type=int)  #100
    parser.add_argument('--test_freq', default=100, type=int)  #100
    parser.add_argument('--save_folder', default='./weights', type=str)
    parser.add_argument('--vis_step_freq', default=100, type=int)
    parser.add_argument('--use_tb_logger', action='store_true')
    parser.add_argument('--save_test_results', action='store_true')

    ## for evaluate
    parser.add_argument('--ref_level', default=1, type=int)

    ## setup training environment
    args = parser.parse_args()
    set_random_seed(args.random_seed)

    ## setup training device: keep non-negative ids (-1 means CPU); avoid
    ## shadowing the builtin `id` used by the original loop.
    parsed_ids = [int(tok) for tok in args.gpu_ids.split(',')]
    args.gpu_ids = [gid for gid in parsed_ids if gid >= 0]
    if len(args.gpu_ids) > 0:
        torch.cuda.set_device(args.gpu_ids[0])

    #### distributed training settings
    if args.launcher == 'none':  # disabled distributed training
        args.dist = False
        args.rank = -1
        print('Disabled distributed training.')
    else:
        args.dist = True
        init_dist()
        args.world_size = torch.distributed.get_world_size()
        args.rank = torch.distributed.get_rank()

    args.save_folder = os.path.join(args.save_folder, args.name)
    args.vis_save_dir = os.path.join(args.save_folder, 'vis')
    args.snapshot_save_dir = os.path.join(args.save_folder, 'snapshot')
    log_file_path = os.path.join(args.save_folder,
                                 time.strftime('%Y%m%d_%H%M%S') + '.log')

    if args.rank <= 0:
        # makedirs(..., exist_ok=True) is race-free and — unlike the former
        # os.mkdir for the snapshot dir — also creates missing parents.
        os.makedirs(args.vis_save_dir, exist_ok=True)
        os.makedirs(args.snapshot_save_dir, exist_ok=True)
        setup_logger(log_file_path)

    print_args(args)

    cudnn.benchmark = True

    ## train model
    trainer = Trainer(args)
    trainer.train()