Example 1
def get_decision_tree(data, alpha):
    lg.main.debug("Get decision tree called!")

    node = Node(data)
    # return a leaf node if only one class is left in the set
    if not data.getImpurity():
        lg.main.debug("Data is pure!")
        return node

    # Criterion finds the best feature to split on
    criterion = Criterion(data)
    datasets = criterion.split(alpha)
    if not datasets:
        return node

    # store the feature the data was split on, for querying later
    node.setFeature(criterion.bestFeature)
    for key, subset in datasets:
        lg.main.debug("Adding a child")
        node.addChild(key, get_decision_tree(subset, alpha))

    return node
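
The snippet relies on Node and Criterion classes (and an lg logging helper) defined elsewhere. A minimal sketch of the interfaces it assumes, with hypothetical internals, just to make the recursion above exercisable:

class Node:
    """Hypothetical stand-in for the Node used above."""

    def __init__(self, data):
        self.data = data
        self.feature = None
        self.children = {}

    def setFeature(self, feature):
        self.feature = feature

    def addChild(self, key, child):
        # key is the feature value that leads to this child subtree
        self.children[key] = child


class Criterion:
    """Hypothetical stand-in: finds the best feature to split on.

    split(alpha) is assumed to return an iterable of (value, subset)
    pairs, or a falsy value when no split passes the alpha threshold.
    """

    def __init__(self, data):
        self.data = data
        self.bestFeature = None

    def split(self, alpha):
        raise NotImplementedError  # depends on the impurity measure used
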
Example 2
import config
import sys

from analysis.performance import Performance
from argument_parser import Parser
from decimal import Decimal
from criterion import Criterion
from exchanges.binance import Binance
from log.logger import Logger

API_KEY = config.BINANCE['api_key']
API_SECRET = config.BINANCE['api_secret']

logger = Logger()
binance = Binance(API_KEY, API_SECRET, logger)
criterias = Criterion(binance, logger)


class Tjur():
    def __init__(self, strategy):
        self.strategy = strategy['strategy']
        self.symbols = strategy['symbol']['symbol']
        self.symbol1 = strategy['symbol'][0]['symbol']
        self.symbol2 = strategy['symbol'][1]['symbol']
        self.steps = strategy['symbol']['filters']['steps']
        self.position = strategy['position']
        self.order_type = strategy['position']['order_type']
        self.amount_type = strategy['position']['amount_type']
        self.position_size = strategy['position']['size']
        self.position_percentage = strategy['position']['percentage']
        self.win_target = strategy['win_target']
Example 3
    data_path = config['data']['path']
    with open(os.path.join(data_path, config['data']['dev_loader']), 'rb') as f:
        dev_loader = pickle.load(f)
    with open(os.path.join(data_path, config['data']['test_loader']), 'rb') as f:
        test_loader = pickle.load(f)

    with open(os.path.join(data_path, config['data']['vocabulary']), 'rb') as f:
        vocabulary = pickle.load(f)

    # word2vec weights
    with open(os.path.join(data_path, config['data']['weights']), 'rb') as f:
        weights = pickle.load(f)
    # weights = None

    # model & optimizer & criterion
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    support = int(config['model']['support'])
    model = FewShotInduction(C=int(config['model']['class']),
                             S=support,
                             vocab_size=len(vocabulary),
                             embed_size=int(config['model']['embed_dim']),
                             hidden_size=int(config['model']['hidden_dim']),
                             d_a=int(config['model']['d_a']),
                             iterations=int(config['model']['iterations']),
                             outsize=int(config['model']['relation_dim']),
                             weights=weights).to(device)
    optimizer = optim.Adam(model.parameters(), lr=float(config['model']['lr']))
    criterion = Criterion(way=int(config['model']['class']),
                          shot=int(config['model']['support']))

    # writer
    os.makedirs(config['model']['log_path'], exist_ok=True)
    writer = SummaryWriter(config['model']['log_path'])
    main()
    writer.close()
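
Everything above is driven by a config mapping; the int(...)/float(...) casts suggest a configparser INI file, where every value is a string. A hypothetical config covering exactly the keys the snippet reads (all file names and values are placeholders):

import configparser

# Hypothetical INI contents matching the keys read above;
# every path and value here is a placeholder.
CONFIG_TEXT = """
[data]
path = data/
dev_loader = dev_loader.pkl
test_loader = test_loader.pkl
vocabulary = vocabulary.pkl
weights = weights.pkl

[model]
# C-way K-shot episode settings
class = 2
support = 5
embed_dim = 300
hidden_dim = 128
d_a = 64
iterations = 3
relation_dim = 100
lr = 1e-4
log_path = log/
"""

config = configparser.ConfigParser()
config.read_string(CONFIG_TEXT)
print(int(config['model']['support']))  # -> 5
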
Example 4
    def __init__(self, data, shape, step):
        Criterion.__init__(self, data, shape, step)
Example 5
    def train(self, wm_model, tool):
        #utils.print_parameter_list(wm_model, wm_model.dseq_parameter_names())

        # load data for pre-training
        print("building data for dseq...")
        tool.build_data(self.hps.train_data,
                        self.hps.valid_data,
                        self.hps.dseq_batch_size,
                        mode='dseq')

        print("train batch num: %d" % (tool.train_batch_num))
        print("valid batch num: %d" % (tool.valid_batch_num))

        #input("please check the parameters, and then press any key to continue >")

        # training logger
        logger = SimpleLogger('train')
        logger.set_batch_num(tool.train_batch_num)
        logger.set_log_steps(self.hps.dseq_log_steps)
        logger.set_log_path(self.hps.dseq_train_log_path)
        logger.set_rate('learning_rate', 0.0)
        logger.set_rate('teach_ratio', 1.0)

        # build optimizer
        opt = torch.optim.AdamW(wm_model.dseq_parameters(),
                                lr=1e-3,
                                betas=(0.9, 0.99),
                                weight_decay=self.hps.weight_decay)
        optimizer = ISRScheduler(optimizer=opt,
                                 warmup_steps=self.hps.dseq_warmup_steps,
                                 max_lr=self.hps.dseq_max_lr,
                                 min_lr=self.hps.dseq_min_lr,
                                 init_lr=self.hps.dseq_init_lr,
                                 beta=0.6)

        wm_model.train()

        criterion = Criterion(self.hps.pad_idx)

        # teacher forcing ratio decay
        tr_decay_tool = ExponentialDecay(self.hps.dseq_burn_down_tr,
                                         self.hps.dseq_decay_tr,
                                         self.hps.dseq_min_tr)

        # train
        for epoch in range(1, self.hps.dseq_epoches + 1):

            self.run_train(wm_model, tool, optimizer, criterion, logger)

            if epoch % self.hps.dseq_validate_epoches == 0:
                print("run validation...")
                wm_model.eval()
                print("in training mode: %d" % (wm_model.training))
                self.run_validation(epoch, wm_model, criterion, tool,
                                    optimizer.rate())
                wm_model.train()
                print("validation Done: %d" % (wm_model.training))


            if (self.hps.dseq_save_epoches >= 1) and \
                (epoch % self.hps.dseq_save_epoches) == 0:
                # save checkpoint
                print("saving model...")
                utils.save_checkpoint(self.hps.model_dir,
                                      epoch,
                                      wm_model,
                                      prefix="dseq")

            logger.add_epoch()

            print("teach forcing ratio decay...")
            wm_model.set_teach_ratio(tr_decay_tool.do_step())
            logger.set_rate('teach_ratio', tr_decay_tool.get_rate())

            print("shuffle data...")
            tool.shuffle_train_data()
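
ExponentialDecay is used here (and with different arguments in the next example) but not defined. A minimal sketch consistent with the call sites, assuming the rate holds at 1.0 during a burn-down phase and then decays exponentially toward a floor:

class ExponentialDecay:
    """Hypothetical sketch of the do_step()/get_rate() interface above."""

    def __init__(self, burn_down, decay_steps, min_rate):
        self.burn_down = burn_down
        self.decay_steps = max(1, decay_steps)
        self.min_rate = min_rate
        self.step = 0
        self.rate = 1.0

    def do_step(self):
        self.step += 1
        if self.step > self.burn_down:
            # factor chosen so the rate reaches min_rate after
            # decay_steps post-burn-down steps
            k = self.min_rate ** (1.0 / self.decay_steps)
            self.rate = max(self.min_rate, self.rate * k)
        return self.rate

    def get_rate(self):
        return self.rate
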
Example 6
    def train(self, wm_model, tool):
        #utils.print_parameter_list(wm_model)
        # load data for pre-training
        print("building data for wm...")
        tool.build_data(self.hps.train_data,
                        self.hps.valid_data,
                        self.hps.batch_size,
                        mode='wm')

        print("train batch num: %d" % (tool.train_batch_num))
        print("valid batch num: %d" % (tool.valid_batch_num))

        #input("please check the parameters, and then press any key to continue >")

        # training logger
        logger = SimpleLogger('train')
        logger.set_batch_num(tool.train_batch_num)
        logger.set_log_steps(self.hps.log_steps)
        logger.set_log_path(self.hps.train_log_path)
        logger.set_rate('learning_rate', 0.0)
        logger.set_rate('teach_ratio', 1.0)
        logger.set_rate('temperature', 1.0)

        # build optimizer
        opt = torch.optim.AdamW(wm_model.parameters(),
                                lr=1e-3,
                                betas=(0.9, 0.99),
                                weight_decay=self.hps.weight_decay)
        optimizer = ISRScheduler(optimizer=opt,
                                 warmup_steps=self.hps.warmup_steps,
                                 max_lr=self.hps.max_lr,
                                 min_lr=self.hps.min_lr,
                                 init_lr=self.hps.init_lr,
                                 beta=0.6)

        wm_model.train()

        null_idxes = tool.load_function_tokens(self.hps.data_dir +
                                               "fchars.txt").to(self.device)
        wm_model.set_null_idxes(null_idxes)

        criterion = Criterion(self.hps.pad_idx)

        # change each epoch
        tr_decay_tool = ExponentialDecay(self.hps.burn_down_tr,
                                         self.hps.decay_tr, self.hps.min_tr)
        # change each iteration
        self.tau_decay_tool = ExponentialDecay(0, self.hps.tau_annealing_steps,
                                               self.hps.min_tau)

        # -----------------------------------------------------------
        # train with all data
        for epoch in range(1, self.hps.max_epoches + 1):

            self.run_train(wm_model, tool, optimizer, criterion, logger)

            if epoch % self.hps.validate_epoches == 0:
                print("run validation...")
                wm_model.eval()
                print("in training mode: %d" % (wm_model.training))
                self.run_validation(epoch, wm_model, criterion, tool,
                                    optimizer.rate())
                wm_model.train()
                print("validation Done: %d" % (wm_model.training))


            if (self.hps.save_epoches >= 1) and \
                (epoch % self.hps.save_epoches) == 0:
                # save checkpoint
                print("saving model...")
                utils.save_checkpoint(self.hps.model_dir,
                                      epoch,
                                      wm_model,
                                      prefix="wm")

            logger.add_epoch()

            print("teach forcing ratio decay...")
            wm_model.set_teach_ratio(tr_decay_tool.do_step())
            logger.set_rate('teach_ratio', tr_decay_tool.get_rate())

            print("shuffle data...")
            tool.shuffle_train_data()
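
ISRScheduler wraps the AdamW optimizer and exposes rate(); presumably run_train calls step()/zero_grad() on it as well. The name suggests inverse-square-root decay with warmup; the sketch below is one plausible reading, and the role of beta in particular is a guess:

class ISRScheduler:
    """Hypothetical warmup + inverse-square-root LR schedule."""

    def __init__(self, optimizer, warmup_steps, max_lr, min_lr, init_lr, beta):
        self.optimizer = optimizer
        self.warmup_steps = max(1, warmup_steps)
        self.max_lr, self.min_lr, self.init_lr = max_lr, min_lr, init_lr
        self.beta = beta
        self._step = 0

    def rate(self):
        if self._step <= self.warmup_steps:
            # linear warmup from init_lr to max_lr
            frac = self._step / self.warmup_steps
            return self.init_lr + (self.max_lr - self.init_lr) * frac
        # inverse-square-root decay, floored at min_lr; using beta to
        # soften the exponent is an assumption
        decay = (self.warmup_steps / self._step) ** (0.5 * self.beta)
        return max(self.min_lr, self.max_lr * decay)

    def step(self):
        # update the wrapped optimizer's LR, then take the step
        self._step += 1
        for group in self.optimizer.param_groups:
            group['lr'] = self.rate()
        self.optimizer.step()

    def zero_grad(self):
        self.optimizer.zero_grad()
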
Example 7
def balance_team(file_path, no_of_teams, target_path):
	# read the liga csv file exported from the database
	player_list = parse_liga_csv_file(file_path)

	criterion = Criterion(player_list)
	criterion.compute_height_score()
	criterion.compute_weight_score()
	criterion.compute_years_playing_score()
	criterion.compute_past_achievement_score()

	criterion.compute_total_score()

	Position_Center, Position_Forward, Position_Guard = group_positions(player_list)

	# rank each position group in ascending order of index 3
	# (presumably the total score computed above)
	ranked_guard = sorted(Position_Guard, key=lambda x: x[3])
	ranked_forward = sorted(Position_Forward, key=lambda x: x[3])
	ranked_center = sorted(Position_Center, key=lambda x: x[3])

	master_list = rank(len(player_list), ranked_center, ranked_forward, ranked_guard)

	seed_players(len(player_list), no_of_teams, target_path, master_list)
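
Neither rank nor seed_players is shown. A common way to spread a ranked master list evenly across teams is a snake (boustrophedon) draft; a hypothetical version, assuming master_list is already ordered by total score:

def snake_draft(master_list, no_of_teams):
    """Hypothetical seeding: reverse the pick order every other round."""
    teams = [[] for _ in range(no_of_teams)]
    for i, player in enumerate(master_list):
        round_no, pos = divmod(i, no_of_teams)
        if round_no % 2 == 1:
            pos = no_of_teams - 1 - pos  # snake back on odd rounds
        teams[pos].append(player)
    return teams
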
Example 8
def train():
    transform, lidar_transform, mask_transform = get_transform()

    # full dir for processed data
    proc_data = os.path.join(ROOT, config['proc_data'])

    train_dataset = TreeDataset(proc_data,
                                transform=transform,
                                lidar_transform=lidar_transform,
                                mask_transform=mask_transform,
                                use_lidar=args.use_lidar,
                                purpose='train')

    val_dataset = TreeDataset(proc_data,
                              transform=transform,
                              lidar_transform=lidar_transform,
                              mask_transform=mask_transform,
                              use_lidar=args.use_lidar,
                              purpose='val')

    num_models = len(os.listdir(CKPDIR))
    if args.resume:
        # assumes checkpoints are saved one per epoch as model_<epoch>.pth,
        # so the file count is also the latest epoch
        latest_model = 'model_{}.pth'.format(num_models)
        ckp_path = os.path.join(CKPDIR, latest_model)
        model.load_state_dict(torch.load(ckp_path, map_location=device))
        start_epoch = num_models
    else:
        # start training from epoch 1 and remove all existing checkpoints
        start_epoch = 1

        if num_models > 1 and not args.debug:
            print("Removing existing ckps in {}, this may take a while.".format(
                CKPDIR))
            for ckp in os.listdir(CKPDIR):
                os.remove(os.path.join(CKPDIR, ckp))

    criterion = Criterion()
    trainer = Trainer(train_dataset=train_dataset,
                      val_dataset=val_dataset,
                      model=model,
                      criterion=criterion,
                      ckp_dir=CKPDIR,
                      log_dir=LOGDIR,
                      debug=args.debug,
                      use_lidar=args.use_lidar,
                      batch_size=args.batch_size,
                      lr=args.lr,
                      weight_decay=args.weight_decay,
                      threshold=args.threshold,
                      start_epoch=start_epoch,
                      resume=args.resume,
                      epochs=args.epochs,
                      print_freq=args.print_freq)

    for epoch in range(start_epoch, start_epoch + args.epochs):
        start = time.time()
        trainer(epoch)
        trainer.validate(epoch)
        end = time.time()

        print("Time to train one epoch is: {:0.2f}".format(end - start))
        if not args.debug:
            trainer.logger.save_log()

    return
Example 9
    def __init__(self, max_features=None):
        Criterion.__init__(self, max_features)
Example 10

args = parser.parse_args()

use_cuda = torch.cuda.is_available() and not args.no_cuda
device = torch.device('cuda:%d' % args.gpu_id if use_cuda else 'cpu')

train_data = dataset.Data(os.path.join('data', args.dataset), size=args.image_size, mode='train', device=device)
train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True)

val_data = dataset.Data(os.path.join('data', args.dataset), size=args.image_size, mode='val')
val_loader = DataLoader(val_data, batch_size=args.batch_size, shuffle=False)

model = Model()
model = model.to(device)
criterion = Criterion()
optimiser = optim.Adam(model.parameters(), lr=args.lr)

date_time = datetime.now().strftime("%Y_%m_%d_%H_%M_%S_")
writer = SummaryWriter(os.path.join('runs', date_time + args.model_name))

load_dir = os.path.join('checkpoint', args.model_name)
if args.resume > 0 and os.path.exists(load_dir):
    model.load_state_dict(torch.load(os.path.join(load_dir, 'model.pth')))
    optimiser.load_state_dict(torch.load(os.path.join(load_dir, 'optimiser.pth')))
    args.epoch_start = torch.load(os.path.join(load_dir, 'epoch.pth'))['epoch']


for epoch in range(args.epoch_start, args.epoch_start + args.epoch_num):
    model.train()
    train_losses = []