Example #1
def test(data_loader, model, logger, epoch):
    with torch.no_grad():
        metric = Metric()
        model.train(False)
        for i, input in enumerate(data_loader):
            input = collate(input)
            input_size = input['img'].size(0)
            input = to_device(input, cfg['device'])
            output = model(input)
            output['loss'] = output['loss'].mean() if cfg['world_size'] > 1 else output['loss']
            evaluation = metric.evaluate(cfg['metric_name']['test'], input,
                                         output)
            logger.append(evaluation, 'test', input_size)
        info = {
            'info': [
                'Model: {}'.format(cfg['model_tag']),
                'Test Epoch: {}({:.0f}%)'.format(epoch, 100.)
            ]
        }
        logger.append(info, 'test', mean=False)
        logger.write('test', cfg['metric_name']['test'])
        input['reconstruct'] = True
        input['z'] = output['z']
        output = model.reverse(input)
        save_img(input['img'][:100],
                 './output/vis/input_{}.png'.format(cfg['model_tag']),
                 range=(-1, 1))
        save_img(output['img'][:100],
                 './output/vis/output_{}.png'.format(cfg['model_tag']),
                 range=(-1, 1))
    return
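Most of the PyTorch snippets on this page share one calling convention: metric.evaluate(metric_names, input, output) returns a dict of named scores that a logger then aggregates. The Metric class itself differs per project; below is a hypothetical minimal sketch of that interface (the 'Loss'/'Accuracy' names and the 'score'/'label' keys are assumptions for illustration, not taken from any project above).

import torch

class Metric:
    # Hypothetical minimal Metric mirroring the evaluate() calls in these
    # snippets; assumes output['loss'] is a scalar tensor.
    def evaluate(self, metric_names, input, output):
        evaluation = {}
        for name in metric_names:
            if name == 'Loss':
                evaluation[name] = output['loss'].item()
            elif name == 'Accuracy':
                pred = output['score'].argmax(dim=-1)
                evaluation[name] = (pred == input['label']).float().mean().item()
        return evaluation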
def test(dataset, data_split, label_split, model, logger, epoch):
    with torch.no_grad():
        metric = Metric()
        model.train(False)
        for m in range(cfg['num_users']):
            data_loader = make_data_loader({'test': SplitDataset(dataset, data_split[m])})['test']
            for i, input in enumerate(data_loader):
                input = collate(input)
                input_size = input['img'].size(0)
                input['label_split'] = torch.tensor(label_split[m])
                input = to_device(input, cfg['device'])
                output = model(input)
                output['loss'] = output['loss'].mean() if cfg['world_size'] > 1 else output['loss']
                evaluation = metric.evaluate(cfg['metric_name']['test']['Local'], input, output)
                logger.append(evaluation, 'test', input_size)
        data_loader = make_data_loader({'test': dataset})['test']
        for i, input in enumerate(data_loader):
            input = collate(input)
            input_size = input['img'].size(0)
            input = to_device(input, cfg['device'])
            output = model(input)
            output['loss'] = output['loss'].mean() if cfg['world_size'] > 1 else output['loss']
            evaluation = metric.evaluate(cfg['metric_name']['test']['Global'], input, output)
            logger.append(evaluation, 'test', input_size)
        info = {'info': ['Model: {}'.format(cfg['model_tag']),
                         'Test Epoch: {}({:.0f}%)'.format(epoch, 100.)]}
        logger.append(info, 'test', mean=False)
        logger.write('test', cfg['metric_name']['test']['Local'] + cfg['metric_name']['test']['Global'])
    return
 def test_add(self):
     logger.info("test_add")
     for x in xrange(10):
         Metric.add("test.add")
     self.wait_buf(lines_for_add(10))
     for line in self.buf:
         self.assertTrue(line.startswith("stats.test.add 1.0"))
def train(data_loader, model, optimizer, logger, epoch):
    metric = Metric()
    model.train(True)
    start_time = time.time()
    for i, input in enumerate(data_loader):
        input = collate(input)
        input_size = input['img'].size(0)
        input = to_device(input, cfg['device'])
        optimizer.zero_grad()
        output = model(input)
        output['loss'] = output['loss'].mean() if cfg['world_size'] > 1 else output['loss']
        output['loss'].backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
        optimizer.step()
        evaluation = metric.evaluate(cfg['metric_name']['train'], input, output)
        logger.append(evaluation, 'train', n=input_size)
        if i % int((len(data_loader) * cfg['log_interval']) + 1) == 0:
            batch_time = (time.time() - start_time) / (i + 1)
            lr = optimizer.param_groups[0]['lr']
            epoch_finished_time = datetime.timedelta(seconds=round(batch_time * (len(data_loader) - i - 1)))
            exp_finished_time = epoch_finished_time + datetime.timedelta(
                seconds=round((cfg['num_epochs'] - epoch) * batch_time * len(data_loader)))
            info = {'info': ['Model: {}'.format(cfg['model_tag']),
                             'Train Epoch: {}({:.0f}%)'.format(epoch, 100. * i / len(data_loader)),
                             'Learning rate: {}'.format(lr), 'Epoch Finished Time: {}'.format(epoch_finished_time),
                             'Experiment Finished Time: {}'.format(exp_finished_time)]}
            logger.append(info, 'train', mean=False)
            logger.write('train', cfg['metric_name']['train'])
    return
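For context, a train/test pair like the one above is usually driven by an outer epoch loop. A minimal sketch, assuming cfg, the data loaders, model, optimizer and logger are constructed elsewhere:

for epoch in range(1, cfg['num_epochs'] + 1):
    train(train_loader, model, optimizer, logger, epoch)
    test(test_loader, model, logger, epoch)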
Example #5
    def __init__(self, hparams, split_table_path, split_table_name, debug_folder, model, gpu, downsample):

        # load the model

        self.hparams = hparams
        self.model = model
        self.gpu = gpu
        self.downsample = downsample

        print('\n')
        print('Selected Learning rate:', self.hparams['lr'])
        print('\n')

        self.debug_folder = debug_folder
        self.split_table_path = split_table_path
        self.split_table_name = split_table_name
        self.exclusions = ['S0431',
                           'S0326',
                           'S0453',
                           'S0458',
                           'A5766',
                           'A0227',
                           'A0238',
                           'A1516',
                           'A5179',
                           'Q1807',
                           'Q3568',
                           'E10256',
                           'E07341',
                           'E05758']


        self.splits = self.load_split_table()
        self.metric = Metric()
Example #6
def main():
    # parse commands
    parser = ArgParser(sys.argv, mode='assess')
    args = Args(parser, mode='assess')

    # calculate STOI and SNR scores
    metric = Metric(args)
    metric.getSTOI()
 def test_timing_exact(self):
     Metric.timing("exact.time", 1.337)
     self.wait_buf(lines_for_timing(1))
     stripped_timestamps = [" ".join(line.split(" ")[:-1]) for line in self.buf]
     self.assertTrue("stats.timers.exact.time.lower 1337.0" in stripped_timestamps)
     self.assertTrue("stats.timers.exact.time.count 1" in stripped_timestamps)
     self.assertTrue("stats.timers.exact.time.mean 1337.0" in stripped_timestamps)
     self.assertTrue("stats.timers.exact.time.upper 1337.0" in stripped_timestamps)
     self.assertTrue("stats.timers.exact.time.upper_100 1337.0" in stripped_timestamps)
Example #8
    def __init__(self, input_size, n_channels, hparams):

        self.hparams = hparams

        self.device = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")

        # define the models
        self.model = WaveNet(n_channels=n_channels).to(self.device)
        summary(self.model, (input_size, n_channels))
        # self.model.half()

        if torch.cuda.device_count() > 1:
            print("Number of GPUs will be used: ",
                  torch.cuda.device_count() - 3)
            self.model = DP(self.model,
                            device_ids=list(
                                range(torch.cuda.device_count() - 3)))
        else:
            print('Only one GPU is available')

        self.metric = Metric()
        self.num_workers = 1
        ########################## compile the model ###############################

        # define optimizer
        self.optimizer = torch.optim.Adam(params=self.model.parameters(),
                                          lr=self.hparams['lr'],
                                          weight_decay=1e-5)

        # weights = torch.Tensor([0.025,0.033,0.039,0.046,0.069,0.107,0.189,0.134,0.145,0.262,1]).cuda()
        self.loss = nn.BCELoss()  # CompLoss(self.device)

        # define early stopping
        self.early_stopping = EarlyStopping(
            checkpoint_path=self.hparams['checkpoint_path'] + '/checkpoint.pt',
            patience=self.hparams['patience'],
            delta=self.hparams['min_delta'],
        )
        # lr scheduler
        self.scheduler = ReduceLROnPlateau(
            optimizer=self.optimizer,
            mode='max',
            factor=0.2,
            patience=3,
            verbose=True,
            threshold=self.hparams['min_delta'],
            threshold_mode='abs',
            cooldown=0,
            eps=0,
        )

        self.seed_everything(42)
        self.threshold = 0.75
        self.scaler = torch.cuda.amp.GradScaler()
Example #9
def exp2():
    """ Plot ged and time. """
    dataset = 'aids50'
    models = BASELINE_MODELS
    rs = load_results_as_dict(
        dataset, models,
        row_graphs=load_data(dataset, train=False).graphs,
        col_graphs=load_data(dataset, train=True).graphs)
    metrics = [Metric('ged', 'ged'), Metric('time', 'time (msec)')]
    for metric in metrics:
        plot_ged_time_helper(dataset, models, metric, rs)
Example #10
def test(data_loader, model):
    with torch.no_grad():
        metric = Metric()
        model.train(False)
        for i, input in enumerate(data_loader):
            input = collate(input)
            input = to_device(input, config.PARAM['device'])
            output = model(input)
            output['loss'] = output['loss'].mean() if config.PARAM['world_size'] > 1 else output['loss']
        evaluation = metric.evaluate(config.PARAM['metric_names']['test'], input, output)
    print(evaluation)
    return evaluation
Example #11
def main():
    st = pst.SpeedTest()

    csv_file = os.path.join(os.path.dirname(__file__), "data", "metrics.csv")
    csv_writer = MetricWriterCSV(csv_file)

    json_file = os.path.join(os.path.dirname(__file__), "data", "metrics.json")
    json_writer = MetricWriterJSON(json_file)

    while True:
        try:
            ping = st.ping()
            upload = st.upload()
            download = st.download()
        except KeyboardInterrupt:
            sys.exit(0)
        except Exception:
            ping, upload, download = 0.0, 0.0, 0.0
            is_online = False
        else:
            is_online = True

        metric = Metric(ping, upload, download, is_online)

        csv_writer.write(metric)
        json_writer.write(metric)
        print(metric)
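Here Metric is just a record of one speed-test sample. A hypothetical equivalent definition, assuming the writers only need attribute access:

from collections import namedtuple

# Hypothetical record type; the real project may carry timestamps or units.
Metric = namedtuple("Metric", ["ping", "upload", "download", "is_online"])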
Example #12
def test(dataset, model, logger, epoch):
    with torch.no_grad():
        metric = Metric()
        model.train(False)
        batch_dataset = BatchDataset(dataset, cfg['bptt'])
        for i, input in enumerate(batch_dataset):
            input_size = input['label'].size(0)
            input = to_device(input, cfg['device'])
            output = model(input)
            output['loss'] = output['loss'].mean() if cfg['world_size'] > 1 else output['loss']
            evaluation = metric.evaluate(cfg['metric_name']['test'], input, output)
            logger.append(evaluation, 'test', input_size)
        info = {'info': ['Model: {}'.format(cfg['model_tag']), 'Test Epoch: {}({:.0f}%)'.format(epoch, 100.)]}
        logger.append(info, 'test', mean=False)
        logger.write('test', cfg['metric_name']['test'])
    return
Example #13
    def __init__(self, config):
        self.config = config
        self.nLayer = config.getint("Architecture", "layer")
        self.nNodes = config.getint("Architecture", "nodes")
        self.nIter = config.getint("Architecture", "iterations")
        self.etha = config.getfloat("Factors", "initlearnrate")
        self.alpha = config.getfloat("Factors", "momentum")
        self.steepness = config.getfloat("Factors", "steepness")
        self.stepsizedec = config.getfloat("Factors", "stepsizedec")
        self.stepsizeinc = config.getfloat("Factors", "stepsizeinc")
        self.offset = config.getfloat("Factors", "initoffset")
        self.mindpp = config.getfloat("Thresholds", "mindpp")
        self.mindsse = config.getfloat("Thresholds", "mindsse")
        self.mindsumweights = config.getfloat("Thresholds", "mindsumweights")
        self.actfunc = config.get("Architecture", "activation")
        self.weightsinit = config.get("Architecture", "initweights")
        self.errfct = config.get("Architecture", "errorfunction")
        self.metrics = Metric(config.get("Output", "metrics"), config.getint("Output", "metricsclass"))
        self.verbosity = config.getint("Output", "verbosity")
        self.interactive = config.getboolean("Output", "interactive")

        self.weights = []
        self.outs = []
        self.deltas = []

        self.generateActivationFunction()
Example #14
    def extract(self, context, data):
        context['origin'] = 'mw_profiler'

        metric_defs = {
            'response_time': MetricType.TIME,
            'database.queries.list': MetricType.QUERY_LIST,
            'database.queries.time': MetricType.TIME,
            'database.queries.master_count': MetricType.COUNT,
            'database.queries.slave_count': MetricType.COUNT,
            'memcached.time': MetricType.TIME,
            'memcached.miss_count': MetricType.COUNT,
            'memcached.hit_count': MetricType.COUNT,
            'memcached.dupe_count': MetricType.COUNT,
        }

        name_template = 'server.app.{}'
        metrics = {
            name: Metric(name_template.format(name), context, type)
            for name, type in metric_defs.items()
        }

        for single_run in data:
            data = self.parse_data(single_run['content'])
            for name, raw_value in data.items():
                metrics[name].add_value(raw_value, None)

        return Collection(metrics.values())
Example #15
 def test_timing_exact(self):
     Metric.timing("exact.time", 1.337)
     self.wait_buf(lines_for_timing(1))
     stripped_timestamps = [
         " ".join(line.split(" ")[:-1]) for line in self.buf
     ]
     self.assertTrue(
         "stats.timers.exact.time.lower 1337.0" in stripped_timestamps)
     self.assertTrue(
         "stats.timers.exact.time.count 1" in stripped_timestamps)
     self.assertTrue(
         "stats.timers.exact.time.mean 1337.0" in stripped_timestamps)
     self.assertTrue(
         "stats.timers.exact.time.upper 1337.0" in stripped_timestamps)
     self.assertTrue(
         "stats.timers.exact.time.upper_100 1337.0" in stripped_timestamps)
Example #16
def test(created):
    with torch.no_grad():
        metric = Metric()
        created = torch.tensor(created / 255 * 2 - 1)
        valid_mask = torch.sum(torch.isnan(created), dim=(1, 2, 3)) == 0
        created = created[valid_mask]
        label = torch.arange(cfg['classes_size'])
        label = label.repeat(cfg['generate_per_mode'])
        label = label[valid_mask]
        output = {'img': created, 'label': label}
        evaluation = metric.evaluate(cfg['metric_name']['test'], None, output)
    dbi_result = evaluation['DBI']
    print('Davies-Bouldin Index ({}): {}'.format(cfg['model_tag'], dbi_result))
    save(dbi_result,
         './output/result/dbi_created_{}.npy'.format(cfg['model_tag']),
         mode='numpy')
    return evaluation
def test(data_loader, model, logger, epoch):
    with torch.no_grad():
        metric = Metric()
        model.train(False)
        for i, input in enumerate(data_loader):
            input = collate(input)
            input_size = len(input['img'])
            input = to_device(input, config.PARAM['device'])
            output = model(input)
            output['loss'] = output['loss'].mean() if config.PARAM['world_size'] > 1 else output['loss']
            evaluation = metric.evaluate(config.PARAM['metric_names']['test'], input, output)
            logger.append(evaluation, 'test', input_size)
        info = {'info': ['Model: {}'.format(config.PARAM['model_tag']),
                         'Test Epoch: {}({:.0f}%)'.format(epoch, 100.)]}
        logger.append(info, 'test', mean=False)
        logger.write('test', config.PARAM['metric_names']['test'])
    return
def test(generated):
    with torch.no_grad():
        metric = Metric()
        generated = torch.tensor(generated / 255 * 2 - 1)
        valid_mask = torch.sum(torch.isnan(generated), dim=(1, 2, 3)) == 0
        generated = generated[valid_mask]
        output = {'img': generated}
        evaluation = metric.evaluate(cfg['metric_name']['test'], None, output)
    is_result, fid_result = evaluation['InceptionScore'], evaluation['FID']
    print('Inception Score ({}): {}'.format(cfg['model_tag'], is_result))
    print('FID ({}): {}'.format(cfg['model_tag'], fid_result))
    save(is_result,
         './output/result/is_generated_{}.npy'.format(cfg['model_tag']),
         mode='numpy')
    save(fid_result,
         './output/result/fid_generated_{}.npy'.format(cfg['model_tag']),
         mode='numpy')
    return evaluation
Example #19
    def fan_out_by_url_and_push_metrics(metrics, out_name, base_context,
                                        data_type, values):
        by_url = defaultdict(list)
        for url, value in values:
            by_url[url].append(value)

        for url, values in by_url.items():
            context = base_context.copy()
            context['url'] = url
            metrics.add(Metric(out_name, context, data_type, values=values))
Example #20
def validate_epoch(val_loader, model, criterion, epoch, device):

    model.eval()

    val_loss = 0
    metric = Metric(['accuracy', 'mean_iou'], len(val_loader.dataset.classes))
    for image, label in tqdm(val_loader, total=len(val_loader)):
        image = image.to(device)
        label = label.to(device)

        with torch.no_grad():
            pred = model(image)
        loss = criterion(pred, label) / len(image)

        val_loss += loss.item()
        metric.update(pred.data.cpu().numpy(), label.data.cpu().numpy())

    metrics = metric.compute()
    return val_loss / len(val_loader), metrics['accuracy'], metrics['mean_iou']
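A minimal sketch of calling this helper, assuming a segmentation model, criterion and loader are already built (all names are placeholders):

import torch

val_loss, acc, miou = validate_epoch(val_loader, model, criterion,
                                     epoch=0, device=torch.device("cpu"))
print(f"val_loss={val_loss:.4f} acc={acc:.4f} mIoU={miou:.4f}")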
Example #21
 def test_timing(self):
     logger.info("test_timing")
     for x in xrange(10):
         timer = Metric.start_timing("test.timing")
         primes = get_prime_list(NUM_PRIMES)
         timer.done()
         logger.debug("Got %d primes", len(primes))
     self.wait_buf(lines_for_timing(10))
     for line in self.buf:
         self.assertTrue(line.startswith("stats.timers.test.timing"))
Example #23
    def extract(self, context, data):
        context['origin'] = 'requests'

        metrics = Collection()
        metrics.add(
            Metric('server.app.response_time',
                   context,
                   MetricType.TIME,
                   values=[(float(single_run['time']), None)
                           for single_run in data]))

        metrics.add(
            Metric('server.app.response_size',
                   context,
                   MetricType.BYTES,
                   values=[(single_run['content_length'], None)
                           for single_run in data]))

        return metrics
 def train(self, local_parameters, lr, logger):
     metric = Metric()
     model = eval('models.{}(model_rate=self.model_rate).to(cfg["device"])'.format(cfg['model_name']))
     model.load_state_dict(local_parameters)
     model.train(True)
     optimizer = make_optimizer(model, lr)
     for local_epoch in range(1, cfg['num_epochs']['local'] + 1):
         for i, input in enumerate(self.data_loader):
             input = collate(input)
             input_size = input['img'].size(0)
             input['label_split'] = torch.tensor(self.label_split)
             input = to_device(input, cfg['device'])
             optimizer.zero_grad()
             output = model(input)
             output['loss'].backward()
             torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
             optimizer.step()
             evaluation = metric.evaluate(cfg['metric_name']['train']['Local'], input, output)
             logger.append(evaluation, 'train', n=input_size)
     local_parameters = model.state_dict()
     return local_parameters
Example #25
    def __init__(self, df_m15, df_h1, serial=False):
        self.df_m15 = standardize_data(
            df_m15, method="log_and_diff").dropna().reset_index(drop=True)
        self.df_h1 = df_h1
        self.net_worth = INITIAL_BALANCE
        self.prev_net_worth = INITIAL_BALANCE
        self.usd_held = INITIAL_BALANCE
        self.eur_held = 0
        self.current_step = 0
        self.reward = 0
        self.serial = serial
        # trade history
        self.trades = []
        # our profit in the last 10 trades
        self.returns = np.zeros(10)

        # index of episodes (1 episode equivalent to 1 week of trading)
        self.episode_indices_m15, self.h1_indices = get_episode(
            self.df_m15, self.df_h1)
        self.action_space = spaces.Discrete(6)
        # observation space includes: OHLC prices (normalized), close price (unnormalized),
        # time in minutes (encoded), day of week (encoded), action history, net worth change history;
        # both the minute and day features are encoded with sin and cos to retain circularity
        self.observation_space = spaces.Box(low=-10,
                                            high=10,
                                            shape=(12, WINDOW_SIZE + 1),
                                            dtype=np.float16)
        self.metrics = Metric(INITIAL_BALANCE)
        self.setup_active_df()
        self.agent_history = {
            "actions":
            np.zeros(len(self.active_df) + WINDOW_SIZE),
            "net_worth":
            np.zeros(len(self.active_df) + WINDOW_SIZE),
            "eur_held":
            np.zeros(len(self.active_df) + WINDOW_SIZE),
            "usd_held":
            np.full(len(self.active_df), self.usd_held / BALANCE_NORM_FACTOR)
        }
Example #26
    def extract(self, context, data):
        context['origin'] = 'phantomas'

        metrics = Collection()

        if len(data) == 0:
            return metrics

        metric_names = set()
        for run in data:
            metric_names.update(run['metrics'].keys())

        for metric_name in metric_names:
            raw_id = 'raw.phantomas.' + metric_name
            id = raw_id
            type = MetricType.UNKNOWN

            if metric_name in self.KNOWN_METRICS:
                metric_def = self.KNOWN_METRICS[metric_name]
                if ':' in metric_def:
                    type, id = metric_def.split(':')
                else:
                    id = metric_def

            raw_values_and_infos = [
                (self.normalize_phantomas_value(run['metrics'][metric_name],
                                                type),
                 run['offenders'][metric_name]
                 if metric_name in run['offenders'] else None) for run in data
            ]

            metrics.add(Metric(id, context, type, None, raw_values_and_infos))
            if id != raw_id:
                metrics.add(
                    Metric(raw_id, context, type, None, raw_values_and_infos))

        return metrics
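KNOWN_METRICS maps a phantomas metric name either to a plain id or to a 'type:id' pair, which the split(':') above unpacks. A couple of hypothetical entries:

# Hypothetical entries matching the "type:id" split logic above.
KNOWN_METRICS = {
    'requests': 'browser.requests',          # id only, type stays UNKNOWN
    'timeToFirstByte': 'time:browser.ttfb',  # "type:id" form
}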
def test(model, logger, epoch):
    sample_per_iter = cfg['batch_size']['test']
    with torch.no_grad():
        metric = Metric()
        model.train(False)
        C = torch.arange(cfg['classes_size'])
        C = C.repeat(cfg['generate_per_mode'])
        cfg['z'] = torch.randn([C.size(0), cfg['gan']['latent_size']]) if 'z' not in cfg else cfg['z']
        C_generated = torch.split(C, sample_per_iter)
        z_generated = torch.split(cfg['z'], sample_per_iter)
        generated = []
        for i in range(len(C_generated)):
            C_generated_i = C_generated[i].to(cfg['device'])
            z_generated_i = z_generated[i].to(cfg['device'])
            generated_i = model.generate(C_generated_i, z_generated_i)
            generated.append(generated_i.cpu())
        generated = torch.cat(generated)
        output = {'img': generated}
        evaluation = metric.evaluate(cfg['metric_name']['test'], None, output)
        logger.append(evaluation, 'test')
        info = {'info': ['Model: {}'.format(cfg['model_tag']), 'Test Epoch: {}({:.0f}%)'.format(epoch, 100.)]}
        logger.append(info, 'test', mean=False)
        logger.write('test', cfg['metric_name']['test'])
    return
 def __init__(self, env=None, mdp=None, policy=None):
     self.env = env
     self.mdp = mdp
     self.policy = policy
     self.model = self.get_model()
     # create metrics
     self.metrics = {'train': {}, 'test': {}, 'replay': {}}
     for mode in self.metrics.keys():
         self.metrics[mode]['accumulated_reward'] = Metric(
              name='accumulated reward')
         self.metrics[mode]['reward'] = Metric(name='reward')
         self.metrics[mode]['pred_reward'] = Metric(name='reward')
         self.metrics[mode]['q_vals'] = Metric(name='q values')
         self.metrics[mode]['loss'] = Metric(name='loss')
         self.metrics[mode]['epsilon'] = Metric("epsilon")
         self.metrics[mode]['action'] = Metric("action")
Example #29
class PostProcessing():
    def __init__(self, fold):

        self.fold = fold

        self.threshold = float(open(f"threshold_{self.fold}.txt",
                                    "r").read())  #0.5#0.1
        self.metric = Metric()

    def run(self, predictions):

        predictions_processed = predictions.copy()

        #if somth is found, its not a normal
        predictions_processed[np.where(
            predictions_processed >= self.threshold)] = 1
        predictions_processed[np.where(
            predictions_processed < self.threshold)] = 0

        return predictions_processed

    def find_opt_thresold(self, labels, outputs):

        threshold_grid = np.arange(0.05, 0.99, 0.05).tolist()
        threshold_opt = np.zeros((27))

        unit_threshold = partial(self._unit_threshold,
                                 labels=labels,
                                 outputs=outputs)

        start = time.time()
        with ProcessPoolExecutor(max_workers=20) as pool:
            result = pool.map(unit_threshold, threshold_grid)
        scores = list(result)
        print(f'Processing time: {(time.time() - start)/60}')

        # print('Finding the optimal threshold')
        # for threshold in tqdm(threshold_grid):
        #
        #     predictions = outputs.copy()
        #
        #     predictions[np.where(predictions >= threshold)] = 1
        #     predictions[np.where(predictions < threshold)] = 0
        #
        #     scores.append(self.metric.compute(labels, predictions))

        scores = np.array(scores)
        best = np.where(scores == np.max(scores))[0]
        threshold_opt = threshold_grid[best[0]]

        return threshold_opt

    def _unit_threshold(self, threshold, labels, outputs):

        predictions = outputs.copy()

        predictions[np.where(predictions >= threshold)] = 1
        predictions[np.where(predictions < threshold)] = 0

        return self.metric.compute(labels, predictions)

    def update_threshold(self, threshold):
        f = open(f"threshold_{self.fold}.txt", "w")
        f.write(str(threshold))
        f.close()
        self.threshold = threshold
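A minimal usage sketch, assuming threshold_0.txt already exists on disk (the constructor reads it) and that outputs are sigmoid scores in [0, 1]:

import numpy as np

pp = PostProcessing(fold=0)
outputs = np.random.rand(8, 27)   # fake sigmoid outputs, 27 classes
binary = pp.run(outputs)          # 0/1 predictions at the stored threshold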
Example #30
    device = torch.device('cpu')
    '''
    config = {
        "train": {
            "args": {
                "epochs": 5
            }
        }
    }
    '''
    path = 'config.json'
    config = json.load(open(path))
    net = ToyModel(64, 13)
    criterion = nn.CrossEntropyLoss(ignore_index=-1, reduction='mean')
    optimizer = optim.Adam(net.parameters())
    metric = Metric(nclasses=13)
    trainer = Trainer(device, config, net, criterion, optimizer, metric)

    train_dataset = [(torch.randn(size=(3, 100, 100)),
                    torch.randn(size=(100, 100)),
                    torch.randint(low=-1, high=13, size=(100, 100)).long())
                    for _ in range(99)]
    train_dataloader = data.DataLoader(train_dataset, batch_size=4)

    test_dataset = [(torch.randn(size=(3, 100, 100)),
                    torch.randn(size=(100, 100)),
                    torch.randint(low=-1, high=13, size=(100, 100)).long())
                    for _ in range(99)]
    test_dataloader = data.DataLoader(test_dataset, batch_size=4)

    trainer.train(train_dataloader, test_dataloader)
Example #31
class NN:
    def __init__(self, config):
        self.config = config
        self.nLayer = config.getint("Architecture", "layer")
        self.nNodes = config.getint("Architecture", "nodes")
        self.nIter = config.getint("Architecture", "iterations")
        self.etha = config.getfloat("Factors", "initlearnrate")
        self.alpha = config.getfloat("Factors", "momentum")
        self.steepness = config.getfloat("Factors", "steepness")
        self.stepsizedec = config.getfloat("Factors", "stepsizedec")
        self.stepsizeinc = config.getfloat("Factors", "stepsizeinc")
        self.offset = config.getfloat("Factors", "initoffset")
        self.mindpp = config.getfloat("Thresholds", "mindpp")
        self.mindsse = config.getfloat("Thresholds", "mindsse")
        self.mindsumweights = config.getfloat("Thresholds", "mindsumweights")
        self.actfunc = config.get("Architecture", "activation")
        self.weightsinit = config.get("Architecture", "initweights")
        self.errfct = config.get("Architecture", "errorfunction")
        self.metrics = Metric(config.get("Output", "metrics"), config.getint("Output", "metricsclass"))
        self.verbosity = config.getint("Output", "verbosity")
        self.interactive = config.getboolean("Output", "interactive")

        self.weights = []
        self.outs = []
        self.deltas = []

        self.generateActivationFunction()

    ##############################################################################

    def generateActivationFunction(self):

        if self.actfunc == "logistic":

            def dphi(net):
                r = 1.0 / (1.0 + numpy.exp(-net * self.steepness))
                return numpy.multiply(r, (1.0 - r))

            self.phi = lambda net: 1.0 / (1.0 + numpy.exp(-net * self.steepness))
            self.dphi = dphi

        elif self.actfunc == "tanh":
            self.phi = lambda net: numpy.tanh(self.steepness * net)
            self.dphi = lambda net: self.steepness * (1.0 - numpy.power(numpy.tanh(net), 2))

        elif self.actfunc == "linear":
            self.phi = lambda net: self.steepness * net
            self.dphi = lambda net: self.steepness

        elif self.actfunc == "softmax":

            def phi(net):
                s = 1.0 / numpy.exp(-net).sum()
                return s * numpy.exp(-net)

            self.phi = phi

            def dphi(net):
                r = self.phi(net)
                return numpy.multiply(r, (1.0 - r))

            self.dphi = dphi

        elif self.actfunc == "gauss":
            self.phi = lambda net: numpy.exp(-numpy.power(net - 1, 2) * self.steepness)
            self.dphi = lambda net: -2 * numpy.multiply(net - 1, numpy.exp(-numpy.power(net - 1, 2)))

        elif self.actfunc == "sin":
            self.phi = lambda net: numpy.sin(self.steepness * net)
            self.dphi = lambda net: self.steepness * numpy.cos(self.steepness * net)
        else:
            logging.error("Unknown activation function. Available: logistic, tanh, linear, softmax, gauss, sin")
            sys.exit(-1)

    ##############################################################################

    def reload(self, config, weights):
        self.__init__(config)
        self.weights = weights

    ##############################################################################

    def initWeights(self, cls, feat):
        self.nIn = feat
        self.nOut = cls

        def initWeights(generateMatrixFunc):
            self.weights.append(generateMatrixFunc(self.nIn, self.nNodes))
            for i in range(1, self.nLayer):
                self.weights.append(generateMatrixFunc(self.nNodes, self.nNodes))
            self.weights.append(generateMatrixFunc(self.nNodes, self.nOut))

        if self.weightsinit == "randuni":

            def mat(n, m):
                return self.offset * (numpy.mat(numpy.random.rand(n, m)) + 0.5)

        elif self.weightsinit == "randgauss":

            def mat(n, m):
                return self.offset * numpy.mat(numpy.random.standard_normal([n, m]))

        elif self.weightsinit == "uniform":

            def mat(n, m):
                return self.offset * numpy.mat(numpy.ones([n, m]))

        elif self.weightsinit == "exponential":

            def mat(n, m):
                return self.offset * numpy.mat(numpy.random.standard_exponential(size=[n, m]))

        else:
            logging.error("Unknown weights initialization. Available: randuni, randgauss, uniform, exponential")
            sys.exit(-1)

        initWeights(mat)

        from copy import copy

        self.lastchange = copy(self.weights)

        self.outs = [None] * (self.nLayer + 1)
        self.deltas = [None] * (self.nLayer + 1)

    ##############################################################################

    def test(self, data):
        conf = numpy.zeros([self.nOut, self.nOut], numpy.int16)
        allprobs = [None] * len(data)
        for i, row in enumerate(data):
            allprobs[i] = self.passForward(row)
            conf[data.targets[i], allprobs[i].argmax()] += 1
            # TODO: not needed?
            allprobs[i] /= allprobs[i].sum()

        return conf, 1 - conf.trace() / float(conf.sum()), allprobs

    ##############################################################################

    def passForward(self, row):
        # input
        sum = row * self.weights[0]
        self.outs[0] = (sum, self.phi(sum))

        # next layers
        for w in range(1, self.nLayer + 1):
            sum = self.outs[w - 1][1] * self.weights[w]
            self.outs[w] = (sum, self.phi(sum))

        return self.outs[-1][1][0]

    ##############################################################################

    def train(self, data):
        sse = sys.maxint
        pp = sys.maxint

        self.initWeights(data.cls, data.feat)

        interactive = self.interactive and os.isatty(sys.stdout.fileno())

        ref = numpy.zeros([1, self.nOut])
        c_old = 0
        allprobs = [None] * len(data)

        for i in range(self.nIter):
            conf = numpy.zeros([self.nOut, self.nOut])

            sumold = sse
            ppold = pp
            sse = 0.0
            sce = 0.0
            if interactive:
                pbar = ProgressBar(maxval=len(data)).start()
            for k, row in enumerate(data):
                probs = self.passForward(row)
                ref[0, c_old] = 0
                ref[0, data.targets[k]] = 1
                c_old = data.targets[k]

                diff = ref - probs
                if self.errfct == "sse":
                    self.deltas[-1] = numpy.multiply(diff, self.dphi(probs))
                    sse += numpy.power(diff, 2).sum()
                elif self.errfct == "sce":
                    self.deltas[-1] = diff * self.steepness

                    # cross entropy: 1/C * sum{ (tk*log(yk)) + (1-tk)*log(1-yk) }
                    sce -= (
                        (numpy.multiply(ref, numpy.log(probs)) + numpy.multiply((1 - ref), numpy.log(1 - probs)))
                    ).sum() / self.nOut

                weightschange = self.passBackward(row)
                if interactive:
                    pbar.update(k)

                # train statistics
                c_pred = probs.argmax()
                conf[data.targets[k], c_pred] += 1
                allprobs[k] = probs

            # conf_, err, tepr = self.test( testdata )
            # conf_, err, tepr = self.test( data )
            output = self.metrics.obtain(data, allprobs, conf, 1 - conf.trace() / float(conf.sum()))

            if self.errfct == "sse":
                output["errfct"] = "SSE: % 6.4f" % sse
            elif self.errfct == "sce":
                output["errfct"] = "SCE: % 6.4f" % sce

            if interactive:
                pbar.finish()
            metrics = "%(lift)s%(pp)s%(fscore)s%(tester)s%(auc)s" % output
            logging.warning(
                "iter: % 4d er: %.6f %s rate: %.4f%s",
                i + 1,
                1 - conf.trace() / conf.sum(),
                output["errfct"],
                self.etha,
                metrics,
            )

            # for i in range(len(self.weights)):
            #    print "pruned:", (numpy.abs(self.weights[i])<0.1).sum()
            #    self.weights[i][numpy.abs(self.weights[i])<0.1]=0
            # if weightschange < self.mindsumweights:
            #    self.weights[-1] = self.weights[-1] + numpy.random.standard_normal([self.nNodes, self.nOut]) * 0.1
            #    logging.warning("disturbing weights for leaving local optimum...")

            if sumold - sse < self.mindsse or ppold - pp < self.mindpp:
                self.etha *= self.stepsizedec
            else:
                self.etha *= self.stepsizeinc
        return allprobs

    ##############################################################################

    def passBackward(self, row):
        # precompute deltas for the inner layers
        for l in range(self.nLayer)[::-1]:
            self.deltas[l] = self.deltas[l + 1] * self.weights[l + 1].T
            self.deltas[l] = numpy.multiply(self.deltas[l], self.dphi(self.outs[l][0]))
            # for i in range(self.nNodes):
            #    self.deltas[l][i] = 0.0
            #    for j in range(len(self.deltas[l+1])):
            #        self.deltas[l][i] += self.deltas[l+1][j] * self.weights[l+1][i,j]
            #    self.deltas[l][i] *= self.dphi( self.outs[l][i][0] )
        # self.etha *= (1-self.alpha)
        # output layer
        delta = self.etha * numpy.outer(self.outs[-2][1], self.deltas[-1]) + self.alpha * self.lastchange[-1]
        self.weights[-1] = self.weights[-1] + delta
        self.lastchange[-1] = delta
        # for j in range(self.nOut):
        #    f = self.etha * self.deltas[-1][j]
        #    for i in range( self.nNodes ):
        #        self.weights[-1][i,j] += f * self.outs[-2][i][1]

        # recalculate weights forwards
        # inner layers
        for l in range(1, self.nLayer):
            # for j in range(self.nNodes):
            #    f = self.etha * self.deltas[l][j]
            #    for i in range (self.nNodes):
            #        self.weights[l][i,j] += f * self.outs[l-1][i][1]
            delta = (1 - self.alpha) * self.etha * numpy.outer(
                self.outs[l - 1][1], self.deltas[l]
            ) + self.alpha * self.lastchange[l]
            self.weights[l] = self.weights[l] + delta
            self.lastchange[l] = delta

        # input vector once again influences w'
        # for j in range(self.nNodes):
        #    f = self.etha * self.deltas[0][j]
        #    for i in range(self.nIn):
        #        self.weights[0][i,j] += f * row[i]
        delta = (1 - self.alpha) * self.etha * numpy.outer(row, self.deltas[0]) + self.alpha * self.lastchange[0]
        self.weights[0] = self.weights[0] + delta
        self.lastchange[0] = delta

        return sum([d.sum() for d in self.lastchange])

    ##############################################################################

    def savemodel(self, modelname):
        import pickle

        model = (self.weights, self.config)
        pickle.dump(model, open(modelname, "w"))

    def loadmodel(self, modelname):
        import pickle

        self.weights, self.config = pickle.load(file(modelname))
        self.reload(self.config, self.weights)
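For reference, passBackward above is plain backpropagation with momentum: writing etha for the learning rate and alpha for the momentum factor, each inner-layer (and input-layer) update it computes is

    delta_W[l] = (1 - alpha) * etha * outer(outs[l-1], deltas[l]) + alpha * lastchange[l]

while the output-layer update omits the (1 - alpha) damping factor, exactly as in the code.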
Example #32
 def gauge(self, metric_name, value, tags=None):
     new_gauge = Metric(metric_name)
     new_gauge.metric = value
     self.write_queue.put(new_gauge, tags)
Example #33
class Model:
    """
    This class handles basic methods for handling the model:
    1. Fit the model
    2. Make predictions
    3. Save
    4. Load
    """
    def __init__(self, input_size, n_channels, hparams):

        self.hparams = hparams

        self.device = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")

        # define the models
        self.model = WaveNet(n_channels=n_channels).to(self.device)
        summary(self.model, (input_size, n_channels))
        # self.model.half()

        if torch.cuda.device_count() > 1:
            print("Number of GPUs will be used: ",
                  torch.cuda.device_count() - 3)
            self.model = DP(self.model,
                            device_ids=list(
                                range(torch.cuda.device_count() - 3)))
        else:
            print('Only one GPU is available')

        self.metric = Metric()
        self.num_workers = 1
        ########################## compile the model ###############################

        # define optimizer
        self.optimizer = torch.optim.Adam(params=self.model.parameters(),
                                          lr=self.hparams['lr'],
                                          weight_decay=1e-5)

        # weights = torch.Tensor([0.025,0.033,0.039,0.046,0.069,0.107,0.189,0.134,0.145,0.262,1]).cuda()
        self.loss = nn.BCELoss()  # CompLoss(self.device)

        # define early stopping
        self.early_stopping = EarlyStopping(
            checkpoint_path=self.hparams['checkpoint_path'] + '/checkpoint.pt',
            patience=self.hparams['patience'],
            delta=self.hparams['min_delta'],
        )
        # lr scheduler
        self.scheduler = ReduceLROnPlateau(
            optimizer=self.optimizer,
            mode='max',
            factor=0.2,
            patience=3,
            verbose=True,
            threshold=self.hparams['min_delta'],
            threshold_mode='abs',
            cooldown=0,
            eps=0,
        )

        self.seed_everything(42)
        self.threshold = 0.75
        self.scaler = torch.cuda.amp.GradScaler()

    def seed_everything(self, seed):
        np.random.seed(seed)
        os.environ['PYTHONHASHSEED'] = str(seed)
        torch.manual_seed(seed)

    def fit(self, train, valid):

        train_loader = DataLoader(
            train,
            batch_size=self.hparams['batch_size'],
            shuffle=True,
            num_workers=self.num_workers)  # ,collate_fn=train.my_collate
        valid_loader = DataLoader(
            valid,
            batch_size=self.hparams['batch_size'],
            shuffle=False,
            num_workers=self.num_workers)  # ,collate_fn=train.my_collate

        # tensorboard object
        writer = SummaryWriter()

        for epoch in range(self.hparams['n_epochs']):

            # train the model
            self.model.train()
            avg_loss = 0.0

            train_preds, train_true = torch.Tensor([]), torch.Tensor([])

            for (X_batch, y_batch) in tqdm(train_loader):
                y_batch = y_batch.float().to(self.device)
                X_batch = X_batch.float().to(self.device)

                self.optimizer.zero_grad()
                # get model predictions
                pred = self.model(X_batch)
                X_batch = X_batch.cpu().detach()

                # process loss_1
                pred = pred.view(-1, pred.shape[-1])
                y_batch = y_batch.view(-1, y_batch.shape[-1])
                train_loss = self.loss(pred, y_batch)
                y_batch = y_batch.float().cpu().detach()
                pred = pred.float().cpu().detach()

                train_loss.backward()  # self.scaler.scale(train_loss).backward()
                # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1)
                # torch.nn.utils.clip_grad_value_(self.model.parameters(), 0.5)
                self.optimizer.step()  # self.scaler.step(self.optimizer)  #
                self.scaler.update()

                # calc metric
                avg_loss += train_loss.item() / len(train_loader)

                train_true = torch.cat([train_true, y_batch], 0)
                train_preds = torch.cat([train_preds, pred], 0)

            # calc training metric
            train_preds = train_preds.numpy()
            train_preds[np.where(train_preds >= self.threshold)] = 1
            train_preds[np.where(train_preds < self.threshold)] = 0
            metric_train = self.metric.compute(labels=train_true.numpy(),
                                               outputs=train_preds)

            # evaluate the model
            print('Model evaluation...')
            self.model.zero_grad()
            self.model.eval()
            val_preds, val_true = torch.Tensor([]), torch.Tensor([])
            avg_val_loss = 0.0
            with torch.no_grad():
                for X_batch, y_batch in valid_loader:
                    y_batch = y_batch.float().to(self.device)
                    X_batch = X_batch.float().to(self.device)

                    pred = self.model(X_batch)
                    X_batch = X_batch.float().cpu().detach()

                    pred = pred.reshape(-1, pred.shape[-1])
                    y_batch = y_batch.view(-1, y_batch.shape[-1])

                    avg_val_loss += self.loss(
                        pred, y_batch).item() / len(valid_loader)
                    y_batch = y_batch.float().cpu().detach()
                    pred = pred.float().cpu().detach()

                    val_true = torch.cat([val_true, y_batch], 0)
                    val_preds = torch.cat([val_preds, pred], 0)

            # evaluate metric
            val_preds = val_preds.numpy()
            val_preds[np.where(val_preds >= self.threshold)] = 1
            val_preds[np.where(val_preds < self.threshold)] = 0
            metric_val = self.metric.compute(val_true.numpy(), val_preds)

            self.scheduler.step(avg_val_loss)
            res = self.early_stopping(score=avg_val_loss, model=self.model)

            # print statistics
            if self.hparams['verbose_train']:
                print(
                    '| Epoch: ',
                    epoch + 1,
                    '| Train_loss: ',
                    avg_loss,
                    '| Val_loss: ',
                    avg_val_loss,
                    '| Metric_train: ',
                    metric_train,
                    '| Metric_val: ',
                    metric_val,
                    '| Current LR: ',
                    self.__get_lr(self.optimizer),
                )

            # # add history to tensorboard
            writer.add_scalars(
                'Loss',
                {
                    'Train_loss': avg_loss,
                    'Val_loss': avg_val_loss
                },
                epoch,
            )

            writer.add_scalars('Metric', {
                'Metric_train': metric_train,
                'Metric_val': metric_val
            }, epoch)

            if res == 2:
                print("Early Stopping")
                print(
                    f'global best min val_loss model score {self.early_stopping.best_score}'
                )
                break
            elif res == 1:
                print(f'save global val_loss model score {avg_val_loss}')

        writer.close()

        self.model.zero_grad()

        return True

    def predict(self, X_test):

        # evaluate the model
        self.model.eval()

        test_loader = torch.utils.data.DataLoader(
            X_test,
            batch_size=self.hparams['batch_size'],
            shuffle=False,
            num_workers=self.num_workers)  # ,collate_fn=train.my_collate

        test_preds = torch.Tensor([])
        print('Start generation of predictions')
        with torch.no_grad():
            for i, (X_batch, y_batch) in enumerate(tqdm(test_loader)):
                X_batch = X_batch.float().to(self.device)

                pred = self.model(X_batch)

                X_batch = X_batch.float().cpu().detach()

                test_preds = torch.cat([test_preds, pred.cpu().detach()], 0)

        return test_preds.numpy()

    def get_heatmap(self, X_test):

        # evaluate the model
        self.model.eval()

        test_loader = torch.utils.data.DataLoader(
            X_test,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.num_workers)  # ,collate_fn=train.my_collate

        test_preds = torch.Tensor([])
        with torch.no_grad():
            for i, (X_batch) in enumerate(test_loader):
                X_batch = X_batch.float().to(self.device)

                pred = self.model.activatations(X_batch)
                pred = torch.sigmoid(pred)

                X_batch = X_batch.float().cpu().detach()

                test_preds = torch.cat([test_preds, pred.cpu().detach()], 0)

        return test_preds.numpy()

    def model_save(self, model_path):
        torch.save(self.model, model_path)
        return True

    def model_load(self, model_path):
        self.model = torch.load(model_path)
        return True

    ################## Utils #####################

    def __get_lr(self, optimizer):
        for param_group in optimizer.param_groups:
            return param_group['lr']
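A minimal sketch of driving this class, assuming datasets that yield (X, y) float tensor pairs and an hparams dict with the keys the class reads (all values illustrative):

hparams = {'lr': 1e-3, 'batch_size': 4, 'n_epochs': 1, 'patience': 3,
           'min_delta': 1e-3, 'checkpoint_path': '.', 'verbose_train': True}
model = Model(input_size=4000, n_channels=12, hparams=hparams)
model.fit(train_dataset, valid_dataset)   # datasets of (X, y) pairs
preds = model.predict(test_dataset)       # also yields (X, y) pairs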
Example #34
    def best_split(self, metric: Metric) -> Tuple[float, float]:

        vals = metric.evaluate(self)
        i = metric.best_index(vals)
        
        return self.uniq[i], vals[i]
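best_split only needs a Metric that exposes evaluate (one score per candidate split) and best_index (which direction is better). A hypothetical minimizing variant:

import numpy as np

class MinimizingMetric:
    # Hypothetical metric matching best_split's interface; the scores here
    # are placeholders where a real metric would compute per-split impurity.
    def evaluate(self, node):
        return np.asarray([0.4, 0.2, 0.3])
    def best_index(self, vals):
        return int(np.argmin(vals))   # lower is better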
Example #35
class LSTM_Env(gym.Env):
    def __init__(self, df_m15, df_h1, serial=False):
        self.df_m15 = standardize_data(
            df_m15, method="log_and_diff").dropna().reset_index(drop=True)
        self.df_h1 = df_h1
        self.net_worth = INITIAL_BALANCE
        self.prev_net_worth = INITIAL_BALANCE
        self.usd_held = INITIAL_BALANCE
        self.eur_held = 0
        self.current_step = 0
        self.reward = 0
        self.serial = serial
        # trade history
        self.trades = []
        # our profit in the last 10 trades
        self.returns = np.zeros(10)

        # index of episodes (1 episode equivalent to 1 week of trading)
        self.episode_indices_m15, self.h1_indices = get_episode(
            self.df_m15, self.df_h1)
        self.action_space = spaces.Discrete(6)
        # observation space includes: OHLC prices (normalized), close price (unnormalized),
        # time in minutes (encoded), day of week (encoded), action history, net worth change history;
        # both the minute and day features are encoded with sin and cos to retain circularity
        self.observation_space = spaces.Box(low=-10,
                                            high=10,
                                            shape=(12, WINDOW_SIZE + 1),
                                            dtype=np.float16)
        self.metrics = Metric(INITIAL_BALANCE)
        self.setup_active_df()
        self.agent_history = {
            "actions":
            np.zeros(len(self.active_df) + WINDOW_SIZE),
            "net_worth":
            np.zeros(len(self.active_df) + WINDOW_SIZE),
            "eur_held":
            np.zeros(len(self.active_df) + WINDOW_SIZE),
            "usd_held":
            np.full(len(self.active_df), self.usd_held / BALANCE_NORM_FACTOR)
        }

    def get_metrics(self):
        return self.metrics

    def get_current_price(self):
        """
        :return: (float) closing price at current time step
        """
        return self.active_df.iloc[self.current_step + WINDOW_SIZE].Close

    def reset(self):
        """
        reset the environment to a fresh new state
        :return: (Gym.Box) new observation
        """
        self.reset_session()
        return self.next_observation()

    def setup_active_df(self):
        """
        select fragment of data we will use to train agent in this epoch
        :return: None
        """
        # if serial mode is enabled, we traverse the training data from 2012->2019;
        # otherwise we jump randomly between those times
        if self.serial:
            self.steps_left = len(self.df_m15) - WINDOW_SIZE - 1
            self.frame_start = 0
        else:
            # pick random episode index from our db
            episode_index = np.random.randint(0,
                                              self.metrics.current_epoch * 8)
            # check if we have reached the end of dataset
            # and reroll the invalid index
            if episode_index >= len(self.episode_indices_m15):
                episode_index = np.random.randint(
                    0, len(self.episode_indices_m15))

            (start_episode,
             end_episode) = self.episode_indices_m15[episode_index]
            self.steps_left = end_episode - start_episode - WINDOW_SIZE
            self.frame_start = start_episode

        self.active_df = self.df_m15[self.frame_start:self.frame_start +
                                     self.steps_left + WINDOW_SIZE + 1]

    def reset_variables(self):
        """
        reset all variables that involve with the environment
        :return: None
        """
        self.current_step = 0
        self.net_worth = INITIAL_BALANCE
        self.prev_net_worth = INITIAL_BALANCE
        self.usd_held = INITIAL_BALANCE
        self.eur_held = 0
        self.trades = []
        self.returns = np.zeros(10)
        self.agent_history = {
            "actions":
            np.zeros(len(self.active_df) + WINDOW_SIZE),
            "net_worth":
            np.zeros(len(self.active_df) + WINDOW_SIZE),
            "eur_held":
            np.zeros(len(self.active_df) + WINDOW_SIZE),
            "usd_held":
            np.full(len(self.active_df), self.usd_held / BALANCE_NORM_FACTOR)
        }

    def reset_session(self):
        """
        reset all variables and setup new training session
        :return: None
        """
        self.setup_active_df()
        self.reset_variables()

    def calculate_reward(self, action):
        """
        update reward we get at this time step
        :return: None
        """
        # calculate reward
        self.reward = 0
        profit = self.net_worth - self.prev_net_worth
        if profit > 0:
            self.reward += 0.3
        elif profit < 0:
            self.reward -= 0.1
        # if np.mean(self.returns) > 0:
        #     self.reward += 0.5
        # elif np.mean(self.returns) < 0:
        #     self.reward -= 0.4

        # wining_trade_count = np.sum(self.returns > 0)
        # losing_trade_count = np.sum(self.returns < 0)
        # if wining_trade_count > 5:
        #     self.reward += wining_trade_count * 0.05
        # if losing_trade_count > 5:
        #     self.reward -= losing_trade_count * 0.05

        if abs(self.eur_held) > LOT_SIZE * 2:
            self.reward -= 0.2 * abs(self.eur_held) / LOT_SIZE

    def step(self, action):
        """
        Perform the chosen action and get the response from the environment
        :param action: (int) 0 = hold, 1 = buy, 2 = sell, 3 = close, 4 = close and buy, 5 = close and sell
        :return: tuple contains (new observation, reward, isDone, {})
        """
        # perform action and update utility variables
        self.take_action(action, self.get_current_price())
        self.update_env(action)
        self.calculate_reward(action)
        # summary training process
        self.metrics.summary(action, self.net_worth, self.prev_net_worth,
                             self.reward, self.eur_held)

        # get next observation and check whether we have finished this episode yet
        obs = self.next_observation()
        done = self.net_worth <= 0

        # reset session if we've reached the end of episode
        if self.steps_left == 0:
            self.reset_session()
            done = True

        return obs, self.reward, done, {}

    def take_action(self, action, sell_price):
        """
        Perform the chosen action and then update our balance according to the market state
        :param action: (int) 0 = hold, 1 = buy, 2 = sell, 3 = close, 4 = close and buy, 5 = close and sell
        :param sell_price: (float) current closing price
        :return: None
        """
        # in forex, we buy with current price + comission (it's normaly 3 pip
        # with eurusd pair)
        buy_price = sell_price + COMISSION
        '''assume we have 100,000 usd and 0 eur
        assume current price is 1.5 (1 eur = 1.5 usd)
        assume comission = 3 pip = 0.0003
        => true buy price = 1.5003, sell price = 1.5
        buy 0.5 lot eur => we have 50,000 eur and (100,000 - 50,000 * 1.5003) = 24985 usd
        => out networth: 50,000 * 1.5 + 24985 = 99985 (we lose 3 pip, 1 pip = 5 usd,
        we are using 0.5 lot as defaut, if we buy 1 lot => 1 pip = 10 usd, correct!!! )'''
        if action == CLOSE_AND_BUY:  # buy eur
            self.close_and_buy(buy_price, sell_price)
        elif action == CLOSE_AND_SELL:  # sell eur
            self.close_and_sell(buy_price, sell_price)
        elif action == CLOSE:
            self.close_all_order(buy_price, sell_price)
        elif action == BUY:
            self.buy(buy_price)
        elif action == SELL:
            self.sell(sell_price)
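
The worked example in the docstring can be checked numerically; this standalone snippet just replays those numbers and introduces no new logic:

# numeric replay of the commission example above
sell_price = 1.5
commission = 0.0003                  # 3 pips
buy_price = sell_price + commission  # 1.5003
usd, eur = 100_000.0, 0.0

eur += 50_000                        # buy 0.5 lot
usd -= 50_000 * buy_price            # 100,000 - 75,015 = 24,985
net_worth = eur * sell_price + usd   # 75,000 + 24,985 = 99,985
print(net_worth)                     # 99985.0 -> a 15 usd loss = 3 pips * 5 usd/pip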

    def update_env(self, action):
        """
        update some environment variables relate to net worth
        :param action: (Int) action enum
        :return: None
        """
        sell_price = self.get_current_price()
        buy_price = sell_price + COMISSION
        # convert our net worth to pure USD (mark-to-market at bid/ask)
        self.prev_net_worth = self.net_worth
        self.net_worth = self.usd_held + \
                         (self.eur_held * sell_price if self.eur_held > 0 else self.eur_held * buy_price)

        self.update_agent_history(sell_price, action)
        # advance the epoch counter every 500,000 steps (used to grow the training data)
        if self.metrics.num_step % 500000 == 0 and self.metrics.num_step > 0:
            self.metrics.current_epoch += 1
        self.steps_left -= 1
        self.current_step += 1
        self.returns[self.current_step %
                     10] = self.net_worth - self.prev_net_worth
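
self.returns acts as a fixed-size ring buffer holding the last ten per-step P&L values, written via modular indexing. A minimal sketch of the trick with made-up P&L values:

import numpy as np

returns = np.zeros(10)               # ring buffer of the 10 most recent P&L values
for step, pnl in enumerate([5.0, -2.0, 1.0] * 7):   # 21 fake P&L values
    returns[step % 10] = pnl         # each write overwrites the oldest slot
print(returns)                       # holds only the 10 most recent values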

    def update_agent_history(self, sell_price, action):
        """
        update variables relate to agent trading history
        :param sell_price: (Float)
        :param action: (Int)
        :return: None
        """
        self.trades.append({
            'price': sell_price,
            'eur_held': self.eur_held,
            'usd_held': self.usd_held,
            'net_worth': self.net_worth,
            'prev_net_worth': self.prev_net_worth,
            'type': ACTIONS[action]
        })

        # save these values so they can be fed back as observation features
        self.agent_history["actions"][self.current_step + WINDOW_SIZE +
                                      1] = action
        self.agent_history["eur_held"][self.current_step + WINDOW_SIZE +
                                       1] = self.eur_held / BALANCE_NORM_FACTOR
        self.agent_history["usd_held"][self.current_step + WINDOW_SIZE +
                                       1] = self.usd_held / BALANCE_NORM_FACTOR
        self.agent_history["net_worth"][self.current_step + WINDOW_SIZE + 1] = \
            (self.net_worth - self.prev_net_worth) / LOT_SIZE

    def close_and_buy(self, buy_price, sell_price):
        # close the open position first: longs settle at the sell (bid)
        # price, shorts are bought back at the buy (ask) price
        self.usd_held += (self.eur_held * sell_price
                          if self.eur_held > 0 else self.eur_held * buy_price)
        # then open a new long position
        self.eur_held = AMOUNT * LOT_SIZE
        self.usd_held -= AMOUNT * LOT_SIZE * buy_price

    def close_and_sell(self, buy_price, sell_price):
        # close the open position, then open a new short position
        self.usd_held += (self.eur_held * sell_price
                          if self.eur_held > 0 else self.eur_held * buy_price)
        self.eur_held = -AMOUNT * LOT_SIZE
        self.usd_held += AMOUNT * LOT_SIZE * sell_price

    def close_all_order(self, buy_price, sell_price):
        # close the trade, releasing all EUR we are holding (long or short)
        self.usd_held += (self.eur_held * sell_price
                          if self.eur_held > 0 else self.eur_held * buy_price)
        self.eur_held = 0

    def buy(self, buy_price):
        # add to the long position at the ask price
        self.eur_held += AMOUNT * LOT_SIZE
        self.usd_held -= AMOUNT * LOT_SIZE * buy_price

    def sell(self, sell_price):
        # add to the short position at the bid price
        self.eur_held -= AMOUNT * LOT_SIZE
        self.usd_held += AMOUNT * LOT_SIZE * sell_price

    def next_observation(self):
        """Return the next observation window of the environment."""
        end = self.current_step + WINDOW_SIZE + 1

        # atr = ta.average_true_range(self.active_df.High[self.current_step: end] * 100,
        #                             self.active_df.Low[self.current_step: end] * 100,
        #                             self.active_df.Close[self.current_step: end] * 100, n=9, fillna=True).to_numpy()
        # macd = ta.macd(self.active_df.Close[self.current_step: end] * 200, n_fast=9, n_slow=9, fillna=True).to_numpy()
        # rsi = ta.rsi(self.active_df.Close[self.current_step: end] / 100, fillna=True, n=9).to_numpy()

        obs = np.array([
            # self.active_df['Open'].values[self.current_step: end],
            # self.active_df['High'].values[self.current_step: end],
            # self.active_df['Low'].values[self.current_step: end],
            # self.active_df['NormedClose'].values[self.current_step: end],
            # self.active_df['Close'].values[self.current_step: end],
            self.active_df['CandleEmbededX'].values[self.current_step:end],
            self.active_df['CandleEmbededY'].values[self.current_step:end],
            self.active_df['TimeEncodedX'].values[self.current_step:end],
            self.active_df['TimeEncodedY'].values[self.current_step:end],
            self.active_df['DayEncodedX'].values[self.current_step:end],
            self.active_df['DayEncodedY'].values[self.current_step:end],
            # self.active_df['HighRiskTime'].values[self.current_step: end],
            self.agent_history["actions"][self.current_step:end],
            self.agent_history["net_worth"][self.current_step:end],
            self.agent_history["eur_held"][self.current_step:end],
            self.agent_history["usd_held"][self.current_step:end]
            # atr,
            # macd,
            # rsi
        ])

        return obs
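
The stacked observation is a (channels, window) array: six market encodings plus four agent-history channels, each covering WINDOW_SIZE + 1 steps. A dummy-data shape check (the WINDOW_SIZE value is illustrative):

import numpy as np

WINDOW_SIZE = 60                     # illustrative value
window = WINDOW_SIZE + 1
# 6 market-encoding channels + 4 agent-history channels, window-aligned
obs = np.stack([np.zeros(window) for _ in range(10)])
print(obs.shape)                     # (10, 61)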

    def render(self, mode='human'):
        """
        show information about trainin process
        :param mode: (string) if human mode is selected, display a additional
            graph that visualize trades, net worth, prices...
        :return: None
        """
        if mode == 'human':
            # init graph
            if not hasattr(self, 'visualization'):
                self.visualization = StockTradingGraph(self.df_m15,
                                                       "Reward visualization")
            # render it if we have enough information (the mode check above
            # already guarantees we are in 'human' mode)
            if self.current_step > WINDOW_SIZE:
                self.visualization.render(self.current_step,
                                          self.net_worth,
                                          self.reward,
                                          window_size=WINDOW_SIZE)
        # print out some statistic about agent
        if self.metrics.num_step % 50 == 0:
            # save these variables for plotting
            self.metrics.update_for_plotting()
            print("{:<25s}{:>5.2f}".format("current step:", self.current_step))
            print("{:<25s}{:>5.2f}".format("Total win trades:",
                                           self.metrics.win_trades))
            print("{:<25s}{:>5.2f}".format("Total lose trades:",
                                           self.metrics.lose_trades))
            print("{:<25s}{:>5.2f}".format("Total hold trades:",
                                           self.metrics.hold_trades))
            print("{:<25s}{:>5.2f}".format("Avg win value:",
                                           self.metrics.avg_win_value))
            print("{:<25s}{:>5.2f}".format("Avg lose value:",
                                           self.metrics.avg_lose_value))
            print("{:<25s}{:>5.2f}".format(
                "Avg reward:",
                self.metrics.avg_reward / self.metrics.num_step))
            print("{:<25s}{:>5.2f}".format("Highest net worth:",
                                           self.metrics.highest_net_worth))
            print("{:<25s}{:>5.2f}".format("Lowest net worth:",
                                           self.metrics.lowest_net_worth))
            print("{:<25s}{:>5.2f}".format("Most profit trade win:",
                                           self.metrics.most_profit_trade))
            print("{:<25s}{:>5.2f}".format("Worst trade lose:",
                                           self.metrics.worst_trade))
            print("{:<25s}{:>5.2f}".format(
                "Win ratio:", self.metrics.win_trades /
                (self.metrics.lose_trades + 1 + self.metrics.win_trades)))
            print('-' * 80)
Example #36
0
    def __init__(self, input_size, n_channels, hparams, gpu, inference=False):

        self.hparams = hparams

        if inference:
            self.device = torch.device('cpu')
            self.model = ECGNet(n_channels=n_channels,
                                hparams=self.hparams).to(self.device)
        else:
            if torch.cuda.device_count() > 1:
                if len(gpu) > 0:
                    print("Number of GPUs will be used: ", len(gpu))
                    self.device = torch.device(f"cuda:{gpu[0]}" if torch.cuda.
                                               is_available() else "cpu")
                    self.model = ECGNet(n_channels=n_channels,
                                        hparams=self.hparams).to(self.device)
                    self.model = DP(self.model,
                                    device_ids=gpu,
                                    output_device=gpu[0])
                else:
                    print("Number of GPUs will be used: ",
                          torch.cuda.device_count() - 5)
                    self.device = torch.device(
                        "cuda:0" if torch.cuda.is_available() else "cpu")
                    self.model = ECGNet(n_channels=n_channels,
                                        hparams=self.hparams).to(self.device)
                    self.model = DP(self.model,
                                    device_ids=list(
                                        range(torch.cuda.device_count() - 5)))
            else:
                self.device = torch.device(
                    "cuda:0" if torch.cuda.is_available() else "cpu")
                self.model = ECGNet(n_channels=n_channels,
                                    hparams=self.hparams).to(self.device)
                print('Only one GPU is available')

        # optional debugging aids:
        # summary(self.model, (input_size, n_channels))
        # print(torch.cuda.is_available())

        self.metric = Metric()
        self.num_workers = 18
        self.threshold = 0.5

        ########################## compile the model ###############################

        # define optimizer
        self.optimizer = torch.optim.Adam(params=self.model.parameters(),
                                          lr=self.hparams['lr'])

        weights = torch.Tensor([
            1., 1., 1., 1., 0.5, 1., 1., 1., 1., 1., 1., 1., 0.5, 0.5, 1., 1.,
            1., 1., 0.5, 1., 1., 1., 1., 0.5, 1., 1., 0.5
        ]).to(self.device)

        self.loss = nn.BCELoss(weight=weights)  # CompLoss(self.device) #
        self.decoder_loss = nn.MSELoss()

        # define early stopping
        self.early_stopping = EarlyStopping(
            checkpoint_path=self.hparams['checkpoint_path'] + '/checkpoint' +
            str(self.hparams['start_fold']) + '.pt',
            patience=self.hparams['patience'],
            delta=self.hparams['min_delta'],
            is_maximize=True,
        )
        # lr scheduler
        self.scheduler = ReduceLROnPlateau(
            optimizer=self.optimizer,
            mode='max',
            factor=0.2,
            patience=1,
            verbose=True,
            threshold=self.hparams['min_delta'],
            threshold_mode='abs',
            cooldown=0,
            eps=0,
        )

        self.seed_everything(42)

        self.postprocessing = PostProcessing(fold=self.hparams['start_fold'])
        self.scaler = torch.cuda.amp.GradScaler()
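
A hedged sketch of how the pieces created in this __init__ (optimizer, BCELoss, GradScaler, ReduceLROnPlateau) typically fit together in a mixed-precision training step. This is not the repository's actual loop; the loader is assumed to yield (x, y) batches and the model to return a single prediction tensor:

import torch

def train_one_epoch(model, loss_fn, optimizer, scaler, loader, device):
    # one mixed-precision epoch over `loader` (assumed to yield (x, y) pairs)
    model.train()
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()
        with torch.cuda.amp.autocast():      # half-precision forward pass
            y_hat = model(x)
        # BCELoss is not autocast-safe, so compute it in full precision
        loss = loss_fn(y_hat.float(), y)
        scaler.scale(loss).backward()        # backward on the scaled loss
        scaler.step(optimizer)               # unscales grads, then optimizer.step()
        scaler.update()                      # adjust the loss-scale factor

# after each validation pass, feed the maximized metric to the scheduler:
# scheduler.step(val_score)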