def _init_saver(self):
    self.saver = saver.Saver(
        model=self._model_settings.name,
        root_directory=self._model_settings.root_directory,
        hyperparameters=self._model_settings.hyperparameters,
        function_id=self.function_id,
        dimension=self.dimensions,
        kernel_id=self.kernel_id,
        run=self.run,
        losses=self._model_settings.losses,
        additional_datasets=self._model_settings.additional_datasets,
        disable=not self._model_settings.save_results)
    if self._model_settings.allow_skipping:
        self.position = self.saver.already_computed
    elif self.saver.already_computed:
        self.saver.clean(co.saver.hdf5_tmp_path)
    if Settings.TEST_ON_DYNAMICS:
        print("At test time, full dynamics are being used\n")
    else:
        print("At test time, kinematics are being used\n")

    if Settings.KINEMATIC_NOISE:
        print(
            "Noise is being applied to the kinematics during training to simulate a poor controller\n"
        )

    ##############################
    ##### Initializing items #####
    ##############################

    # Initializing saver class (for loading & saving data)
    saver = saver.Saver(sess, filename)

    # Initializing replay buffer, with the option of a prioritized replay buffer
    if Settings.PRIORITY_REPLAY_BUFFER:
        replay_buffer = PrioritizedReplayBuffer()
    else:
        replay_buffer = ReplayBuffer()

    # Initializing thread & process list
    threads = []
    environment_processes = []

    # Event()s are used to communicate with threads while they run.
    # In this case, it is used to signal to the threads when it is time to stop gracefully.
    stop_run_flag = threading.Event()  # Flag to stop all threads
    replay_buffer_dump_flag = threading.Event()
Example #3
    return str(result, encoding="utf-8").upper()


myWallet = classes.Wallet(name="Sven's wallet",
                          curr_amounts={
                              "USD": 5.21,
                              "EUR": 0.29,
                              "BTC": 3.70511573 + 0.0599833 + 0.094,
                              "ETH": 12.65 + 0.03,
                              "LTC": 79.691 + 0.1
                          })

symbols = ("BTC-EUR", "ETH-EUR", "LTC-EUR")
subscriptions = [
    '{"action": "subscribe", "channel": "l3", "symbol": "' + symbol + '"}'
    for symbol in symbols
]

if __name__ == "__main__":
    for sub in subscriptions:
        logger.debug(sub)
    exchange = classes.Exchange(symbols)
    with saver.Saver(pathlib.Path("~/exchange_data")) as svr:
        try:
            server = servers.Exchange_Blockchain(subscriptions)
            for message in server.listen():
                svr.save(message)
                exchange.process(message)
                exchange.evaluate(myWallet)
        except KeyboardInterrupt:
            logger.debug("Cought Keyboard Interrupt, quitting.")
Example #4
def train():
    # hyper params
    n_epoch = 4000
    max_grad_norm = 3

    # load
    dictionary = pickle.load(open(path_dictionary, 'rb'))
    event2word, word2event = dictionary
    train_data = np.load(path_train_data)

    # create saver
    saver_agent = saver.Saver(path_exp)

    # config
    n_class = []
    for key in event2word.keys():
        n_class.append(len(dictionary[0][key]))

    # log
    print('num of classes:', n_class)

    # init
    net = TransformerModel(n_class)
    net.cuda()
    net.train()
    n_parameters = network_paras(net)
    print('n_parameters: {:,}'.format(n_parameters))
    saver_agent.add_summary_msg(' > params amount: {:,d}'.format(n_parameters))

    # load model
    if info_load_model:
        path_ckpt = info_load_model[0]  # path to ckpt dir
        loss = info_load_model[1]  # loss
        name = 'loss_' + str(loss)
        path_saved_ckpt = os.path.join(path_ckpt, name + '_params.pt')
        print('[*] load model from:', path_saved_ckpt)
        net.load_state_dict(torch.load(path_saved_ckpt))

    # optimizers
    optimizer = optim.Adam(net.parameters(), lr=init_lr)

    # unpack
    train_x = train_data['x']
    train_y = train_data['y']
    train_mask = train_data['mask']
    num_batch = len(train_x) // batch_size

    print('     num_batch:', num_batch)
    print('    train_x:', train_x.shape)
    print('    train_y:', train_y.shape)
    print('    train_mask:', train_mask.shape)

    # run
    start_time = time.time()
    for epoch in range(n_epoch):
        acc_loss = 0
        acc_losses = np.zeros(7)

        for bidx in range(num_batch):  # num_batch
            saver_agent.global_step_increment()

            # index
            bidx_st = batch_size * bidx
            bidx_ed = batch_size * (bidx + 1)

            # unpack batch data
            batch_x = train_x[bidx_st:bidx_ed]
            batch_y = train_y[bidx_st:bidx_ed]
            batch_mask = train_mask[bidx_st:bidx_ed]

            # to tensor
            batch_x = torch.from_numpy(batch_x).long().cuda()
            batch_y = torch.from_numpy(batch_y).long().cuda()
            batch_mask = torch.from_numpy(batch_mask).float().cuda()

            # run
            losses = net.train_step(batch_x, batch_y, batch_mask)
            loss = (losses[0] + losses[1] + losses[2] + losses[3] + losses[4] +
                    losses[5] + losses[6]) / 7

            # Update
            net.zero_grad()
            loss.backward()
            if max_grad_norm is not None:
                clip_grad_norm_(net.parameters(), max_grad_norm)
            optimizer.step()

            # print
            sys.stdout.write(
                '{}/{} | Loss: {:06f} | {:04f}, {:04f}, {:04f}, {:04f}, {:04f}, {:04f}, {:04f}\r'
                .format(bidx, num_batch, loss, losses[0], losses[1], losses[2],
                        losses[3], losses[4], losses[5], losses[6]))
            sys.stdout.flush()

            # acc
            acc_losses += np.array([l.item() for l in losses])
            acc_loss += loss.item()

            # log
            saver_agent.add_summary('batch loss', loss.item())

        # epoch loss
        runtime = time.time() - start_time
        epoch_loss = acc_loss / num_batch
        acc_losses = acc_losses / num_batch
        print('------------------------------------')
        print('epoch: {}/{} | Loss: {} | time: {}'.format(
            epoch, n_epoch, epoch_loss,
            str(datetime.timedelta(seconds=runtime))))
        each_loss_str = '{:04f}, {:04f}, {:04f}, {:04f}, {:04f}, {:04f}, {:04f}\r'.format(
            acc_losses[0], acc_losses[1], acc_losses[2], acc_losses[3],
            acc_losses[4], acc_losses[5], acc_losses[6])
        print('    >', each_loss_str)

        saver_agent.add_summary('epoch loss', epoch_loss)
        saver_agent.add_summary('epoch each loss', each_loss_str)

        # save model, with policy
        loss = epoch_loss
        if 0.4 < loss <= 0.8:
            fn = int(loss * 10) * 10
            saver_agent.save_model(net, name='loss_' + str(fn))
        elif 0.05 < loss <= 0.40:
            fn = int(loss * 100)
            saver_agent.save_model(net, name='loss_' + str(fn))
        elif loss <= 0.05:
            print('Finished')
            return
        else:
            saver_agent.save_model(net, name='loss_high')
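
# The "save model, with policy" block above buckets checkpoints by the epoch
# loss. checkpoint_name below is a hypothetical helper introduced here only to
# illustrate that branching; it is not a function from the original code.
def checkpoint_name(loss):
    if 0.4 < loss <= 0.8:
        return 'loss_' + str(int(loss * 10) * 10)  # e.g. 0.73 -> 'loss_70'
    elif 0.05 < loss <= 0.40:
        return 'loss_' + str(int(loss * 100))      # e.g. 0.23 -> 'loss_23'
    elif loss <= 0.05:
        return None                                # the original prints 'Finished' and returns
    else:
        return 'loss_high'                         # any loss above 0.8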
Example #5
    def train(self, train_data, trainConfig, device, resume):
        checkpoint_dir = trainConfig['experiment_Dir']
        batch_size = trainConfig['batch_size']
        data_ROOT = trainConfig['ROOT']
        torch.manual_seed(trainConfig["seed"])

        # create saver
        saver_agent = saver.Saver(checkpoint_dir)

        # Prepare model
        if resume != 'None':
            st_epoch, model = self.get_model(resume)
            print('Continuing training from epoch {}'.format(st_epoch))
        else:
            st_epoch, model = self.get_model()

        optimizer = optim.Adam(model.parameters(), lr=trainConfig['lr'])
        train_step = 0
        epoch_train_loss = []
        save_freq = trainConfig['save_freq']

        n_parameters = network_paras(model)
        print('n_parameters: {:,}'.format(n_parameters))
        saver_agent.add_summary_msg(
            ' > params amount: {:,d}'.format(n_parameters))

        # unpack
        train_x = train_data['x']
        train_y = train_data['y']
        mask = train_data['mask']
        num_groups = train_data['num_groups']

        num_batches = len(train_x) // batch_size

        print('>>> Start training')
        for epoch in range(st_epoch, trainConfig['num_epochs']):
            saver_agent.global_step_increment()

            train_loss = []
            st_time = time.time()
            model.train()

            for bidx in range(num_batches):

                model.zero_grad()

                # index
                bidx_st = batch_size * bidx
                bidx_ed = batch_size * (bidx + 1)

                # get batch
                batch_x = train_x[bidx_st:bidx_ed]
                batch_y = train_y[bidx_st:bidx_ed]
                batch_mask = mask[bidx_st:bidx_ed]
                n_group = np.max(num_groups[bidx_st:bidx_ed])

                # proc groups
                mems = tuple()
                for gidx in range(n_group):
                    group_x = batch_x[:, gidx, :]
                    group_y = batch_y[:, gidx, :]
                    group_mask = batch_mask[:, gidx, :]

                    group_x = torch.from_numpy(group_x).permute(
                        1, 0).contiguous().to(
                            self.device).long()  # (seq_len, bsz)
                    group_y = torch.from_numpy(group_y).permute(
                        1, 0).contiguous().to(self.device).long()
                    group_mask = torch.from_numpy(group_mask).to(
                        self.device).float()

                    ret = model(group_x, group_y, group_mask, *mems)
                    loss, mems = ret[0], ret[1:]
                    train_loss.append(loss.item())
                    loss.backward()

                    sys.stdout.write(
                        'epoch:{:3d}/{:3d}, batch: {:4d}/{:4d}, group: {:2d}/{:2d} | Loss: {:6f}\r'
                        .format(epoch, trainConfig['num_epochs'], bidx,
                                num_batches, gidx, n_group, loss.item()))
                    sys.stdout.flush()

                optimizer.step()

            #val_loss = self.validate(val_data, batch_size, model, trainConfig["seed"], trainConfig['max_eval_steps'])
            curr_train_loss = sum(train_loss) / len(train_loss)
            saver_agent.add_summary('epoch loss', curr_train_loss)

            #epoch_val_loss.append(val_loss)
            epoch_train_loss.append(curr_train_loss)
            # epoch_info = 'Train Loss: {:.5f} , Val Loss: {:.5f}, T: {:.3f}'.format(curr_train_loss, val_loss, time.time()-st_time)
            epoch_info = 'Epoch: {}, Train Loss: {:.5f} ,  T: {:.3f}'.format(
                epoch + 1, curr_train_loss,
                time.time() - st_time)
            print(epoch_info)

            # self.train_loss_record(epoch, curr_train_loss, checkpoint_dir, val_loss)
            self.train_loss_record(epoch, curr_train_loss, checkpoint_dir)
            self.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'model_setting': self.modelConfig,
                    'train_setting': trainConfig,
                    'state_dict': model.state_dict(),
                    'best_loss': curr_train_loss,
                    'optimizer': optimizer.state_dict(),
                }, checkpoint_dir, save_freq)

            if curr_train_loss < 0.01:
                print('Experiment [{}] finished at loss < 0.01.'.format(
                    checkpoint_dir))
                break
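
# Across Examples #4 and #5 the saver module is driven through the same small
# surface: construct a Saver with an experiment directory, advance it with
# global_step_increment(), log scalars and messages with add_summary() /
# add_summary_msg(), and persist weights with save_model(). The condensed
# sketch below assumes only those calls; run_experiment, train_one_epoch and
# the loss-based checkpoint name are placeholders introduced for illustration.
def run_experiment(path_exp, model, n_epochs, train_one_epoch):
    """Condensed sketch of the Saver pattern shared by Examples #4 and #5."""
    saver_agent = saver.Saver(path_exp)         # experiment / checkpoint directory
    for epoch in range(n_epochs):
        saver_agent.global_step_increment()     # advance the internal step counter
        epoch_loss = train_one_epoch(model)     # caller-supplied training callback
        saver_agent.add_summary('epoch loss', epoch_loss)
        saver_agent.save_model(model, name='loss_{:.2f}'.format(epoch_loss))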