Example #1
0
def run(sample_num, dqn_model, isRandom):
    """Train the LSTM on a sampled dataset and log per-epoch losses to CSV.

    Args:
        sample_num: index of the sampled-dataset file to load (part of the filename).
        dqn_model: identifier of the DQN model that produced the sample (part of the filename).
        isRandom: if True, load the randomly-sampled dataset/loss files instead of
            the DQN-sampled ones.

    Side effects:
        Reads a pickled training set from 'sampled_data/…' and writes one CSV row
        per epoch (Epoch, Train_loss, Train_ppl, Val_loss).
        Relies on module-level globals: MyLSTM, pickle, csv, w_t_RL, dataset_val,
        n_epoch and the *_LSTM hyperparameters.
    """
    # Initialize LSTM model, allocate the cuda memory
    model_LSTM = MyLSTM(n_letters, hidden_size_LSTM, nlayers_LSTM, True, True,
                        hidden_dropout_prob_LSTM, bidirectional_LSTM,
                        batch_size_LSTM, cuda_LSTM)
    model_LSTM.cuda()

    # The two branches differed only in filename infixes — build them once.
    data_infix = 'random_' if isRandom else ''
    loss_infix = 'random_loss_' if isRandom else 'dqn_loss_'
    suffix = str(dqn_model) + '_' + str(sample_num)

    # Load the data based on selection (sampled data or random data)
    with open('sampled_data/data_sampled_' + data_infix + suffix, 'rb') as b:
        dataset_train = pickle.load(b)

    # 'with' guarantees the CSV file is flushed and closed even if training
    # raises something other than KeyboardInterrupt (the original leaked the
    # handle in that case, since close() was only reached after the except).
    with open('sampled_data/data_sampled_' + loss_infix + suffix + '.csv',
              'w', encoding='UTF-8', newline='') as write_loss:
        writer = csv.DictWriter(
            write_loss,
            fieldnames=['Epoch', 'Train_loss', 'Train_ppl', 'Val_loss'])

        # LSTM Training Part
        # At any point, you can hit Ctrl + C to break out of training early.
        try:
            for epoch in range(1, n_epoch + 1):
                print("# Epoch", epoch)

                model_LSTM, train_loss, train_ppl = w_t_RL.train(
                    model_LSTM, dataset_train,
                    epoch)  # Train LSTM based on dataset_labelled
                val_loss = w_t_RL.evaluate(model_LSTM, dataset_val,
                                           epoch)  # Evaluate current loss
                writer.writerow({
                    'Epoch': str(epoch),
                    'Train_loss': str(train_loss),
                    'Train_ppl': str(train_ppl),
                    'Val_loss': str(val_loss)
                })

        except KeyboardInterrupt:
            print('-' * 89)
            print('Exiting from training early')
Example #2
0
                choice = random.randint(0,
                                        N_options - 1)  # Choose data randomly

            # dataset_train.append(data_list[choice]) # Add selected data into train dataset
            dataset_train = np.concatenate([dataset_train, data_list[choice]])

            # Update seen lists
            state = create_feature(data, uni_seen_list, bi_seen_list,
                                   tri_seen_list, i_10 * 10 + i + 1, True)
            state_list.append(state)
            print(state)

        loss_prev = w_t_RL.evaluate(model_LSTM, dataset_val,
                                    i_ep)  # Evaluate previous loss
        model_LSTM, loss_train, _ = w_t_RL.train(
            model_LSTM, dataset_train,
            i_ep)  # train LSTM based on dataset_labelled
        loss_curr = w_t_RL.evaluate(model_LSTM, dataset_val,
                                    i_ep)  # Evaluate current loss
        reward = (
            loss_prev - loss_curr
        ) / 10  # Reward(Difference between previous loss and current loss)

        print("# loss_prev, loss_cur, reward :", loss_prev, loss_curr, reward)
        '''
        # Save replay memory with "terminal" state when dataset is exhausted
        if i == len(dataset)//N_options-1:
            replay_memory.append([state,reward,"terminal"])
            break;

        state_prev = state # Save previous state
def train_LSTM(dataset, w_t_model):
    """Run one training pass of *w_t_model* over *dataset*.

    Thin wrapper around the shared ``w_t_RL.train`` routine; returns whatever
    that routine returns (the updated model plus training statistics).
    """
    trained = w_t_RL.train(w_t_model, dataset)
    return trained
Example #4
0
def run(sample_num, dqn_model, isRandom):
    """Train the LSTM sample-by-sample and log both losses and per-sample rewards.

    For each epoch, trains on one sample at a time and records the reward
    (validation-loss improvement) against the DQN's predicted value for that
    sample, then records the epoch-level losses.

    Args:
        sample_num: index of the sampled-dataset file to load (part of the filename).
        dqn_model: identifier of the DQN model that produced the sample (part of the filename).
        isRandom: if True, load the randomly-sampled data/value files instead of
            the DQN-sampled ones.

    Side effects:
        Reads two pickles (samples and their DQN values) from 'sampled_data/…'
        and writes two CSVs: per-epoch losses and per-iteration reward/value pairs.
        Relies on module-level globals: MyLSTM, pickle, csv, w_t_RL, dataset_val,
        n_epoch and the *_LSTM hyperparameters.
    """
    # Initialize LSTM model, allocate the cuda memory
    model_LSTM = MyLSTM(n_letters, hidden_size_LSTM, nlayers_LSTM, True, True,
                        hidden_dropout_prob_LSTM, bidirectional_LSTM,
                        batch_size_LSTM, cuda_LSTM)
    model_LSTM.cuda()

    # The two branches differed only in filename infixes — build them once.
    data_infix = 'random_' if isRandom else ''
    loss_infix = 'random_loss_' if isRandom else 'dqn_loss_'
    diff_infix = 'random_diff_' if isRandom else 'dqn_diff_'
    suffix = str(dqn_model) + '_' + str(sample_num)

    # Load the data based on selection (sampled data or random data)
    with open('sampled_data/data_sampled_' + data_infix + suffix, 'rb') as b:
        dataset_train = pickle.load(b)
    with open('sampled_data/value_sampled_' + data_infix + suffix, 'rb') as b:
        dataset_value_dqn = pickle.load(b)

    # 'with' closes both CSVs on any exit path. The original NEVER closed
    # write_val_diff (only write_loss), so buffered reward/value rows could be
    # lost; it also leaked write_loss on non-KeyboardInterrupt exceptions.
    with open('sampled_data/data_sampled_' + loss_infix + suffix + '.csv',
              'w', encoding='UTF-8', newline='') as write_loss, \
         open('sampled_data/val_sampled_' + diff_infix + suffix + '.csv',
              'w', encoding='UTF-8', newline='') as write_val_diff:

        writer = csv.DictWriter(
            write_loss,
            fieldnames=['Epoch', 'Train_loss', 'Train_ppl', 'Val_loss'])
        writer_value = csv.DictWriter(
            write_val_diff, fieldnames=['Epoch', 'Iteration', 'Reward', 'Value'])

        # LSTM Training Part
        # At any point, you can hit Ctrl + C to break out of training early.
        try:
            for epoch in range(1, n_epoch + 1):
                print("# Epoch", epoch)

                # Train on one sample at a time so each sample's marginal
                # effect on validation loss can be compared to its DQN value.
                for i, sample in enumerate(dataset_train):
                    loss_prev = w_t_RL.evaluate(
                        model_LSTM, dataset_val, epoch)  # Evaluate previous loss
                    model_LSTM, train_loss, train_ppl = w_t_RL.train(
                        model_LSTM, [sample],
                        epoch)  # Train LSTM based on dataset_labelled
                    loss_curr = w_t_RL.evaluate(
                        model_LSTM, dataset_val, epoch)  # Evaluate current loss
                    # Reward = improvement in validation loss from this sample
                    reward = loss_prev - loss_curr
                    writer_value.writerow({
                        'Epoch': str(epoch),
                        'Iteration': str(i),
                        'Reward': str(reward),
                        'Value': str(dataset_value_dqn[i])
                    })

                # Epoch summary uses the stats from the last per-sample step.
                writer.writerow({
                    'Epoch': str(epoch),
                    'Train_loss': str(train_loss),
                    'Train_ppl': str(train_ppl),
                    'Val_loss': str(loss_curr)
                })

        except KeyboardInterrupt:
            print('-' * 89)
            print('Exiting from training early')