# Example 1
# Report the dataset shapes so the dimensions derived below can be checked.
for label, data in (('X_train', X_train), ('y_train', y_train),
                    ('X_test', X_test), ('y_test', y_test)):
    print(label + ' shape = ' + str(np.shape(data)))

# Number of input features and output targets, taken from the training set.
X_DIM = np.shape(X_train)[1]
Y_DIM = np.shape(y_train)[1]
print(f'x_dim = {X_DIM}, y_dim = {Y_DIM}')

###################
# Initialise XCSF
###################

# Supervised learning mode: a single (dummy) action.
xcs = xcsf.XCS(X_DIM, Y_DIM, 1)

# Override the defaults from default.ini.
for param, value in (
        ('OMP_NUM_THREADS', 8),
        ('POP_SIZE', 500),
        ('MAX_TRIALS', 1000),     # number of trials per fit()
        ('LOSS_FUNC', 'onehot'),  # one-hot encoding classification error
        ('E0', 0.01),             # 1% target error
        ('ALPHA', 0.1),           # accuracy offset
        ('NU', 5),                # accuracy slope
):
    setattr(xcs, param, value)

xcs.action('integer')  # (dummy) integer actions
condition_layers = {
    'layer_0': { # hidden layer
        'type': 'connected',
        'activation': 'selu',
# Example 2
            return self.max_payoff
        return 0


# Instantiate a new 6-bit real-multiplexer benchmark problem.
mux = Mux(6)
X_DIM = mux.n_bits
N_ACTIONS = mux.n_actions
MAX_PAYOFF = mux.max_payoff

###################
# Initialise XCSF
###################

# constructor = (x_dim, y_dim, n_actions)
xcs = xcsf.XCS(X_DIM, 1, N_ACTIONS)

# Override the defaults from default.ini.
for param, value in (
        ('OMP_NUM_THREADS', 8),    # number of CPU cores to use
        ('POP_SIZE', 1000),        # maximum population size
        ('E0', 0.01),              # target error
        ('BETA', 0.2),             # classifier parameter update rate
        ('THETA_EA', 25),          # EA frequency
        ('ALPHA', 0.1),            # accuracy offset
        ('NU', 5),                 # accuracy slope
        ('EA_SUBSUMPTION', True),
        ('SET_SUBSUMPTION', True),
        ('THETA_SUB', 100),        # minimum experience of a subsumer
):
    setattr(xcs, param, value)

xcs.action('integer')
xcs.condition('hyperrectangle', {'min': 0, 'max': 1, 'spread-min': 0.1})
xcs.prediction('nlms-linear', {
# Example 3
# Scale the targets into [-1, 1].
train_Y = minmax_scale(train_Y, feature_range=(-1, 1))

# XCSF expects 2D numpy arrays: promote a 1D target vector to one column.
if len(np.shape(train_Y)) == 1:
    train_Y = np.reshape(train_Y, (train_Y.shape[0], 1))

print(f"train_X shape = {np.shape(train_X)}")
print(f"train_Y shape = {np.shape(train_Y)}")

# Number of input and output variables, taken from the training data.
xvars = np.shape(train_X)[1]
yvars = np.shape(train_Y)[1]
print(f"xvars = {xvars} yvars = {yvars}")

# Initialise XCSF.
xcs = xcsf.XCS(xvars, yvars)

# Override cons.txt (numeric type codes from this older API).
for param, value in (
        ('POP_SIZE', 5000),
        ('MAX_TRIALS', 1000),  # number of trials per fit()
        ('COND_TYPE', 0),      # hyperrectangle conditions
        ('PRED_TYPE', 4),      # neural network predictors
        ('HIDDEN_NEURON_ACTIVATION', 0),  # logistic
):
    setattr(xcs, param, value)

##################################
# Example plotting in matplotlib
##################################

n = 50  # 50 fit() calls -> 50,000 trials in total
evals = np.zeros(n)
psize = np.zeros(n)
# Example 4
# Hold back 10% of the training data as a validation split.
X_train, X_val, y_train, y_val = train_test_split(
    X_train, y_train, test_size=0.1)

# Report the shapes of all three splits.
for label, data in (('X_train', X_train), ('y_train', y_train),
                    ('X_val', X_val), ('y_val', y_val),
                    ('X_test', X_test), ('y_test', y_test)):
    print(label + ' shape = ' + str(np.shape(data)))

###################
# Initialise XCSF
###################

# Supervised learning mode: a single (dummy) action.
xcs = xcsf.XCS(X_DIM, Y_DIM, 1)

# Override the defaults from default.ini.
for param, value in (
        ('OMP_NUM_THREADS', 8),  # number of CPU cores to use
        ('POP_SIZE', 500),       # maximum population size
        ('MAX_TRIALS', 1000),    # number of trials per fit()
        ('LOSS_FUNC', 'mse'),    # mean squared error
        ('E0', 0.005),           # target error
        ('ALPHA', 1),            # accuracy offset
        ('NU', 20),              # accuracy slope
        ('THETA_EA', 50),        # EA invocation frequency
        ('THETA_DEL', 50),       # min experience before fitness used in deletion
        ('BETA', 0.1),           # update rate for error, etc.
):
    setattr(xcs, param, value)

xcs.action('integer')  # (dummy) integer actions
tree_args = {
# Example 5
                                                    data.target,
                                                    test_size=0.2)
# Scale the inputs into [0, 1].
X_train = minmax_scale(X_train, feature_range=(0, 1))
X_test = minmax_scale(X_test, feature_range=(0, 1))

features = 30
classes = 2
train_len = len(X_train)
test_len = len(X_test)
print(f"train len = {train_len}, test len = {test_len}")

###################
# Initialise XCSF
###################

# Initialise XCSF for single-step reinforcement learning.
xcs = xcsf.XCS(features, classes, False)

# Override the defaults from default.ini (numeric type codes, older API).
for param, value in (
        ('OMP_NUM_THREADS', 8),
        ('POP_SIZE', 1000),
        ('PERF_TRIALS', 1000),
        ('EPS_0', 0.01),   # target error
        ('COND_TYPE', 1),  # hyperrectangles
        ('PRED_TYPE', 1),  # linear least squares
        ('ACT_TYPE', 0),   # integers
):
    setattr(xcs, param, value)

xcs.print_params()

#####################
# Execute experiment
#####################
# Example 6
def rmux_answer(state, n_pos=None):
    """Return the correct answer (0 or 1) for a real-multiplexer input.

    The first ``n_pos`` entries of ``state`` are address bits (thresholded
    at 0.5, most-significant first); they select which of the remaining
    data bits is the answer.

    Args:
        state: sequence of floats in [0, 1] — address bits followed by
            data bits.
        n_pos: number of address bits; defaults to the module-level
            ``pos_bits`` so existing callers are unaffected.

    Returns:
        int: 1 if the addressed data bit is > 0.5, otherwise 0.
    """
    if n_pos is None:
        n_pos = pos_bits  # module-level constant set by the experiment
    # Decode the address bits into an index into the data portion,
    # which starts immediately after the address bits.
    pos = n_pos
    for i in range(n_pos):
        if state[i] > 0.5:
            pos += 2 ** (n_pos - 1 - i)
    return 1 if state[pos] > 0.5 else 0


###################
# Initialise XCSF
###################

# Initialise XCSF for single-step reinforcement learning.
# NOTE(review): 'mux' is defined earlier, outside this chunk — presumably
# it supplies x_dim here; confirm against the full script.
xcs = xcsf.XCS(mux, 2, False)

# Override the defaults from default.ini (numeric type codes, older API).
for param, value in (
        ('OMP_NUM_THREADS', 8),
        ('POP_SIZE', 1000),
        ('PERF_TRIALS', 1000),
        ('EPS_0', 0.01),   # target error
        ('COND_TYPE', 1),  # hyperrectangles
        ('PRED_TYPE', 1),  # linear least squares
        ('ACT_TYPE', 0),   # integers
):
    setattr(xcs, param, value)

xcs.print_params()

#####################
# Execute experiment
#####################
# Example 7
        return Maze.OPTIMAL[self.name]

    def max_payoff(self):
        """Return the reward for reaching the goal state, as a float."""
        goal_reward = Maze.MAX_PAYOFF
        return float(goal_reward)


###################
# Initialise XCSF
###################

# Initialise XCSF for reinforcement learning:
# constructor = (x_dim, y_dim, n_actions).
X_DIM = 8
Y_DIM = 1
N_ACTIONS = 8
xcs = xcsf.XCS(X_DIM, Y_DIM, N_ACTIONS)

# Override the defaults from default.ini.
for param, value in (
        ('OMP_NUM_THREADS', 8),
        ('POP_SIZE', 1000),
        ('PERF_TRIALS', 50),
        ('E0', 0.001),            # target error
        ('BETA', 0.2),            # classifier parameter update rate
        ('THETA_EA', 25),         # EA frequency
        ('ALPHA', 0.1),           # accuracy offset
        ('NU', 5),                # accuracy slope
        ('EA_SUBSUMPTION', True),
        ('SET_SUBSUMPTION', True),
        ('THETA_SUB', 100),       # minimum experience of a subsumer
):
    setattr(xcs, param, value)

xcs.action('integer')  # integer actions
xcs.condition('ternary', {'bits': 2})  # ternary conditions: 2-bits per float
# Example 8
        ax.annotate(text, xy=(0, 100), xytext=(-40, 1), fontsize=12, bbox=bbox)

    anim = animation.FuncAnimation(plt.gcf(),
                                   animate,
                                   frames=len(frames),
                                   interval=100,
                                   blit=False)
    anim.save(path + filename, writer='imagemagick', fps=30)


###################
# Initialise XCSF
###################

# constructor = (x_dim, y_dim, n_actions)
xcs = xcsf.XCS(X_DIM, N_ACTIONS, 1)  # supervised mode: i.e., single action

# Override the defaults from default.ini.
for param, value in (
        ('OMP_NUM_THREADS', 8),    # number of CPU cores to use
        ('POP_INIT', False),       # use covering to initialise
        ('MAX_TRIALS', 1),         # one trial per fit
        ('POP_SIZE', 200),         # maximum population size
        ('E0', 0.001),             # target error
        ('BETA', 0.05),            # classifier parameter update rate
        ('ALPHA', 1),              # accuracy offset
        ('NU', 5),                 # accuracy slope
        ('EA_SUBSUMPTION', False),
        ('SET_SUBSUMPTION', False),
        ('THETA_EA', 100),         # EA invocation frequency
        ('THETA_DEL', 100),        # min experience before fitness used for deletion
):
    setattr(xcs, param, value)

condition_layers = {