Example #1
File: agent.py Project: NU-1/FFNET
 def __init__(self, layers, batch, explore, explore_l, explore_d, learning,
              decay, path):
     # layers: architecture of Q network
     # batch: number of observations in mini-batch set
     # explore: initial exploration rate
     # explore_l: lower bound on the exploration rate
     # explore_d: per-step decay of the exploration rate
     # learning: learning rate
     # decay: future reward decay rate
     # path: model path
     self.layers = layers
     self.batch_size = batch
     self.decay_rate = decay
     self.learning_rate = learning
     self.explore_low = explore_l
     self.explore_decay = explore_d
     self.explore_rate = explore
     self.directory = path
     self.num_action = self.layers[-1].num_output
     ##### build Q network
     self.Q = NeuralNet(self.layers, self.learning_rate, 'mean_square',
                        'RMSprop')
     self.Q.initialize()
     ##### data-related variables
     self.feat = []
     self.gt = []
     self.memory = Memo()
     self.selection = []
Example #2
class Model(Observable, Observer):
    def __init__(self, structure):

        Observable.__init__(self)

        self.neural_net = NeuralNet(structure, random_init_bound=0.05)
        self.commands = []

    def load(self, path):
        self.neural_net.load(path)

    def predict(self, x):
        return self.neural_net.evaluate(x)

    def update(self, command):
        self.notify_observers()

    def add_command(self, command):
        self.commands.append(command)
        command.add_observer(self)
        self.notify_observers()

    def undo(self):
        if self.commands:
            self.commands.pop()
            self.notify_observers()

    def clear(self):
        self.commands.clear()
        self.notify_observers()
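Note: Example #2 relies on Observable and Observer base classes that the snippet does not show. A minimal sketch consistent with the calls above (add_observer, notify_observers, and update come from the example; the implementation is an assumption):

class Observer:
    # Anything that wants to be told when an observable changes.
    def update(self, observable):
        raise NotImplementedError


class Observable:
    # Keeps a list of observers and pushes change notifications to them.
    def __init__(self):
        self._observers = []

    def add_observer(self, observer):
        self._observers.append(observer)

    def notify_observers(self):
        for observer in self._observers:
            observer.update(self)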
Example #3
 def __init__(self, crop: bool = False):
     self.to_crop = crop
     try:
         self.net = NeuralNet()
     except Exception as e:
         print(f"Failed during NN initialization. Error: {e}")
         raise
     print("Aim assistant successfully initialized")
Example #4
def train(net: NeuralNet,
          inputs: Tensor,
          targets: Tensor,
          num_epochs: int = 5000,
          iterator: DataIterator = BatchIterator(),
          loss: Loss = MSE(),
          optimizer: Optimizer = SGD()
          ) -> None:
    for epoch in range(num_epochs):
        epoch_loss = 0.0
        for batch in iterator(inputs, targets):
            predicted = net.forward(batch.inputs)
            epoch_loss += loss.loss(predicted, batch.targets)
            grad = loss.grad(predicted, batch.targets)
            net.backward(grad)
            optimizer.step(net)
        print(epoch, epoch_loss)
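The Loss objects passed to train() expose loss() and grad(); a minimal MSE consistent with that interface (a sketch: the class and method names appear in Example #4, the body is an assumption):

import numpy as np

class MSE:
    # Total squared error and its gradient with respect to the predictions.
    def loss(self, predicted, actual):
        return np.sum((predicted - actual) ** 2)

    def grad(self, predicted, actual):
        return 2 * (predicted - actual)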
Example #5
def main(weight_file=None):
    if weight_file:
        with open(weight_file, "r") as f:
            weights = json.load(f)
        mlp = NeuralNet(weights["mlp"])
        lr = NeuralNet(weights["lr"])
    else:
        mlp = NeuralNet.create(10, 10, 1)
        lr = NeuralNet.create(10, 1)
        train(mlp)
        train(lr)
        with open("weights.json", "w") as f:
            json.dump({
                "mlp": mlp.weights,
                "lr": lr.weights,
            }, f)
    tournament(NeuralNetAgent("MLP", mlp), NeuralNetAgent("LR", lr))
Example #6
 def __init__(self,
              file,
              template,
              method='chauvenet',
              nn_params=None,
              verbose=False,
              **kwargs):
     self.file = file
     if "cal" in self.file:
         raise ValueError(f"File {self.file} is not in PSR format.")
     elif "59071" in self.file:
         raise ValueError(f"Not doing 59071...")
     self.method = method
     self.verbose = verbose
     self.ar = Archive(file, verbose=False)
     if method != 'NN':
         _, self.template = u.get_data_from_asc(template)
         self.opw = u.get_1D_OPW_mask(self.template, windowsize=128)
         self.omit, self.rms_mu, self.rms_sigma = self.get_omission_matrix(
             **kwargs)
         unique, counts = np.unique(self.omit, return_counts=True)
         print(f"Good channels: {100*(counts[0]/sum(counts)):.3f}%")
         print(f"Bad channels: {100*(counts[1]/sum(counts)):.3f}%")
     elif nn_params is not None:
         df = pd.DataFrame(
             np.reshape(self.ar.getData(),
                        (self.ar.getNsubint() * self.ar.getNchan(),
                         self.ar.getNbin())))
         scaler = MinMaxScaler()
         scaled_df = scaler.fit_transform(df.iloc[:, :])
         scaled_df = pd.DataFrame(scaled_df)
         self.x = scaled_df.iloc[:, :].values.transpose()
         self.nn = NeuralNet(self.x, np.array([[0], [0]]))
         self.nn.dims = [self.ar.getNbin(), 512, 10, 13, 8, 6, 6, 4, 4, 1]
         self.nn.threshold = 0.5
         self.nn.load_params(root=nn_params)
         self.omit = self.nn_get_omission()
         np.set_printoptions(threshold=sys.maxsize)
         unique, counts = np.unique(self.omit, return_counts=True)
         print(f"Good channels: {100*(counts[0]/sum(counts)):.3f}%")
         print(f"Bad channels: {100*(counts[1]/sum(counts)):.3f}%")
     else:
         sys.exit()
Example #7
File: qnn.py Project: dkotfis/NeuRL
 def __init__(self, nactions, input_size, max_experiences=500, gamma=0.6, alpha=0.1, use_sarsa=False):
     #Default uses 2 hidden layers of equal size
     lay = [input_size, int((nactions+input_size)/2.0), int((nactions+input_size)/2.0), nactions]
     self.nactions = nactions
     self.NN = NeuralNet(layers=lay, epsilon=0.154, learningRate=alpha)
     self.experiences = []
     self.max_experiences = max_experiences
     self.gamma = gamma
     self.use_sarsa = use_sarsa
     self.prob_remember = 0.1
     self.num_replay_samples = 10
Example #8
def load_model():
    '''
    Loads a pre-trained model and settings used to generate it.
    '''
    try:
        with open(f'{PATH_TO_MODEL}.json', 'r') as json_file:
            settings = json.load(json_file)
        model = NeuralNet(len(settings['all_labels']))
        model.load_state_dict(
            torch.load(f'{PATH_TO_MODEL}.pth',
                       map_location=torch.device('cpu')))
    except Exception:
        print('Could not locate a trained model.')
        sys.exit()
    model.eval()
    return model, settings
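A hedged usage sketch for load_model() (the input tensor shape and the label lookup below are placeholders, not taken from the project):

import torch

model, settings = load_model()
with torch.no_grad():
    x = torch.zeros(1, 28 * 28)  # placeholder input; the real shape depends on the project
    probs = torch.softmax(model(x), dim=1)
    print(settings['all_labels'][probs.argmax(dim=1).item()])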
Example #9
with open("cost_iris.pkl", "rb") as c:
    cost = pickle.load(c)

with open("acc_iris.pkl", "rb") as c:
    accuracy = pickle.load(c)

# Plot training cost and accuracy vs epochs
plt.figure(1)
plt.plot(list(range(0, 1000, 10)), cost)
plt.ylabel("Training Error/Cost"), plt.xlabel("Epochs")
plt.figure(2)
plt.plot(list(range(0, 1000, 10)), accuracy)
plt.ylabel("Training Accuracy"), plt.xlabel("Epochs")
plt.show()

op_nodes = 3
iris = load_iris()
X = iris.data
y = np.array([np.eye(op_nodes)[i] for i in iris.target])

# Shuffle
random = np.random.permutation(len(X))
X = X[random]
y = y[random]

# Load the model
nnet = NeuralNet(load=True, model_file="trained.pkl")
op = nnet.predict(X, return_max=True)

print("-" * 30 + "\nTEST SET ACCURACY\n" + "-" * 30)
print(classification_report(y.argmax(axis=1), op))
Example #10
data_x, data_y = None, None
if args.dataset == "train":
    data_x, data_y = train_x, train_y
elif args.dataset == "test":
    data_x, data_y = test_x, test_y
elif args.dataset == "valid":
    data_x, data_y = valid_x, valid_y

test_dataset = Dataset(root_dir, data_x, data_y, transforms=transform)
test_generator = torch.utils.data.DataLoader(test_dataset, **params)

print("Loaded dataloaders...")

criterion = torch.nn.CrossEntropyLoss()
model = NeuralNet(0.001, criterion, 64, 2)
state_dict = torch.load(model_name)
model.load_state_dict(state_dict)
for parameter in model.parameters():
    parameter.requires_grad = False
if use_cuda:
    model.cuda()
model.eval()
summary(model, (1, 64, 64))

print("Loaded model...")

preds = []
labels = []

for local_batch, local_labels in tqdm(test_generator):
Example #11
# dataset = datasets.load_digits()

X = digitsX[:]
y = digitsY[:]

n, d = X.shape
nTrain = int(0.2 * n)  # training on 20% of the data

# shuffle the data
# idx = np.arange(n)
# np.random.seed(13)
# np.random.shuffle(idx)
# X = X[idx]
# y = y[idx]

# split the data
Xtrain = X[:nTrain,:]
ytrain = y[:nTrain]
# Xtest = X[nTrain:,:]
# ytest = y[nTrain:]

model = NeuralNet(np.array([25]), .80, 0.12, 600)  # 100 @ 2.5 = 0.885, 400 @ 1.6 = 0.88, 1000 @ 1 = 0.8542, 
model.fit(X,y)
ypred = model.predict(Xtrain)

accuracy = accuracy_score(ytrain, ypred)

print "NeuralNet Accuracy = "+str(accuracy)

# model.visualizeHiddenNodes('hiddenLayers.png')
Example #12
"""
Sample of function that can't be learnt 
with simple linear model
"""

import numpy as np

from train import train
from nn import NeuralNet
from layers import Linear, Tanh

inputs = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])

targets = np.array([[1, 0], [0, 1], [0, 1], [1, 0]])

net = NeuralNet([
    Linear(input_size=2, output_size=2),
    # not able to learn xor  function just with linear layer
    Tanh(),
    Linear(input_size=2, output_size=2)
])

train(net, inputs, targets)

for x, y in zip(inputs, targets):
    predicted = net.forward(x)
    print(x, predicted, y)
Example #13
from numpy import loadtxt, ones, zeros, where
import numpy as np
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
import sys, traceback

from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from nn import NeuralNet
# load the data set
filename = 'data/digitsX.dat'
data = loadtxt(filename, delimiter=',')
X = data[:,:]
filename = 'data/digitsY.dat'
data1 = loadtxt(filename, delimiter=',')
y = data1
layers = np.array([25])

clf = NeuralNet(layers=layers, learningRate=2.0, numEpochs=10)
clf.fit(X, y)
predicted = clf.predict(X)
# print(predicted)
print(np.mean(predicted == y))
Example #14
import numpy as np
from sklearn import datasets
from sklearn.metrics import accuracy_score
from nn import NeuralNet

filename_X = 'data/digitsX.dat'
filename_y = 'data/digitsY.dat'
X = np.loadtxt(filename_X, delimiter=',')
y = np.loadtxt(filename_y, delimiter=',')

# takes roughly 1s for each epoch
clf_NN = NeuralNet(layers=np.array([25]), learningRate=2.0, numEpochs=450)
clf_NN.fit(X, y)
y_predict = clf_NN.predict(X)
accuracy_NN = accuracy_score(y_predict, y)

print "Accuracy: \t" + str(accuracy_NN)
Example #15
from nn import NeuralNet
from node import Node

nn = NeuralNet()
print("Enter trained Neural Net filename")
infile = input()
print("Enter test filename")
testfile = input()
print("Enter output filename")
results = input()
nn.importFile(infile)
nn.test(testfile, results)
Example #16
 def step(self, net: NeuralNet) -> None:
     for param, grad in net.params_and_grads():
         param -= self.lr * grad  # gradient descent step: move each parameter against its gradient
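For context, params_and_grads() is expected to yield (parameter, gradient) array pairs. A sketch of the optimizer interface around step() (only lr and step appear in Example #16; the base class is an assumption):

from nn import NeuralNet

class Optimizer:
    def step(self, net: NeuralNet) -> None:
        raise NotImplementedError


class SGD(Optimizer):
    def __init__(self, lr: float = 0.01) -> None:
        self.lr = lr

    def step(self, net: NeuralNet) -> None:
        # In-place update mutates the arrays the layers already hold.
        for param, grad in net.params_and_grads():
            param -= self.lr * grad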
Example #17
class Zap():
    """
    Master class for zapping data.
    Requires:

    file        -       .FITS (must be PSRFITS v5+ format)

    Optional:

    template    -       ASCII format:       BIN#    Flux           (Required if not doing NN excision)
    method      -       Either 'chauvenet', 'DMAD' or 'NN'
    verbose     -       Prints more information to the console
    **kwargs    -       Passed on to plot.histogram_and_curves()
    """
    def __init__(self,
                 file,
                 template,
                 method='chauvenet',
                 nn_params=None,
                 verbose=False,
                 **kwargs):
        self.file = file
        if "cal" in self.file:
            raise ValueError(f"File {self.file} is not in PSR format.")
        elif "59071" in self.file:
            raise ValueError(f"Not doing 59071...")
        self.method = method
        self.verbose = verbose
        self.ar = Archive(file, verbose=False)
        if method != 'NN':
            _, self.template = u.get_data_from_asc(template)
            self.opw = u.get_1D_OPW_mask(self.template, windowsize=128)
            self.omit, self.rms_mu, self.rms_sigma = self.get_omission_matrix(
                **kwargs)
            unique, counts = np.unique(self.omit, return_counts=True)
            print(f"Good channels: {100*(counts[0]/sum(counts)):.3f}%")
            print(f"Bad channels: {100*(counts[1]/sum(counts)):.3f}%")
        elif nn_params is not None:
            df = pd.DataFrame(
                np.reshape(self.ar.getData(),
                           (self.ar.getNsubint() * self.ar.getNchan(),
                            self.ar.getNbin())))
            scaler = MinMaxScaler()
            scaled_df = scaler.fit_transform(df.iloc[:, :])
            scaled_df = pd.DataFrame(scaled_df)
            self.x = scaled_df.iloc[:, :].values.transpose()
            self.nn = NeuralNet(self.x, np.array([[0], [0]]))
            self.nn.dims = [self.ar.getNbin(), 512, 10, 13, 8, 6, 6, 4, 4, 1]
            self.nn.threshold = 0.5
            self.nn.load_params(root=nn_params)
            self.omit = self.nn_get_omission()
            np.set_printoptions(threshold=sys.maxsize)
            unique, counts = np.unique(self.omit, return_counts=True)
            print(f"Good channels: {100*(counts[0]/sum(counts)):.3f}%")
            print(f"Bad channels: {100*(counts[1]/sum(counts)):.3f}%")
        else:
            sys.exit()

    def nn_get_omission(self):
        pred = np.around(np.squeeze(self.nn.pred_data(self.x, False)),
                         decimals=0).astype(int)
        pred = np.reshape(pred, (self.ar.getNsubint(), self.ar.getNchan()))

        return pred

    def get_omission_matrix(self, **kwargs):

        rms, lin_rms, mu, sigma = u.rms_arr_properties(
            self.ar.getData(), self.opw, 1.0)  # Needs to input 2D array

        # Creates the histogram
        plot.histogram_and_curves(
            lin_rms,
            mean=mu,
            std_dev=sigma,
            bins=(self.ar.getNchan() * self.ar.getNsubint()) // 4,
            x_axis='Root Mean Squared',
            y_axis='Frequency Density',
            title=r'$M={},\ \sigma={}$'.format(mu, sigma),
            **kwargs)

        if self.method == 'chauvenet':
            rej_arr = physics.chauvenet(rms,
                                        median=mu,
                                        std_dev=sigma,
                                        threshold=2.0)
        elif self.method == 'DMAD':
            rej_arr = physics.DMAD(lin_rms, threshold=3.5)
            rej_arr = np.reshape(rej_arr,
                                 (self.ar.getNsubint(), self.ar.getNchan()))

        if self.verbose:
            print("Rejection criterion created.")

        return rej_arr, mu, sigma

    def plot_mask(self, **kwargs):

        fig = plt.figure(figsize=(7, 7))
        ax = fig.add_subplot(111)
        ax.imshow(self.omit.T,
                  cmap=plt.cm.gray,
                  interpolation='nearest',
                  aspect='auto')
        plt.show()

    def save_training_set(self, val_size=0.2):
        # From Chauvenet or DMAD. 1 is bad channel

        with open(
                f'{self.ar.getName()}_{int(self.ar.getMJD())}_{self.ar.getFrontend()}_{self.ar.getNbin()}.training',
                'w') as t:
            t.write(
                f'# Training set for {self.ar.getName()} taken on {int(self.ar.getMJD())} at {self.ar.getFrontend()}\n'
            )
        with open(
                f'{self.ar.getName()}_{int(self.ar.getMJD())}_{self.ar.getFrontend()}_{self.ar.getNbin()}.validation',
                'w') as t:
            t.write(
                f'# Validation set for {self.ar.getName()} taken on {int(self.ar.getMJD())} at {self.ar.getFrontend()}\n'
            )

        ps_0 = np.zeros(2049)[np.newaxis, :]
        ps_1 = np.zeros(2049)[np.newaxis, :]

        d = self.ar.getData().reshape(
            (self.ar.getNchan() * self.ar.getNsubint(), self.ar.getNbin()))
        omission = self.omit.reshape(
            (self.ar.getNchan() * self.ar.getNsubint()))

        i = 1
        for omit, profile in zip(omission, d):
            try:
                choice = int(omit)
                if choice == 1:
                    choice = 0
                elif choice == 0:
                    choice = 1
            except ValueError:
                choice = -1

            print(i, end='\r')

            if choice != -1:
                # Creates the profile / choice pairs and doubles up with the reciprocal profiles.
                p = np.append(profile, choice)
                #inv_p = np.append( -1*profile, choice )
                if choice == 0:
                    ps_0 = np.append(ps_0, p[np.newaxis, :], axis=0)
                else:
                    ps_1 = np.append(ps_1, p[np.newaxis, :], axis=0)

            i += 1

        ps_0, ps_1 = np.delete(ps_0, 0, 0), np.delete(ps_1, 0, 0)

        # Sort into training / validation sets
        train, validation = train_test_split(ps_0, test_size=val_size)
        ones_t, ones_v = train_test_split(ps_1, test_size=val_size)
        train, validation = np.append(train, ones_t,
                                      axis=0), np.append(validation,
                                                         ones_v,
                                                         axis=0)

        np.random.shuffle(train), np.random.shuffle(validation)

        for k in train:
            with open(
                    f'{self.ar.getName()}_{int(self.ar.getMJD())}_{self.ar.getFrontend()}_{self.ar.getNbin()}.training',
                    'a') as t:
                np.savetxt(t, k, fmt='%1.5f ', newline='')
                t.write("\n")
                #np.savetxt( t, inv_p, fmt = '%1.5f ', newline = '' )
                #t.write( "\n" )

        for k in validation:
            with open(
                    f'{self.ar.getName()}_{int(self.ar.getMJD())}_{self.ar.getFrontend()}_{self.ar.getNbin()}.validation',
                    'a') as t:
                np.savetxt(t, k, fmt='%1.5f ', newline='')
                t.write("\n")

    # Save as ASCII text file
    def save(self, outroot="zap_out", ext='.ascii'):
        outfile = outroot + ext
        with open(outfile, 'w+') as f:
            for i, t in enumerate(self.omit):
                for j, rej in enumerate(t):
                    if rej:
                        f.write(str(i) + " " + str(self.ar.freq[i][j]) + "\n")
                        #f.write( f'{k} {self.ar.freq[k][i]}\n' )
        return outfile
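A hedged usage sketch for the Zap class above (the file and template names below are placeholders, not from the source):

# Hypothetical session: build a mask with the Chauvenet criterion,
# inspect it, then write the rejected channels to ASCII.
zap = Zap("observation.fits", "template.asc", method='chauvenet', verbose=True)
zap.plot_mask()
outfile = zap.save(outroot="observation_zap")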
Example #18
    train_interface.train(cfg.TRAINING.EPOCHS)


def test(network):
    test_loader = get_dataloader(cfg.DATASET.NAME, cfg.DATASET.PATH, 0,
                                 None, smoothing=cfg.DATASET.TARGET_SMOOTHING,
                                 normalize=cfg.DATASET.NORMALIZE, test=True)

    interface = Trainer(network, None, None, test_loader)
    interface.validate()
    accuracy = interface.val_accuracy[-1]
    logger.info(f'TEST Accuracy: {accuracy:.4f}')


def get_args():
    parser = argparse.ArgumentParser(description='Train a neural network')
    parser.add_argument('--cfg', type=str, help='Config containing network, dataset, and training information')
    return parser.parse_args()


if __name__ == '__main__':
    args = get_args()

    cfg.merge_from_file(args.cfg)

    net_cfg = cfg.NETWORK
    nn = NeuralNet(net_cfg.INPUTS, net_cfg.HIDDEN_LAYERS, net_cfg.OUTPUTS)

    train(nn)
    test(nn)
Example #19
# neural nets don't really make sense

import math
import numpy as np
import sys

from import_train import import_training_file
from import_train import rmsle
from sklearn.neural_network import BernoulliRBM
from nn import NeuralNet


if __name__ == '__main__':
  (X, y) = import_training_file(sys.argv[1], True)
  hidden_layers = [5]
  learningRate = 1.6
  epsil = 0.12  
  eps = 1000

  neural_network = NeuralNet(hidden_layers, learningRate, epsilon=epsil, numEpochs=eps)
  neural_network.fit(X, y)
  nn_predict = neural_network.predict(X)
Example #20
# names of test videos
test_name = ['MP7']
test_num = 1

# define neural network layout
l1 = Layer(4096, 400, 'relu')
l2 = Layer(400, 200, 'relu')
l3 = Layer(200, 100, 'relu')
l4 = Layer(100, 25, 'linear')
layers = [l1, l2, l3, l4]
learning_rate = 0.0002
loss_type = 'mean_square'
opt_type = 'RMSprop'

Q = NeuralNet(layers, learning_rate, loss_type, opt_type)
Q.recover('model/', 'Q_net_all_11_0_1000')

for i in range(test_num):

    video = Episode(i, test_num, test_name, feat_path, gt_path)
    frame_num = np.shape(video.feat)[0]

    summary = np.zeros(frame_num)
    Q_value = []
    id_curr = 0
    while id_curr < frame_num:
        action_value = Q.forward([video.feat[id_curr]])
        a_index = np.argmax(action_value[0])
        id_next = id_curr + a_index + 1
        if id_next > frame_num - 1:
Example #21
'''
    AUTHOR Wenqi Xian
'''

from numpy import loadtxt
import numpy as np

from nn import NeuralNet

X_train = loadtxt('data/digitsX.dat', delimiter=',')
y_train = loadtxt('data/digitsY.dat', delimiter=',')
layers = np.array([25])

NN = NeuralNet(layers=layers, learningRate=1.8, numEpochs=700)
NN.fit(X_train, y_train)
predicted = NN.predict(X_train)
accuracy = 100.0 * (predicted == y_train).sum() / y_train.shape[0]
print(accuracy)
Example #22
File: qnn.py Project: dkotfis/NeuRL
class QNN():
    """
    Neural Q-Network. Includes experience replay.
    nactions: the number of actions
    input_size: the number of inputs
    max_experiences: the total number of experiences to save for replay
    gamma: future rewards discount rate
    alpha: learning rate for underlying NN
    use_sarsa: flag whether to use the SARSA update rule
    """
    def __call__(self,s,a=None):
        """ implement here the returned Qvalue of state (s) and action(a)
        e.g. Q.GetValue(s,a) is equivalent to Q(s,a)
        """
        if a is None:
            return self.GetValue(s)
        return self.GetValue(s, a)

    def __init__(self, nactions, input_size, max_experiences=500, gamma=0.6, alpha=0.1, use_sarsa=False):
        #Default uses 2 hidden layers of equal size
        lay = [input_size, int((nactions+input_size)/2.0), int((nactions+input_size)/2.0), nactions]
        self.nactions = nactions
        self.NN = NeuralNet(layers=lay, epsilon=0.154, learningRate=alpha)
        self.experiences = []
        self.max_experiences = max_experiences
        self.gamma = gamma
        self.use_sarsa = use_sarsa
        self.prob_remember = 0.1
        self.num_replay_samples = 10

    def GetValue(self, s, a=None):
        """ Return the Q(s,a) value of state (s) for action (a)
        or all values for Q(s)
        """
        out = self.NN.propagate(s)
        if a is None:
            return out
        return out[a]

    def Update(self, s1, a1, r, s2, a2):
        """ update action value for action(a)
        """
        if (self.use_sarsa):
            v = r + self.gamma*self.GetValue(s2, a2)
        else:
            v = r + self.gamma*max(self.GetValue(s2))
        a = np.zeros(self.nactions)
        a[a1] = v
        self.NN.propagateAndUpdate(s1, a)

    def RememberExperience(self, s1, a1, r, s2, a2):
        if (random.random() < self.prob_remember):
            if (len(self.experiences) >= self.max_experiences):
                #TODO: Something more intelligent about how we determine what is worth forgetting
                self.experiences.pop(random.randint(0, self.max_experiences-1))
            self.experiences.append(Experience(s1, a1, r, s2, a2))

    def ExperienceReplay(self):
        #Skip until we have enough experience
        if (len(self.experiences) < self.num_replay_samples):
            return
        for i in range(self.num_replay_samples):
            index = random.randint(0, len(self.experiences)-1)
            exp = self.experiences[index]
            self.Update(exp.s1, exp.a1, exp.r, exp.s2, exp.a2)
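Experience is used by QNN but never shown; a minimal container matching the five fields it passes around (an assumption, nothing beyond those fields):

from collections import namedtuple

# One remembered transition: state, action, reward, next state, next action.
Experience = namedtuple('Experience', ['s1', 'a1', 'r', 's2', 'a2'])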
Example #23
File: agent.py Project: NU-1/FFNET
class Agent(object):
    def __init__(self, layers, batch, explore, explore_l, explore_d, learning,
                 decay, path):
        # layers: architecture of Q network
        # batch: number of observations in mini-batch set
        # explore: initial exploration rate
        # explore_l: lower bound on the exploration rate
        # explore_d: per-step decay of the exploration rate
        # learning: learning rate
        # decay: future reward decay rate
        # path: model path
        self.layers = layers
        self.batch_size = batch
        self.decay_rate = decay
        self.learning_rate = learning
        self.explore_low = explore_l
        self.explore_decay = explore_d
        self.explore_rate = explore
        self.directory = path
        self.num_action = self.layers[-1].num_output
        ##### build Q network
        self.Q = NeuralNet(self.layers, self.learning_rate, 'mean_square',
                           'RMSprop')
        self.Q.initialize()
        ##### data-related variables
        self.feat = []
        self.gt = []
        self.memory = Memo()
        self.selection = []

    # initialize with data
    def data_init(self, current_eps):
        self.feat = current_eps.feat
        self.gt = current_eps.gt

    # select an action based on policy
    def policy(self, id_curr):
        exploration = np.random.choice(
            range(2), 1, p=[1 - self.explore_rate, self.explore_rate])
        # exploration==1: explore
        # exploration==0: exploit
        if exploration == 1:  # exploration
            action_index = np.random.choice(range(self.num_action), 1)[0]

            #print('\r')
            #print('              explore:  '+str(action_index))
            #print('\r')
            action_value = self.Q.forward([self.feat[id_curr]])
            # record average Q value
            self.Q.ave_value.append(np.mean(action_value[0]))

            # print(action_value[0])
            return action_index
        else:  # exploitation
            action_value = self.Q.forward([self.feat[id_curr]])
            # record average Q value
            self.Q.ave_value.append(np.mean(action_value[0]))
            # self.Q.ave_value = np.append(self.Q.ave_value,np.mean(action_value[0]))

            action_index = np.argmax(action_value[0])

            #print('\r')
            ##print('exploit:  '+str(action_index))
            #print('\r')
            # print(action_value[0])
            return action_index

    # perform action to get next state
    def action(self, id_curr, a_index):
        id_next = id_curr + a_index + 1  #action 0,1,2,...
        return id_next

    # compute the reward
    # REWARD 3:reward with distribution and terms on length of skipping
    def reward(self, id_curr, a_index, id_next):
        gaussian_value = [
            0.0001, 0.0044, 0.0540, 0.2420, 0.3989, 0.2420, 0.0540, 0.0044,
            0.0001
        ]
        # skipping interval,missing part
        seg_gt = self.gt[0][id_curr + 1:id_next]
        total = len(seg_gt)
        n1 = sum(seg_gt)
        n0 = total - n1
        miss = (0.8 * n0 - n1) / 25  #largest action step.
        # accuracy
        acc = 0
        if id_next - 4 > -1:
            if self.gt[0][id_next - 4] == 1:
                acc = acc + 0.0001
        if id_next - 3 > -1:
            if self.gt[0][id_next - 3] == 1:
                acc = acc + 0.0044
        if id_next - 2 > -1:
            if self.gt[0][id_next - 2] == 1:
                acc = acc + 0.0540
        if id_next - 1 > -1:
            if self.gt[0][id_next - 1] == 1:
                acc = acc + 0.2420
        if self.gt[0][id_next] == 1:
            acc = acc + 0.3989
        if id_next + 1 < len(self.gt[0]):
            if self.gt[0][id_next + 1] == 1:
                acc = acc + 0.2420
        if id_next + 2 < len(self.gt[0]):
            if self.gt[0][id_next + 2] == 1:
                acc = acc + 0.0540

        if id_next + 3 < len(self.gt[0]):
            if self.gt[0][id_next + 3] == 1:
                acc = acc + 0.0044
        if id_next + 4 < len(self.gt[0]):
            if self.gt[0][id_next + 4] == 1:
                acc = acc + 0.0001
        r = miss + acc
        return r

    # update target Q value
    def update(self, r, id_curr, id_next, a_index):
        target = self.Q.forward([self.feat[id_curr]])  # target:[ [] ]
        target[0][a_index] = r + self.decay_rate * max(
            self.Q.forward([self.feat[id_next]])[0])
        return target

    # run an episode to get case set for training
    def episode_run(self):
        frame_num = np.shape(self.feat)[0]
        self.selection = np.zeros(frame_num)
        id_curr = 0
        self.selection[id_curr] = 1
        while id_curr < frame_num:
            a_index = self.policy(id_curr)
            id_next = self.action(id_curr, a_index)
            if id_next > frame_num - 1:
                break
            self.selection[id_next] = 1
            r = self.reward(id_curr, a_index, id_next)
            target_vector = self.update(r, id_curr, id_next, a_index)[0]
            input_vector = self.feat[id_curr]
            self.memorize(input_vector, target_vector)
            if self.memory.get_size() == self.batch_size:
                #print('training')
                self.train()
            id_curr = id_next

    # training Q net using one batch data
    def train(self):
        self.explore_rate = max(self.explore_rate - self.explore_decay,
                                self.explore_low)
        x = self.memory.state
        y = self.memory.target
        self.Q.train(x, y)
        self.memory.reset()

    # store current observation to memory
    def memorize(self, state, target):
        # observation: new observation (s,a,r,s')
        self.memory.add(state, target)

    # reset data-related variables
    def data_reset(self):
        self.feat = []
        self.gt = []
        self.selection = []

    # save trained Q net
    def save_model(self, filename):
        # module backup
        path = self.directory
        self.Q.saving(path, filename)
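The Memo replay buffer used by Agent is not shown. From the calls above it needs add(), get_size(), reset(), and state/target attributes; a minimal sketch consistent with that usage (an assumption, not the project's actual code):

class Memo:
    # Mini-batch buffer of (state, target) pairs consumed by Agent.train().
    def __init__(self):
        self.state = []
        self.target = []

    def add(self, state, target):
        self.state.append(state)
        self.target.append(target)

    def get_size(self):
        return len(self.state)

    def reset(self):
        self.state = []
        self.target = []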
Example #24
    '''
    return [x >> i & 1 for i in range(10)]

inputs = np.array([
    binary_encode(x)
    for x in range(101, 1024)
])

targets = np.array([
    fizz_buzz_encode(x)
    for x in range(101, 1024)
])

net = NeuralNet([
    Linear(input_size=10, output_size=50),
    Tanh(),
    Linear(input_size=50, output_size=4)
])

train(net,
      inputs,
      targets,
      num_epochs=5000,
      optimizer=SGD(lr=0.001))

for x in range(1, 101):
    predicted = net.forward(binary_encode(x))
    predicted_idx = np.argmax(predicted)
    actual_idx = np.argmax(fizz_buzz_encode(x))
    labels = [str(x), "fizz", "buzz", "fizzbuzz"]
    print(x, labels[predicted_idx], labels[actual_idx])
Example #25
# neural nets don't really make sense

import math
import numpy as np
import sys

from import_train import import_training_file
from import_train import rmsle
from sklearn.neural_network import BernoulliRBM
from nn import NeuralNet

if __name__ == '__main__':
    (X, y) = import_training_file(sys.argv[1], True)
    hidden_layers = [5]
    learningRate = 1.6
    epsil = 0.12
    eps = 1000

    neural_network = NeuralNet(hidden_layers,
                               learningRate,
                               epsilon=epsil,
                               numEpochs=eps)
    neural_network.fit(X, y)
    nn_predict = neural_network.predict(X)
Example #26
import matplotlib.pyplot as plt

use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")

root_dir = os.path.join("data")

classes_to_idx = {
    v: k
    for k, v in enumerate(
        open(os.path.join(root_dir, "classes.txt")).read().strip().split("\n"))
}
idx_to_classes = {v: k for k, v in classes_to_idx.items()}

criterion = nn.BCELoss()
model = NeuralNet(0.01, criterion, 256, len(classes_to_idx))
model.load_state_dict(torch.load(sys.argv[1]))
if use_cuda:
    model.cuda()
model.eval()

id = random.choice(os.listdir(os.path.join(root_dir, "images"))).split(".")[0]
data = torch.load(os.path.join(root_dir, "images", id + ".pt"))
true_labels = torch.load(os.path.join(root_dir, "labels", id + ".pt"))

diseases = []
for x in range(len(true_labels)):
    if true_labels[x] == 1.0:
        diseases.append(idx_to_classes[x])

print(id)
Example #27
from nn import NeuralNet
from node import Node

nn = NeuralNet()
nn.importFile("sample.NNGrades.init")
nn.printFile("sup.txt")
Example #28
x_train, x_test, y_train, y_test = train_test_split(x,
                                                    y,
                                                    test_size=0.4,
                                                    random_state=27)

# Config
layer_nodes = [4, 10, 3]  # Add more elements for more layers
model_name = "iris2"
activation = "sigmoid"  # (sigmoid, tanh)
op_activation = "sigmoid"  # (sigmoid, softmax)
loss = "crossentropy"  # (crossentropy, mse)

# Build the architecture
nnet = NeuralNet(layer_nodes=layer_nodes,
                 name=model_name,
                 loss=loss,
                 activation=activation,
                 output_activation=op_activation)

# Training config
epochs = 2000
alpha = 1e-3
reg_para = 0.05
batch_size = 20
epochs_bw_details = 50
dropout_percent = 0.25  # Probability of a node dropping out
d_layers = [2]  # Only these layers will have dropout

# Training
cost, accuracy = nnet.train(x_train,
                            y_train,
"""
Train a model to predict exclusive or of its inputs
"""

import numpy as np
from nn import NeuralNet
from layers import Linear, Tanh
from train import train

inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])

targets = np.array([[1, 0], [0, 1], [0, 1], [1, 0]])

net = NeuralNet([
    Linear(input_size=2, output_size=2),
    Tanh(),
    Linear(input_size=2, output_size=2)
])

train(net, inputs, targets, num_epochs=5000)

for x, y in zip(inputs, targets):
    print(x, net.forward(x), y)
Example #30
    def __init__(self, structure):

        Observable.__init__(self)

        self.neural_net = NeuralNet(structure, random_init_bound=0.05)
        self.commands = []
Example #31
        return [0, 0, 0, 1]
    elif x % 5 == 0:
        return [0, 0, 1, 0]
    elif x % 3 == 0:
        return [0, 1, 0, 0]
    else:
        return [1, 0, 0, 0]


inputs = np.array([binary_encode(x) for x in range(101, 1024)])

targets = np.array([fizz_buzz_encode(x) for x in range(101, 1024)])

net = NeuralNet([
    Linear(input_size=10, output_size=50),
    Tanh(),
    Linear(input_size=50, output_size=4)
])

train(net, inputs, targets, num_epochs=5000, optimizer=SGD(lr=0.001))

for x in range(1, 101):
    inputs = binary_encode(x)
    prediction = net.forward(inputs)
    actual = fizz_buzz_encode(x)
    labels = [str(x), "fizz", "buzz", "fizzbuzz"]
    prediction_idx = np.argmax(prediction)
    actual_idx = np.argmax(actual)

    print(x, labels[prediction_idx], labels[actual_idx])
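Examples #24 and #31 each cut off the top of the encoding helpers; plausible complete definitions consistent with the visible fragments (a reconstruction of the standard fizzbuzz setup, not verified against the source):

def binary_encode(x):
    # 10-bit binary encoding of x, least significant bit first.
    return [x >> i & 1 for i in range(10)]


def fizz_buzz_encode(x):
    # One-hot target: [number, fizz, buzz, fizzbuzz].
    if x % 15 == 0:
        return [0, 0, 0, 1]
    elif x % 5 == 0:
        return [0, 0, 1, 0]
    elif x % 3 == 0:
        return [0, 1, 0, 0]
    else:
        return [1, 0, 0, 0]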
Example #32
"""
The canonical example of a function that can't be
learned with a simple linear model is XOR
"""
import numpy as np

from train import train
from nn import NeuralNet
from layers import Linear, Tanh

inputs = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])

targets = np.array([[1, 0], [0, 1], [0, 1], [1, 0]])

net = NeuralNet([
    Linear(input_size=2, output_size=2),
    Tanh(),
    Linear(input_size=2, output_size=2)
])

train(net, inputs, targets)

for x, y in zip(inputs, targets):
    predicted = net.forward(x)
    print(x, predicted, y)
Example #33
from nn import NeuralNet
from node import Node

nn = NeuralNet()
print("Enter Neural Net filename")
infile = input()
print("Enter training filename")
trainfile = input()
print("Enter learning rate")
lrate = input()
print("Enter # of Epochs")
ep = input()
print("Enter output filename")
outfile = input()
nn.importFile(infile)
nn.train(trainfile, float(lrate), int(ep))
nn.printFile(outfile)

Example #34
import numpy as np
from numpy import loadtxt
from sklearn import datasets
from sklearn.metrics import accuracy_score

from nn import NeuralNet

# learning rate parameters to be trained by hand
numEpochs = 100
learningRate = 0.3
epsilon = 0.12
regularization_parameter = 0.001
nodes_in_hidden_layers = [25]

# load the data
filenameX = "data/digitsX.dat"
dataX = loadtxt(filenameX, delimiter=",")
filenameY = "data/digitsY.dat"
dataY = loadtxt(filenameY, delimiter=",")
n, d = dataX.shape

# create NeuralNet class
modelNN = NeuralNet(nodes_in_hidden_layers, epsilon, learningRate, numEpochs)

# train neural network on digits data
modelNN.fit(dataX, dataY)

# find the training accuracy
ypred = modelNN.predict(dataX)

# report the training accuracy
print("Training accuracy:", accuracy_score(dataY, ypred))
Example #35
X = digitsX[:]
y = digitsY[:]

n, d = X.shape
nTrain = int(0.2 * n)  # training on 20% of the data

# shuffle the data
# idx = np.arange(n)
# np.random.seed(13)
# np.random.shuffle(idx)
# X = X[idx]
# y = y[idx]

# split the data
Xtrain = X[:nTrain, :]
ytrain = y[:nTrain]
# Xtest = X[nTrain:,:]
# ytest = y[nTrain:]

model = NeuralNet(
    np.array([25]), .80, 0.12,
    600)  # 100 @ 2.5 = 0.885, 400 @ 1.6 = 0.88, 1000 @ 1 = 0.8542,
model.fit(X, y)
ypred = model.predict(Xtrain)

accuracy = accuracy_score(ytrain, ypred)

print "NeuralNet Accuracy = " + str(accuracy)

# model.visualizeHiddenNodes('hiddenLayers.png')
Example #36
X = X[idx]
y = y[idx]

# split the data
Xtrain = X[:nTrain, :]
ytrain = y[:nTrain]
Xtest = X[nTrain:, :]
ytest = y[nTrain:]

# train the decision tree
modelDT = DecisionTreeClassifier()
modelDT.fit(Xtrain, ytrain)

# train the neural net
layers = np.array([25])
modelNN = NeuralNet(layers=layers, learningRate=2, numEpochs=500, epsilon=.62)
modelNN.fit(Xtrain, ytrain)
ypred_NNtrain = modelNN.predict(Xtrain)

# output predictions on the remaining data
ypred_NN = modelNN.predict(Xtest)

# compute the training accuracy of the model
accuracyNT = accuracy_score(ytrain, ypred_NNtrain)
accuracyNN = accuracy_score(ytest, ypred_NN)

print "Training = "+str(accuracyNT)
print "Neural Net accuracy = "+str(accuracyNN)
modelNN.visualizeHiddenNodes("visualizeHiddenNodes.bmp")

Example #37
from nn import NeuralNet
from node import Node

nn=NeuralNet()
nn.importFile("sample.NNGrades.init")
nn.printFile("sup.txt")
print("Loaded data...")

train_dataset = Dataset(root_dir,
                        train_x,
                        train_y,
                        transforms=transforms["train"])
valid_dataset = Dataset(root_dir,
                        valid_x,
                        valid_y,
                        transforms=transforms["val"])
training_generator = torch.utils.data.DataLoader(train_dataset, **params)
validation_generator = torch.utils.data.DataLoader(valid_dataset, **params)
print("Created datasets...")

criterion = torch.nn.CrossEntropyLoss()
model = NeuralNet(0.001, criterion, 64, 2)
if use_cuda:
    model.cuda()
summary(model, (1, 64, 64))

print("Starting training...")
writer = SummaryWriter()

global_train_step = 0
global_val_step = 0
# Loop over epochs
for epoch in range(max_epochs):
    tqdm.write("Epoch: {}".format(epoch))

    progress_bar = tqdm(total=len(train_dataset), leave=True, position=0)