Example no. 1
    def produce_new_version(self):
        new_net = NNet(self.input_nodes, self.output_nodes)
        new_net.train(self.examples)

        az = AlphaZero(self.state_encoder, self.exploration_rate,
                       self.number_of_mcts_simulations)
        az.net = new_net
        return az
Example no. 2
def train(table: str = c.DEFAULT_TRAINING_TABLE, model_name: str = c.DEFAULT_MODEL_NAME, matches: int = 10,
          threshold: int = c.DEFAULT_THRESHOLD, learning_rate: float = c.DEFAULT_LEARNING_RATE,
          epochs: int = c.DEFAULT_EPOCHS, batch_size: int = c.DEFAULT_BATCH_SIZE, data_limit: int = 600000) -> None:
    new_net = NNet(learning_rate=learning_rate, epochs=epochs, batch_size=batch_size, model_name=model_name)
    old_net = NNet(model_name=model_name)
    db = Connector()
    examples = db.df_to_examples(db.retrieve_data(
        query=f"SELECT * FROM {table} ORDER BY counter DESC LIMIT {data_limit};"
    ))
    new_net.train(examples)
    score = _match_series(nnet1=new_net, nnet2=old_net, matches=matches)
    _evaluate_score(new_net, score, model_name, threshold)
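
For context, _evaluate_score is only referenced above, not defined in this excerpt. A minimal sketch of what such a gating step could look like; the acceptance rule and the save_model call are assumptions, not this project's actual code:

def _evaluate_score(new_net: NNet, score: int, model_name: str, threshold: int) -> None:
    # Hypothetical sketch: keep the freshly trained network only if it scored
    # at least `threshold` against the previous version in the match series.
    if score >= threshold:
        new_net.save_model(model_name)  # assumed persistence API on NNet
        print(f'new net accepted (score={score}, threshold={threshold})')
    else:
        print(f'new net rejected (score={score}, threshold={threshold})')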
Example no. 3
def train_stream(episodes: int = 50, model_name: str = c.DEFAULT_MODEL_NAME, rollout: bool = True,
                 iterations: int = c.DEFAULT_ITERATIONS, matches: int = 10,
                 threshold: int = c.DEFAULT_THRESHOLD) -> None:
    learning_rate, epochs, batch_size = _sample_parameters()
    new_net = NNet(learning_rate=learning_rate, epochs=epochs, batch_size=batch_size)
    old_net = NNet()

    _train_new_net(episodes=episodes, new_net=new_net, rollout=rollout, iterations=iterations)
    score = _match_series(nnet1=new_net, nnet2=old_net, matches=matches)

    print(f'parameters: learning rate={learning_rate}, epochs={epochs}, batch size={batch_size}')
    _evaluate_score(new_net, score, model_name, threshold)
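
_sample_parameters is likewise not shown; a plausible sketch, assuming it simply draws the three training hyperparameters at random for each run (the candidate values below are illustrative only):

import random

def _sample_parameters() -> tuple:
    # Hypothetical sketch: pick random hyperparameters for one training run.
    learning_rate = random.choice([1e-2, 1e-3, 1e-4])
    epochs = random.choice([5, 10, 20])
    batch_size = random.choice([32, 64, 128])
    return learning_rate, epochs, batch_size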
Example no. 4
def _fast_match(nnet1: NNet, nnet2: NNet) -> int:
    game = Game()
    while True:
        if game.player == 1:
            policy = nnet1.prediction(state=game.game_state, player=game.player)[0]
        else:
            policy = nnet2.prediction(state=game.game_state, player=game.player)[0]
        game.make_move(random.choices(range(len(policy)), weights=policy)[0])
        if game.has_won(game.game_state, player=game.player * -1):
            return game.player * -1
        elif game.is_draw(game.game_state):
            return 0
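
_match_series, used by the train functions in Examples 2 and 3, is not part of this excerpt. A minimal sketch of how it could aggregate _fast_match results into a single score; the side-swapping and the net-win scoring are assumptions:

def _match_series(nnet1: NNet, nnet2: NNet, matches: int) -> int:
    # Hypothetical sketch: play a series of fast games and count net wins for
    # nnet1. Sides are swapped every other game to reduce first-move bias.
    score = 0
    for i in range(matches):
        if i % 2 == 0:
            score += _fast_match(nnet1, nnet2)  # +1 nnet1 win, -1 nnet2 win, 0 draw
        else:
            score -= _fast_match(nnet2, nnet1)
    return score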
Example no. 5
 def __init__(self, state_encoder, exploration_rate,
              number_of_mcts_simulations):
     super().__init__(state_encoder)
     self.exploration_rate = exploration_rate
     self.number_of_mcts_simulations = number_of_mcts_simulations
     self.visited_states = set()
     self.examples = []
     self.P = {}  # P[s][a] = policy value for move a in state s
     self.Q = {}  # Q[s][a] = q-value of taking action a from state s
     self.N = {}  # N[s][a] = number of times algorithm played action a from state s
     self.is_learning = False
     self.net = NNet(self.input_nodes, self.output_nodes)
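
The P, Q and N dictionaries initialised above are the usual statistics of an AlphaZero-style tree search. For illustration, the textbook PUCT selection rule they typically feed; this is a generic sketch, not necessarily this project's exact implementation:

import math

def puct_select(P, Q, N, s, legal_moves, exploration_rate):
    # Generic PUCT rule:
    #   argmax_a  Q[s][a] + c * P[s][a] * sqrt(sum_b N[s][b]) / (1 + N[s][a])
    total_visits = sum(N[s][a] for a in legal_moves)
    return max(
        legal_moves,
        key=lambda a: Q[s][a]
        + exploration_rate * P[s][a] * math.sqrt(total_visits) / (1 + N[s][a]),
    )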
Example no. 6
    def build(self):
        self._init_view()
        main_l = BoxLayout(orientation='vertical')

        self.nnet = NNet(n_input=INPUT,
                         n_hidden=HIDDEN,
                         n_output=OUTPUT,
                         learning_rate=LR)
        self._prepare_nnet()

        main_l.add_widget(self._paint_w)
        main_l.add_widget(self._conf_l)
        return main_l
Example no. 7
def _train_new_net(episodes: int, new_net: NNet, rollout: bool, iterations: int) -> None:
    print('-' * 20)
    examples = []
    for i in range(episodes):
        if rollout:
            new_examples = _run_episode(iterations=iterations)
        else:
            new_examples = _run_episode(nnet=new_net, iterations=iterations)
        for ex in new_examples:
            examples.append(ex)
            examples.append(_mirror_example(ex))
        sys.stdout.write(f'\repisode: {i + 1}/{episodes}')
        sys.stdout.flush()
    print('')
    new_net.train(examples)
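
_mirror_example is referenced but not defined in this excerpt. A hypothetical sketch for a board game with left/right symmetry (the (state, policy, value) example layout and the 2-D board state are assumptions):

import numpy as np

def _mirror_example(example):
    # Hypothetical sketch: flip both the board state and the move policy
    # horizontally to produce an extra, equivalent training example.
    state, policy, value = example
    return np.fliplr(state), policy[::-1], value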
Example no. 8
    def fetch_prediction(self, nnet: NNet, x_noise: float = 0.) -> None:
        self.policy, self.nnet_value = nnet.prediction(self.state, player=1)
        if not x_noise:
            return

        d = scipy.stats.dirichlet.rvs([1.0 for _ in range(c.COLUMNS)])[0]
        self.policy = (1 - x_noise) * self.policy + x_noise * d  # mix in Dirichlet noise so the policy stays normalised
Example no. 9
 def __init__(self, filename, violation):
     self.nnet = NNet(filename)
     self.violation_path = violation
     self.input_vars = []
     self.output_vars = []
     self.relus = []
     self.relus_level = []
     self.formulae = []
     self.relu_fun = Symbol("relu", FunctionType(REAL, (REAL, )))
Example no. 10
class Main(App):
    def build(self):
        self._init_view()
        main_l = BoxLayout(orientation='vertical')

        self.nnet = NNet(n_input=INPUT,
                         n_hidden=HIDDEN,
                         n_output=OUTPUT,
                         learning_rate=LR)
        self._prepare_nnet()

        main_l.add_widget(self._paint_w)
        main_l.add_widget(self._conf_l)
        return main_l

    def _init_view(self):
        self._conf_l = BoxLayout(size_hint=(None, None),
                                 width=WINDOW_WIDTH,
                                 height=CONFIG_HEIGHT)
        self._paint_w = PaintWidget(size_hint=(None, None),
                                    width=WINDOW_WIDTH,
                                    height=PAINT_HEIGHT)
        self._clear_b = Button(text='clear')
        self._clear_b.bind(on_press=self.clear)
        self._query_b = Button(text='query')
        self._query_b.bind(on_press=self.query)

        self._conf_l.add_widget(self._clear_b)
        self._conf_l.add_widget(self._query_b)

    def _prepare_nnet(self):
        try:
            self.nnet.restore(MODEL_PATH)
        except:
            from tensorflow.examples.tutorials.mnist import input_data
            mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)

            for episode in range(EPISODES):
                batch_input, batch_target = mnist.train.next_batch(BATCH_SIZE)

                status = episode % 100 == 0

                self.nnet.train(batch_input, batch_target, status)

            self.nnet.save(MODEL_PATH)

    def clear(self, instance):
        self._paint_w.clear_canvas()

    def query(self, instance):
        predict = str(
            self.nnet.predict(
                self._paint_w.get_prepared_data(
                    (28, 28)).reshape(1, INPUT) / 255)[0])
        Popup(title='predict',
              content=Label(text=predict),
              size_hint=(None, None),
              size=(200, 200)).open()
Example no. 11
def main(argv=None):

    dataset_1 = gen_splitMNIST([0, 4])
    dataset_2 = gen_splitMNIST([5, 9])
    tasks = [dataset_1, dataset_2]
    nnet = NNet()
    results = train_nnet(tasks, nnet, owm_mode=FLAGS.owm)
    fn = datetime.now().strftime('%Y-%m-%d--%H-%M-%S') + '_owm_' + FLAGS.owm
    save_pickle(fn, FLAGS.logging_dir, results)
    sio.savemat(FLAGS.logging_dir + fn, results)
Example no. 12
    def __init__(self, config_file):
        config = configparser.ConfigParser()
        config.read(config_file, encoding="utf-8")
        height = int(config["DATA"]["height"])
        width = int(config["DATA"]["width"])
        channel = int(config["DATA"]["channel"])
        input_shape = (height, width, channel)
        dense_size = int(config["MODEL"]["num_class"])
        model_name = config["MODEL"]["model_name"]
        exp_dir = config["MODEL"]["exp_dir"]
        model_weight_path = os.path.join(exp_dir, "model", model_name + ".h5")

        self.nnet = NNet(input_shape=input_shape, num_class=dense_size)
        self.nnet.model.load_weights(model_weight_path)
Example no. 13
    def decide_move(self, method: str, iterations: int, model_name: str = c.DEFAULT_MODEL_NAME,
                    print_out: bool = True) -> Union[int, np.ndarray]:
        """
        Outputs a move for the current game state determined by a specified method.
        
        :param method: 'nnet': neural network with tree search, 'simple_nnet': neural network without tree search,
            'mcts': Monte Carlo tree search, 'input': input via terminal.
        :param iterations: Number of iterations in tree searches
        :param model_name: Structure name of the neural network
        :param print_out: Prints out extra information by the neural network
        :return: Determined best move
        """
        if method == 'input':
            while True:
                try:
                    move = int(input('input column number between 1 and 7: ')) - 1
                    if move in self.get_legal_moves(self.game_state):
                        return move
                except ValueError:
                    pass
                print('input not valid')

        if method == 'mcts':
            pi = self.tree_search(iterations=iterations)
            return np.argmax(pi)

        if method == 'nnet':
            pi = self.tree_search_nnet(nnet=NNet(model_name=model_name), iterations=iterations,
                                       print_out=print_out)
            return np.argmax(pi)

        if method == 'simple_nnet':
            policy, val = NNet().prediction(self.game_state, player=self.player)
            return np.argmax(policy)

        else:
            print('method not valid')
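
For context, a minimal usage sketch of the method above; Game() and make_move follow Example 4, and the iteration count is arbitrary:

game = Game()
move = game.decide_move(method='mcts', iterations=200)
game.make_move(move)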
Example no. 14
    [1, 1, 1, 1],
]

targetData = [
    [0, 0],
    [0, 0],
    [0, 0],
    [1, 0],
    [0, 0],
    [1, 0],
    [1, 0],
    [1, 1],
]

# nn = NNet(sizes=[4, 2])
nn = NNet([[[0.258584, -0.770864], [0.320592, 0.253271], [0.718206, -0.475193],
            [0.517670, 0.043095]]])
# nn.setActivations(['sigmoid'])

verbosePrint.vIteration = -1
verbosePrint.stage = ''

nn.printnn()

for row_index in range(len(targetData)):
    datain = inputData[row_index:row_index + 1]
    goal_prediction = targetData[row_index:row_index + 1]
    prediction = nn.fire(datain)
    print('Input:', datain, end=' ')
    print('Goal:', goal_prediction, end=' ')
    print('Prediction:', prediction)
Example no. 15
from netExamples.lecture.p3of5 import inputData
from netExamples.lecture.p3of5 import inputData as inputTraining
# from lecture.p3of5 import inputTraining
from netExamples.lecture.p3of5 import exactly as targetData
from netExamples.lecture.p3of5 import exactly as targetTraining
# from lecture.p3of5 import exactTraining as targetTraining

# nn = NNet(sizes=[6, 8, 2])
nn = NNet([
[[-0.809690, 0.529351, 0.130375, -0.668283, 0.607374, -0.560586, -0.631622, 0.770885],
 [0.772290, 0.701108, -0.449341, -0.654130, -0.313803, -0.156230, -0.912943, -0.920589],
 [0.285597, 0.646907, 0.003674, 0.279674, -0.764945, 0.966331, -0.000227, -0.537906],
 [0.966027, -0.940834, 0.268773, -0.683670, -0.772791, 0.159225, -0.624018, -0.483886],
 [0.418490, 0.493562, 0.056094, -0.845281, 0.681007, 0.868586, 0.402037, 0.880762],
 [0.270917, -0.058948, 0.545103, 0.637797, 0.012415, -0.676826, 0.995314, 0.577275]],
[[0.651401, 0.099612],
 [-0.519044, 0.414466],
 [0.030243, 0.950533],
 [0.594134, -0.812050],
 [-0.468415, -0.555548],
 [0.934102, -0.536747],
 [-0.005624, -0.327394],
 [-0.506726, -0.665108]],
], bias=[True, False])
# ])

nn.setActivations(['relu', 'linear'])

nn.checkup(inputData, targetData)

verbosePrint.vIteration = -1
verbosePrint.stage = ''
Example no. 16
# NNet rendering of the neural net in Grokking, p. 159
import numpy as np
import sys
from netExamples.grokking.mnist import \
    images, labels, test_images, test_labels
from nnet import NNet

np.random.seed(1)

batch_size = 100
iterations = 301

nn = NNet(sizes=[784, 100, 10], batch_size=batch_size)
nn.setActivations(['brelu', 'linear'])
nn.setMaskPr({1: 2})
nn.setAlpha(0.001)
nn.scale(0.1)

for j in range(iterations):
    error, correct_cnt = (0.0, 0)
    for i in range(int(len(images) / batch_size)):
        batch_start, batch_end = ((i * batch_size), ((i + 1) * batch_size))
        prediction = nn.learn(images[batch_start:batch_end],
                              labels[batch_start:batch_end])
        # vprint(i, nn, suffix='a', quit=True)
        # vprint(i, nn.dropout_masks[1], suffix='m', quit=True)
        # nn.train(labels[batch_start:batch_end])
        # vprint(i, nn, stage='b', quit=True)

        error += np.sum((labels[batch_start:batch_end] - prediction) ** 2)
        for k in range(batch_size):
Example no. 17
# prepare mnist data
tr_i_raw = idx2numpy.convert_from_file('mnist\\train-images.idx3-ubyte')
tr_l_raw = idx2numpy.convert_from_file('mnist\\train-labels.idx1-ubyte')
test_i_raw = idx2numpy.convert_from_file('mnist\\t10k-images.idx3-ubyte')
test_l_raw = idx2numpy.convert_from_file('mnist\\t10k-labels.idx1-ubyte')
tr_i = tr_i_raw.reshape([len(tr_i_raw), 784]) / 255
test_i = test_i_raw.reshape([len(test_i_raw), 784]) / 255
tr_l = np.full([len(tr_l_raw), 10], 0)
test_l = np.full([len(test_l_raw), 10], 0)
for i in range(len(tr_l_raw)):
    tr_l[i][tr_l_raw[i]] = 1
for i in range(len(test_l_raw)):
    test_l[i][test_l_raw[i]] = 1

nn = NNet(784, 10, layers=[16, 20])
nn.fit(tr_i, tr_l, batch=32, timeout=60 * 0.5, epochs=3)
nn.plot_stats()

res = nn.predict(test_i)
acc_matrix = np.zeros((10, 10))
for i in range(len(test_i)):
    index_predicted = res[i].argmax()
    index_true = test_l[i].argmax()
    if index_predicted != index_true:
        acc_matrix[index_true, index_predicted] += 1

acc_matrix = acc_matrix / len(test_i)
plt.matshow(acc_matrix)
plt.xlabel('predicted values')
plt.ylabel('true values')
Example no. 18
    def onBtnSolveClick(self):
        """
        Solve the IVP and plot solution (in case of checked).
        """
        condition = eval(self.ui.editInitialCondition.text())

        if self.data is None:
            try:
                self.interval = np.array(
                    eval(self.ui.editSolutionInterval.text()))
                self.interval = self.interval.reshape(len(self.interval), 1)
            except:
                message(self.ui.statusBar, 'Invalid solution interval format')
                return
        else:
            self.interval = self.data

        try:
            ivp = IVP(self.ui.editEquation.text(), condition[0], condition[1])
        except:
            message(self.ui.statusBar, 'Invalid equation type')
            return

        try:
            model = Model(self.ui.cbModel.currentText(),
                          int(self.ui.editNeurons.text()),
                          self.ui.cbActivationFunction.currentText())
            self.network = NNet(model.get(), self.ui.cbOptimizer.currentText(),
                                int(self.ui.editIterations.text()),
                                float(self.ui.editAccuracy.text()),
                                self.ui.cbTensorBoard.isChecked())
        except:
            message(self.ui.statusBar,
                    'An error has occurred creating the model')
            return

        self.network.set_updatable_widgets(self.progress_bar, self.lb_loss)

        enable_widgets(self.widgets, False)

        # Calculates the time of the training session
        start_t = time.time()
        update_status_bar(self.ui.statusBar, self.progress_bar, self.lb_loss)
        self.h = self.network.solve_ivp(ivp, self.interval)
        hide([self.progress_bar])
        end_t = time.time()

        # Shows the calculated time
        self.lb_loss.setText(self.lb_loss.text() + '. The training took ' +
                             '{:.2f}'.format(end_t - start_t) + ' secs.')

        try:
            self.values = self.network(self.interval)
            np.savetxt(self.path + "result.txt", self.values, fmt='%10.4f')
        except:
            message(self.ui.statusBar, 'An error occurred trying to save data!')
            return

        self.check_fo_saving_graph()

        enable_widgets(self.widgets, True)
        self.onCbModelSelectionChange(self.ui.cbModel.currentIndex())
Example no. 19
import numpy as np
import sys, os
from numpy import nan
from nnet import NNet
from verbosePrint import vprint
import verbosePrint

nn = NNet(sizes=[3, 2], batch_size=2)
# nn = NNet([[[0.258584, -0.770864],
#             [0.320592, 0.253271],
#             [0.718206, -0.475193],
#             [0.517670, 0.043095]]])
# nn.setActivations(['sigmoid'])

nn.printnn()
Example no. 20
class Model():
    def __init__(self, game):
        self.epoch_num = 10
        self.nnet = NNet(game, args)
        self.x, self.y = game.get_board_size()
        self.action_size = game.get_action_size()
        self.nnet.cuda()

    def train(self, examples):
        """
        use (board, policy, win rate) to train the nnet
        """
        optimizer = optim.Adam(self.nnet.parameters(),
                               lr=1e-7,
                               weight_decay=1e-7
                               )
        average_loss = 0
        total_batch_num = 0
        for epoch in range(self.epoch_num):
            epoch_loss = 0
            batch_idx = 0
            while batch_idx < int(len(examples)/args.batch_size):
                ids = np.random.randint(len(examples), size=args.batch_size)
                state, policy, v = list(zip(*[examples[i] for i in ids]))

                state = torch.Tensor(np.array(state)).contiguous().cuda()
                target_policy = torch.Tensor(
                    np.array(policy)).contiguous().cuda()
                target_v = torch.Tensor(np.array(v)).contiguous().cuda()

                # predict
                self.nnet.eval()
                out_policy, out_v = self.nnet(state)
                self.nnet.train()

                total_loss = self.loss(
                    target_policy, out_policy, target_v, out_v)
                '''
                print("state:\n {}".format(state[3]))
                print("policy:\n {}".format(target_policy[3]))
                print("nn_policy:\n {}".format(out_policy[3]))
                '''

                average_loss += abs(np.sum(total_loss.cpu().data.numpy()))
                epoch_loss += abs(np.sum(total_loss.cpu().data.numpy()))
                # print("loss in batch {} is {}".format(batch_idx, total_loss.cpu().data.numpy()))

                # compute gradient and do SGD step
                optimizer.zero_grad()
                total_loss.sum().backward()
                optimizer.step()

                batch_idx += 1
                total_batch_num += 1
            print('epoch: {}, loss: {}'.format(epoch, epoch_loss/batch_idx))
        self.nnet.eval()
        return average_loss / total_batch_num

    def predict(self, board):
        """
        board: np array with board
        """
        # preparing input
        board = torch.Tensor(board.astype(np.float64))
        board = board.contiguous().cuda()
        board = board.view(1, self.x, self.y)
        self.nnet.eval()
        with torch.no_grad():
            policy, v = self.nnet(board)
        return policy.data.cpu().numpy()[0], v.data.cpu().numpy()[0]

    def save_checkpoint(self, folder='train', filename='checkpoint.pth.tar'):
        filepath = os.path.join(folder, filename)
        if not os.path.exists(folder):
            os.mkdir(folder)
        torch.save({
            'state_dict': self.nnet.state_dict(),
        }, filepath)

    def load_checkpoint(self, folder='train', filename='checkpoint.pth.tar'):
        filepath = os.path.join(folder, filename)
        if not os.path.exists(filepath):
            raise("no model in {}".format(filepath))
        checkpoint = torch.load(filepath)
        self.nnet.load_state_dict(checkpoint['state_dict'])

    def loss(self, targets_p, outputs_p, target_v, outputs_v):
        '''
        print("loss:")
        print(-torch.sum(targets_p*outputs_p, dim=1))
        print((target_v-outputs_v.view(-1))**2)
        print(-torch.sum(targets_p*outputs_p, dim=1) +
              (target_v-outputs_v.view(-1))**2)
        '''
        return -torch.sum(targets_p*torch.log(outputs_p), dim=1) / self.action_size + (target_v-outputs_v.view(-1))**2
Example no. 21
from netExamples.lecture.sosb \
    import inputData, inputTraining, targetData, targetTraining

from nnet import NNet
from verbosePrint import vprint
import verbosePrint

nn = NNet(sizes=[12, 22, 4], bias=True)
nn.setActivations(['tanh', 'sigmoid'])

verbosePrint.vIteration = -1
verbosePrint.stage = ''

cycles = 20
report = max(1, cycles / 10)
checkupParams = (inputData, targetData, inputTraining, 25)

if cycles > 0:
    nn.checkup(*checkupParams)
    for iteration in range(cycles + 1):
        vprint(iteration, '~~~~~~~~~~~ Iteration %d ~~~~~~~~~~~' % iteration)
        combinedError = 0
        for row_index in range(len(targetTraining)):
            datain = inputTraining[row_index:row_index + 1]
            goal_prediction = targetTraining[row_index:row_index + 1]
            prediction = nn.fire(datain)
            # print('Prediction:' + str(prediction))
            vprint(iteration, nn)

            error = (goal_prediction - prediction)**2
            combinedError += error
Example no. 22
from nnet import NNet
from verbosePrint import vprint
import verbosePrint

from netExamples.lecture.p3of5 import inputData
from netExamples.lecture.p3of5 import inputTraining
# from lecture.p3of5 import inputData as inputTraining
from netExamples.lecture.p3of5 import atLeast as targetData
from netExamples.lecture.p3of5 import atLeastTraining as targetTraining
# from lecture.p3of5 import atLeast as targetTraining

# nn = NNet(sizes=[5, 3], bias=True)
nn = NNet(
    [[[-0.829638, 0.164111, 0.398885], [-0.603684, -0.603331, -0.819179],
      [-0.080592, -0.386044, -0.931615], [0.762514, -0.142887, -0.737862],
      [0.175430, 0.790112, -0.267367], [-0.732674, -0.825474, 0.232357]]],
    bias=True)
# ]])
nn.setActivations(['linear'])
nn.setVerbose([])

nn.checkup(inputData, targetData)

verbosePrint.vIteration = -1
verbosePrint.stage = ''

cycles = 80
report = cycles / 10

for iteration in range(cycles + 1):
    vprint(iteration, '~~~~~~~~~~~ Iteration %d ~~~~~~~~~~~' % iteration)
Example no. 23
import numpy as np
from netExamples.grokking.mnist import \
    images, labels, test_images, test_labels
from cmatrix import ConvolutionMatrix

from nnet import NNet
from verbosePrint import vprint
import verbosePrint

np.random.seed(1)

batch_size = 128
iterations = 300

cm = ConvolutionMatrix(rows=9, cols=16, shapes=((28, 28), (3, 3)))
hLen = cm.outputLength()
nn = NNet(sizes=[784, hLen, 10], batch_size=batch_size)
nn.replaceLayer(0, cm)
nn.setActivations(['tanh', 'softmax'])
nn.setMaskPr({1: 2})
nn.setAlpha(2)
nn.scale(0.01, 0)
nn.scale(0.1, 1)

# vprint(0, nn, quit=True)
# params = (test_images, test_labels)
# nn.checkup(*params)
for j in range(0, iterations + 1):
    correct_cnt = 0
    for i in range(int(len(images) / batch_size)):
        batch_start, batch_end = ((i * batch_size), ((i + 1) * batch_size))
        prediction = nn.learn(images[batch_start:batch_end],
Example no. 24
def vprintnn(i, quit=False, suffix=''):
    ll = [layer_0, layer_1, layer_2]
    ww = [weights_0_1, weights_1_2]
    nn = NNet(weights=ww, layerList=ll)
    nn.setAlpha(alpha)
    vprint(i, nn, quit=quit, prefix='gro', suffix=suffix)
Example no. 25
import numpy as np
from nnet import NNet

nn = NNet([[[0.1], [0.2], [-0.1]]])
nn.setAlpha(0.01)
nn.setVerbose(True)

datain = [[8.5, 0.65, 1.2]]
goal = [[1]]
for i in range(4):
    output = nn.fire(datain)
    print('Goal:    ' + str(goal))
    print(nn)
    nn.learn(datain, goal)
Example no. 26
class Ui_MainWindowImplementations():
    """
    Implement the slots of some of the events that may be fired by the
    components declared in `ui` class.
    """
    def __init__(self, ui):
        """
        `ui` is the class autogenerated by pyuic5.
        """
        self.ui = ui
        self.data = None

        self.progress_bar, self.lb_loss = update_status_bar(
            self.ui.statusBar, None, None)
        self.widgets = [
            self.ui.cbPoints, self.ui.editSolutionInterval, self.ui.btnSolve,
            self.ui.editEquation, self.ui.cbModel,
            self.ui.editInitialCondition, self.ui.editNeurons,
            self.ui.editAccuracy, self.ui.editIterations,
            self.ui.cbActivationFunction, self.ui.cbOptimizer,
            self.ui.cbPlotLoss, self.ui.cbPlotSolution, self.ui.cbTensorBoard,
            self.ui.btnLoadFromFile
        ]

        self.path = 'results/'
        # Create path `results` if it doesn't exist
        os.makedirs(self.path, exist_ok=True)

    def onEditValueChange(self):
        """
        If any of the edits is empty then the button 'Solve' is disabled
        to avoid issues in execution.
        """
        self.ui.btnSolve.setEnabled(
            self.ui.editEquation.text() != ""
            and self.ui.editInitialCondition.text() != ""
            and self.ui.editNeurons.text() != ""
            and self.ui.editSolutionInterval.text() != ""
            and self.ui.editAccuracy.text() != ""
            and self.ui.editIterations.text() != "")

    def onCbModelSelectionChange(self, index):
        self.ui.cbActivationFunction.setEnabled(index not in [0, 1, 3])
        self.ui.lbActivationFunction.setEnabled(index not in [0, 1, 3])
        self.ui.editNeurons.setEnabled(index not in [0, 1])
        self.ui.lbNeurons.setEnabled(index not in [0, 1])

    def onCbPointsSelectionChange(self, index):
        self.ui.editSolutionInterval.setEnabled(index == 1)
        self.ui.editSolutionInterval.setClearButtonEnabled(index == 1)
        self.ui.editSolutionInterval.clear()
        self.ui.btnLoadFromFile.setVisible(index == 0)
        self.data = None

    def onBtnLoadFromFileClick(self):
        """
        Instead of writing list of points on your own,
        just load it from a `.txt` file.
        """
        dfile = QFileDialog.getOpenFileName(caption="Load data",
                                            directory=".",
                                            filter="Text files (*.txt)")
        if dfile[0] != '':
            try:
                self.data = np.loadtxt(dfile[0])
                self.data = self.data.reshape(len(self.data), 1)
                self.ui.editSolutionInterval.setText(dfile[0])
            except:
                message(self.ui.statusBar, 'Invalid file format')

    def check_fo_saving_graph(self):
        plt.figure("Loss")
        sbn.set(font_scale=1)
        plt.xlabel("epoch")
        plt.ylabel("loss")
        plt.plot(range(1, 1 + len(self.h.history['loss'])),
                 self.h.history['loss'],
                 "r-",
                 label='loss')
        plt.legend()
        plt.savefig(self.path + "loss.png", format="png")
        if self.ui.cbPlotLoss.isChecked():
            plt.show()

        plt.figure("Result")
        sbn.set(font_scale=1)
        plt.xlabel("x")
        plt.ylabel("y")
        x = np.array(np.linspace(self.interval[0], self.interval[-1], 100))
        x = x.reshape(len(x), 1)
        plt.plot(x, self.network(x), '-g', label='NNet')
        plt.legend()
        plt.savefig(self.path + "result.png", format="png")
        if self.ui.cbPlotSolution.isChecked():
            plt.show()

    def onBtnSolveClick(self):
        """
        Solve the IVP and plot solution (in case of checked).
        """
        condition = eval(self.ui.editInitialCondition.text())

        if self.data is None:
            try:
                self.interval = np.array(
                    eval(self.ui.editSolutionInterval.text()))
                self.interval = self.interval.reshape(len(self.interval), 1)
            except:
                message(self.ui.statusBar, 'Invalid solution interval format')
                return
        else:
            self.interval = self.data

        try:
            ivp = IVP(self.ui.editEquation.text(), condition[0], condition[1])
        except:
            message(self.ui.statusBar, 'Invalid equation type')
            return

        try:
            model = Model(self.ui.cbModel.currentText(),
                          int(self.ui.editNeurons.text()),
                          self.ui.cbActivationFunction.currentText())
            self.network = NNet(model.get(), self.ui.cbOptimizer.currentText(),
                                int(self.ui.editIterations.text()),
                                float(self.ui.editAccuracy.text()),
                                self.ui.cbTensorBoard.isChecked())
        except:
            message(self.ui.statusBar,
                    'An error has occurred creating the model')
            return

        self.network.set_updatable_widgets(self.progress_bar, self.lb_loss)

        enable_widgets(self.widgets, False)

        # Calculates the time of the training session
        start_t = time.time()
        update_status_bar(self.ui.statusBar, self.progress_bar, self.lb_loss)
        self.h = self.network.solve_ivp(ivp, self.interval)
        hide([self.progress_bar])
        end_t = time.time()

        # Shows the calculated time
        self.lb_loss.setText(self.lb_loss.text() + '. The training took ' +
                             '{:.2f}'.format(end_t - start_t) + ' secs.')

        try:
            self.values = self.network(self.interval)
            np.savetxt(self.path + "result.txt", self.values, fmt='%10.4f')
        except:
            message(self.ui.statusBar, 'An error occurred trying to save data!')
            return

        self.check_fo_saving_graph()

        enable_widgets(self.widgets, True)
        self.onCbModelSelectionChange(self.ui.cbModel.currentIndex())
Example no. 27
    [0, 0],
    [1, 1],
    [0, 1],
    [1, 1],
    [1, 1],
    [0, 1],
    [0, 1],
    [1, 1],
    [0, 0],
]

inputTraining = inputData
targetTraining = targetData

# nn = NNet(sizes=[5, 3], bias=True)
nn = NNet(sizes=[6, 12, 2], bias=True)
# nn = NNet([[[-0.829638, 0.164111, 0.398885],
#             [-0.603684, -0.603331, -0.819179],
#             [-0.080592, -0.386044, -0.931615],
#             [0.762514, -0.142887, -0.737862],
#             [0.175430, 0.790112, -0.267367],
#             [-0.732674, -0.825474, 0.232357]]], bias=True)
# ]])
nn.setActivations(['relu', 'linear'])
nn.setVerbose([])

nn.checkup(inputData, targetData)

verbosePrint.vIteration = -1
verbosePrint.stage = ''
Example no. 28
 def __init__(self, game):
     self.epoch_num = 10
     self.nnet = NNet(game, args)
     self.x, self.y = game.get_board_size()
     self.action_size = game.get_action_size()
     self.nnet.cuda()
Example no. 29
    [0, 0, 0],
    [0, 1, 0],
    [0, 1, 0],
    [0, 0, 0],
    [0, 0, 0],
    [0, 0, 0],
    [0, 0, 0],
    [0, 0, 1],
    [0, 0, 0],
    [0, 0, 0],
    [0, 0, 0],
    [0, 0, 0],
    [0, 0, 0],
]

nn = NNet(sizes=[5, 3], bias=True)
nn.setActivations(['sigmoid'])

verbosePrint.vIteration = -1
verbosePrint.stage = ''

nn.checkup(inputData, targetData)

cycles = 40
report = cycles/10

for iteration in range(cycles + 1):
    vprint(iteration, '~~~~~~~~~~~ Iteration %d ~~~~~~~~~~~' % iteration)
    combinedError = 0
    for row_index in range(len(targetData)):
        datain = inputData[row_index:row_index + 1]
Example no. 30
import numpy as np
from numpy import nan
from nnet import NNet
from verbosePrint import vp, vprint, vprintnn
import verbosePrint

demo = 6
verbosePrint.vIteration = 0
verbosePrint.stage = ''

vp()

if demo == 4:
    # weights = [[[0.1],
    #             [0.2],
    #             [-0.1]]]
    nn = NNet([[[0.1], [0.2], [-0.1]]])
    nn.setAlpha(0.01)

    datain = np.array([[8.5, 0.65, 1.2]])
    goal = np.array([[1]])
    for i in range(4):
        output = nn.fire(datain)
        print('Goal:    ' + str(goal))
        print(nn)
        nn.learn(datain, goal)

if demo == 6:
    nn = NNet([[[0.5], [0.48], [-0.7]]])
    nn.setAlpha(0.1)

    streetlights = np.array([[1, 0, 1], [0, 1, 1], [0, 0, 1], [1, 1, 1],