Example no. 1
 def __init__(self,
              n_population,
              pc,
              pm,
              bankruptcy_data,
              non_bankruptcy_data,
              clusters_data,
              cluster_centers,
              threshold_list,
              population=None):
     self.threshold_list = threshold_list
     self.bankruptcy_data = bankruptcy_data
     self.non_bankruptcy_data = non_bankruptcy_data
     self.neural_network = NeuralNetwork(n_inputs=6,
                                         n_outputs=2,
                                         n_neurons_to_hl=6,
                                         n_hidden_layers=1)
     self.n_population = n_population
     self.p_crossover = pc  # percent of crossover
     self.p_mutation = pm  # percent of mutation
     self.population = population or self._makepopulation()
     self.saved_cluster_data = clusters_data
     self.cluster_centers = cluster_centers
     self.predict_bankruptcy = []
     self.predict_non_bankruptcy = []
     self.fitness_list = []  # list of (chromosome, fitness) pairs
     self.currentUnderSampling = None
     self.predict_chromosome = None
     self.fitness()
Example no. 2
 def test_weighs_structure(self):
     ann = NeuralNetwork([5, 3], alpha=1e-5)
     ann.set_data_(self.X, self.y)
     coefs = ann.unflatten_coefs(ann.init_weights_())
     shapes = np.array([coef.shape for coef in coefs])
     np.testing.assert_array_equal(shapes, np.array([[5, 5], [6, 3], [4, 3]]))
Example no. 3
 def test_gradient_computation(self):
     ann = NeuralNetwork([2, 2], alpha=1e-5)
     ann.set_data_(self.X, self.y)
     coefs = ann.init_weights_()
     g1 = ann.grad_approx(coefs, e=1e-5)
     g2 = ann.grad(coefs)
     np.testing.assert_array_almost_equal(g1, g2, decimal=10)
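A worked sketch of the numerical side of this check: central finite differences over a flat coefficient vector. This is a generic illustration of the technique, not the signature of ann.grad_approx:

import numpy as np

def numeric_grad(cost, theta, e=1e-5):
    # Central-difference approximation of d cost / d theta, one
    # coordinate at a time (hypothetical helper for illustration).
    g = np.zeros_like(theta)
    for i in range(theta.size):
        step = np.zeros_like(theta)
        step[i] = e
        g[i] = (cost(theta + step) - cost(theta - step)) / (2 * e)
    return g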
Example no. 4
 def test_fit_and_predict(self):
     ann = NeuralNetwork([4, 2], alpha=1e-5)
     ann.fit(self.X, self.y)
     T = self.X[[10, 60, 110]]
     predictions = ann.predict(T)
     print(predictions)
     np.testing.assert_array_equal(predictions, np.array([0, 1, 2]))
Example no. 5
 def test_predict_probabilities(self):
     ann = NeuralNetwork([4, 2], alpha=1e-5)
     ann.fit(self.X, self.y)
     T = self.X[[15, 65, 115, 117]]
     ps = ann.predict_proba(T)
     margin = np.min(np.max(ps, axis=1))
     self.assertGreater(margin, 0.90)
Example no. 6
    def test_with_crossvalidation(self):
        from sklearn.model_selection import cross_validate

        clf = NeuralNetwork([10, 2], alpha=1e-5)
        scores = cross_validate(clf, self.X, self.y, scoring='accuracy', cv=5)
        acc = np.sum(scores["test_score"]) / 5
        self.assertGreater(acc, 0.94)
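For this to work, NeuralNetwork has to behave like a scikit-learn estimator: cross_validate clones the classifier for every fold, which requires get_params/set_params in addition to fit and predict. Averaging the fold scores with np.mean(scores["test_score"]) is equivalent to the manual sum above.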
Example no. 7
 def __init__(self, **kwargs):
     self._quote_collection_name = kwargs.get("quote_collection")
     self._qoute_mongo_client = ToolMongoClient(
         kwargs.get("base_cfg_file", "mongo.conf"))
     self._class_type = kwargs.get("class_type")
     self._time_class = kwargs.get("time_class")
     self.X, self.y = self.__get_data()
     self.ann = NeuralNetwork([4, 3, 4], "tanh")
Example no. 8
def main():
    # x and y (training inputs and targets) are assumed to be defined
    # elsewhere in this script.
    size_of_learn_sample = int(len(x) * 0.9)
    print(size_of_learn_sample)

    NN = NeuralNetwork(x, y, 0.5)

    # NN.print_matrices()
    NN.train()
    NN.print_matrices()
Example no. 9
    def evaluate_fitness_of_phenotype(cls, phenotype):

        # Copy flatland so we can reuse for all phenotypes
        flatland_scenarios = [
            deepcopy(flatland_scenario)
            for flatland_scenario in cls.flatland_scenarios
        ]

        # Init neural network layers from phenotype weights
        layers = list()
        for layer_weight in phenotype.layer_weights:
            layers.append(NeuronLayer(layer_weight))

        # Init phenotype neural network
        ann = NeuralNetwork(layers)

        # Init phenotype agent
        agent = FlatlandAgent(ann)

        # Init fitness container used for avg computation
        fitness_scenarios = list()

        # Run agent for scenarios
        for flatland_scenario in flatland_scenarios:

            # Init variables for scenario fitness evaluation
            phenotype_timesteps = 1
            poisons = 0
            foods = 0

            while phenotype_timesteps != cls.max_time_steps:

                # Get sensor data [left, front, right]
                cells = flatland_scenario.get_sensible_cells()

                # Let agent choose action based on sensor data
                action = agent.choose_action(cells)

                # Effect of action
                if action != Move.STAND_STILL:
                    cell_value = cells[action.value - 1]
                    if cell_value == Flatland.food:
                        foods += 1
                    elif cell_value == Flatland.poison:
                        poisons += 1

                # Commit action to world
                flatland_scenario.move_agent(action)

                phenotype_timesteps += 1

            # Add fitness evaluation for scenario
            fitness_scenarios.append(cls.fitness_function(foods, poisons))

        # Evaluate fitness of agent and add it to collection
        return sum(fitness_scenarios) / len(fitness_scenarios)
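cls.fitness_function itself is not shown; a minimal stand-in to make the averaging above concrete, with a made-up weighting (not the project's actual rule):

def fitness_function(foods, poisons, poison_penalty=2.0):
    # Hypothetical scoring: reward each food eaten, penalise each poison
    # more heavily. The penalty weight is an assumption for illustration.
    return foods - poison_penalty * poisons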
Example no. 10
    def test_on_digits(self):
        data_full = datasets.load_digits()
        data, resp = utils.shuffle(data_full.data, data_full.target)
        m = data.shape[0]
        X, y = data[:m // 2], resp[:m // 2]
        X_test, y_test = data[m // 2:], resp[m // 2:]

        ann = NeuralNetwork([20, 5], alpha=1e-5)
        ann.fit(X, y)
        y_hat = ann.predict(X_test)
        acc = metrics.accuracy_score(y_test, y_hat)
        self.assertGreater(acc, 0.85)
Example no. 11
import GA
import numpy as np
from ann import NeuralNetwork

sol_per_pop = 8
num_parents_mating = 4
crossover_location = 5

# Define the population size.
pop_size = sol_per_pop  # The population will have sol_per_pop chromosomes, each with num_weights genes.
print(pop_size)
new_population = []

for i in range(sol_per_pop):
    new_network = NeuralNetwork()
    weights = []
    # Input layer
    input_weights = np.random.rand(4, 6)  # weights
    input_biases = np.random.rand(6)  # biases
    weights.append(input_weights)
    weights.append(input_biases)
    # Hidden layer
    hidden_weights = np.random.rand(6, 6)  # weights
    hidden_biases = np.random.rand(6)  # biases
    weights.append(hidden_weights)
    weights.append(hidden_biases)
    # Output layer
    output_weights = np.random.rand(6, 3)  # weights
    output_biases = np.random.rand(3)  # biases
    weights.append(output_weights)
    weights.append(output_biases)
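The snippet stops before the generated weights are used; a minimal sketch of how a single-point crossover at crossover_location could combine two such weight sets (the helper below is an assumption for illustration, not part of the original GA module):

def single_point_crossover(parent_a, parent_b, point=crossover_location):
    # Hypothetical crossover on flattened weight vectors: genes before
    # `point` come from parent_a, the rest from parent_b.
    flat_a = np.concatenate([np.ravel(w) for w in parent_a])
    flat_b = np.concatenate([np.ravel(w) for w in parent_b])
    return np.concatenate([flat_a[:point], flat_b[point:]])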
Example no. 12
    def __init__(self, flatland_scenarios, genotype_agent, time_steps, *args,
                 **kwargs):
        Tk.__init__(self, *args, **kwargs)
        self.configure(background="#2b2b2b")

        # Static view state variables
        self.max_time_steps = time_steps
        self.phenotype_agent = genotype_agent.translate_to_phenotype()
        self.flatland_agent = FlatlandAgent(
            NeuralNetwork(
                [NeuronLayer(w) for w in self.phenotype_agent.layer_weights]))
        self.canvas = Canvas(self,
                             width=FlatlandView.viewport_width,
                             height=FlatlandView.viewport_height,
                             bg="#a0a0a0",
                             highlightbackground="#000000")
        self.agent_polygon_x_y = [
            10, -10, 30, 0, 10, 10, 0, 30, -10, 10, -30, 0, -10, -10
        ]
        self.agent_senors_x_y = [
            30, -10, 40, 0, 30, 10, 20, 0, 0, 40, -10, 30, 0, 20, 10, 30, -30,
            -10, -20, 0, -30, 10, -40, 0
        ]
        self.flatland_length = flatland_scenarios[0].length
        self.agent_start = flatland_scenarios[0].agent_start

        # Dynamic view state variables
        self.flatland_scenarios = flatland_scenarios
        self.current_scenario = 0
        self.current_flatland_scenario = deepcopy(
            self.flatland_scenarios[self.current_scenario])
        self.time_steps = 0
        self.time_step_delay = IntVar(self, 1000)
        self.time_step_stringvar = StringVar(self, str(self.time_steps))
        self.sensor_rotation = 0

        # Draw flatland grid and configure canvas
        self.draw_canvas_lines()
        self.canvas.grid(rowspan=18, columnspan=3)

        self.time_step_num_font = tkFont.Font(family="Helvetica",
                                              size=72,
                                              weight="bold")
        self.time_step_text_font = tkFont.Font(family="Helvetica",
                                               size=18,
                                               weight="bold")
        self.time_step_frame = Frame(self, background="#2b2b2b")

        Label(self.time_step_frame,
              textvariable=self.time_step_stringvar,
              font=self.time_step_num_font,
              background="#2b2b2b",
              foreground="#a9b7c6").pack()
        Label(self.time_step_frame,
              text="time steps",
              font=self.time_step_text_font,
              background="#2b2b2b",
              foreground="#a9b7c6").pack()
        self.time_step_frame.grid(row=0, column=4)

        Scale(self,
              from_=250,
              to=5000,
              resolution=250,
              background="#2b2b2b",
              foreground="#a9b7c6",
              label="Time step delay (ms)",
              variable=self.time_step_delay).grid(row=16, column=4, padx=20)

        Button(self,
               text="New Scenarios",
               width=25,
               command=self.generate_new_scenarios,
               foreground="#a9b7c6",
               background="#2b2b2b",
               highlightbackground="#2b2b2b").grid(row=17, column=4, padx=20)
Example no. 13
 def __init__(self):
     self.network = NeuralNetwork()
     self.fitness = -1
Example no. 14
import pygame
import pickle
import numpy as np
from game import init, iterate
from ann import NeuralNetwork
import utils

# Architecture (specify the network architecture here)
network = NeuralNetwork(layers=[7, 14, 14, 7, 1],
                        activations=['sigmoid', 'sigmoid', 'sigmoid', 'tanh'])
lr = 0.1
losses = []

screen, font = init()
# Game Loop / Train Loop
frame_count, score, _, _, x = iterate.iterate(screen, font, 0, 0)
game = True
run = True
prediction = 0
while run:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
    prediction = utils.forward(x, network)
    frame_count, score, game, run, x = iterate.iterate(screen, font,
                                                       frame_count, score,
                                                       game, run, prediction)
    loss = utils.backward(prediction, x, lr, network)
    losses.append(loss)
pygame.quit()
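pickle is imported above but never used in the visible part of the script; a short follow-up sketch of saving the trained network and the loss history once the loop ends (the file names are assumptions):

# Persist the trained network and recorded losses (hypothetical file names).
with open("network.pkl", "wb") as f:
    pickle.dump(network, f)
with open("losses.pkl", "wb") as f:
    pickle.dump(losses, f)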
Example no. 15
 def generate_population(self):
     self.population = []
     for i in range(self.population_max_size):
         agent = Agent(i, NeuralNetwork())
         self.population.append(agent)
Example no. 16
 def test_set_data(self):
     ann = NeuralNetwork([5, 3], alpha=1e-5)
     ann.set_data_(self.X, self.y)
     coefs = ann.init_weights_()
     self.assertEqual(len(coefs), 55)
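The expected length follows from the same architecture as in Example no. 2: flattening coefficient matrices of shapes (5, 5), (6, 3) and (4, 3) gives 25 + 18 + 12 = 55 values.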
Example no. 17
import pickle
import matplotlib.pyplot as plt

ground_truth_dataset = [
  [15, 3, 1],
  [10, 5, 1],
  [20, 1, 1],
  [1,  5, 0],
  [5,  0, 0],
  [30, 0, 1],
  [2,  1, 0],
  [5,  5, 1],
  [7, 10, 0],
  [25, 6, 1]
]

n = NeuralNetwork()

stats = n.train(ground_truth_dataset, 100001, 0.001)
epochs = stats[0]
min_losses = stats[1]
avg_losses = stats[2]
max_losses = stats[3]

with open("model.bin", "wb") as f:
  pickle.dump(n, f)

plt.ylabel("Loss")
plt.xlabel("Epoch")
plt.plot(epochs, min_losses, label="Min loss")
plt.plot(epochs, avg_losses, label="Avg loss")
plt.plot(epochs, max_losses, label="Max loss")
plt.legend()
plt.show()
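The pickled model can later be restored the same way; how it is then queried depends on this NeuralNetwork's API, so only the reload step is sketched here:

# Reload the trained network from disk for later use.
with open("model.bin", "rb") as f:
    model = pickle.load(f)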
Example no. 18
parser.add_argument(
    "--batchSize", help="Batch size count used for training data.", default=1024, type=int)

parser.add_argument("--dropout", help="Percent Dropout",
                    default=0.25, type=float)

parser.add_argument("--train", help="Train dataset", default='dataset/train', type=str)
parser.add_argument("--test", help="Test dataset", default='dataset/test', type=str)

if __name__ == "__main__":
    args = parser.parse_args()

    ann = NeuralNetwork(
        tsSize=args.timeseries,
        lstmSize=args.lstmSize,
        dropout=args.dropout,
    )

    if os.path.isfile(args.weights):
        ann.model.load_weights(args.weights)

    if args.action == "train":
        ann.fit(args.weights, args.train, args.test,
                epochs=args.epochs, batch_size=args.batchSize)

    if args.action == "serve":
        serve(ann, args.test)

    if args.action == "cli":
        while 1: