Example no. 1
 def __init__(self,
              n_population,
              pc,
              pm,
              bankruptcy_data,
              non_bankruptcy_data,
              clusters_data,
              cluster_centers,
              threshold_list,
              population=None):
     self.threshold_list = threshold_list
     self.bankruptcy_data = bankruptcy_data
     self.non_bankruptcy_data = non_bankruptcy_data
     self.neural_network = NeuralNetwork(n_inputs=6,
                                         n_outputs=2,
                                         n_neurons_to_hl=6,
                                         n_hidden_layers=1)
     self.n_population = n_population
     self.p_crossover = pc  # percent of crossover
     self.p_mutation = pm  # percent of mutation
     self.population = population or self._makepopulation()
     self.saved_cluster_data = clusters_data
     self.cluster_centers = cluster_centers
     self.predict_bankruptcy = []
     self.predict_non_bankruptcy = []
     self.fitness_list = []  # list of [chromosome, fitness] pairs
     self.currentUnderSampling = None
     self.predict_chromosome = None
     self.fitness()
Example no. 2
 def test_fit_and_predict(self):
     ann = NeuralNetwork([4, 2], alpha=1e-5)
     ann.fit(self.X, self.y)
     T = self.X[[10, 60, 110]]
     predictions = ann.predict(T)
     print(predictions)
     np.testing.assert_array_equal(predictions, np.array([0, 1, 2]))
Example no. 3
 def test_predict_probabilities(self):
     ann = NeuralNetwork([4, 2], alpha=1e-5)
     ann.fit(self.X, self.y)
     T = self.X[[15, 65, 115, 117]]
     ps = ann.predict_proba(T)
     margin = np.min(np.max(ps, axis=1))
     self.assertGreater(margin, 0.90)
Example no. 4
    def test_on_digits(self):
        data_full = datasets.load_digits()
        data, resp = utils.shuffle(data_full.data, data_full.target)
        m = data.shape[0]
        X, y = data[:m // 2], resp[:m // 2]
        X_test, y_test = data[m // 2:], resp[m // 2:]

        ann = NeuralNetwork([20, 5], alpha=1e-5)
        ann.fit(X, y)
        y_hat = ann.predict(X_test)
        acc = metrics.accuracy_score(y_test, y_hat)
        self.assertGreater(acc, 0.85)
Example no. 5
    def test_with_crossvalidation(self):
        from sklearn.model_selection import cross_validate

        clf = NeuralNetwork([10, 2], alpha=1e-5)
        scores = cross_validate(clf, self.X, self.y, scoring='accuracy', cv=5)
        acc = np.mean(scores["test_score"])
        self.assertGreater(acc, 0.94)
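Passing NeuralNetwork straight to cross_validate only works if the class follows the scikit-learn estimator protocol: an __init__ that simply stores its parameters, fit/predict methods, and get_params/set_params (usually inherited from BaseEstimator). A minimal sketch of such a shell, assuming nothing about the real implementation beyond the constructor arguments used above:

# Minimal sketch of a scikit-learn-compatible estimator shell (hypothetical;
# the real NeuralNetwork used in these examples may be organised differently).
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin

class NeuralNetworkSketch(BaseEstimator, ClassifierMixin):
    def __init__(self, hidden_layers=(10, 2), alpha=1e-5):
        # Only store the constructor arguments; BaseEstimator derives
        # get_params/set_params from them, which cross_validate needs
        # in order to clone the estimator for each fold.
        self.hidden_layers = hidden_layers
        self.alpha = alpha

    def fit(self, X, y):
        self.classes_ = np.unique(y)
        # ... gradient-based training would go here ...
        return self

    def predict(self, X):
        # Placeholder prediction; a trained network returns learned labels.
        return np.full(len(X), self.classes_[0])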
Example no. 6
 def __init__(self, **kwargs):
     self._quote_collection_name = kwargs.get("quote_collection")
     self._qoute_mongo_client = ToolMongoClient(
         kwargs.get("base_cfg_file", "mongo.conf"))
     self._class_type = kwargs.get("class_type")
     self._time_class = kwargs.get("time_class")
     self.X, self.y = self.__get_data()
     self.ann = NeuralNetwork([4, 3, 4], "tanh")
Example no. 7
 def test_gradient_computation(self):
     ann = NeuralNetwork([2, 2], alpha=1e-5)
     ann.set_data_(self.X, self.y)
     coefs = ann.init_weights_()
     g1 = ann.grad_approx(coefs, e=1e-5)
     g2 = ann.grad(coefs)
     np.testing.assert_array_almost_equal(g1, g2, decimal=10)
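The test above checks an analytic gradient against a numerical one. The usual central-difference approximation that a grad_approx-style method computes looks roughly like this (a standalone sketch with a hypothetical cost callable, not the project's actual code):

import numpy as np

def grad_approx_sketch(cost, coefs, e=1e-5):
    # Central differences: perturb each coefficient by +/- e and take the
    # slope of the scalar cost; `cost` is any callable mapping a flat
    # coefficient vector to a float.
    grad = np.zeros_like(coefs, dtype=float)
    for i in range(len(coefs)):
        step = np.zeros_like(coefs, dtype=float)
        step[i] = e
        grad[i] = (cost(coefs + step) - cost(coefs - step)) / (2 * e)
    return grad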
Example no. 8
    def evaluate_fitness_of_phenotype(cls, phenotype):

        # Copy flatland so we can reuse for all phenotypes
        flatland_scenarios = [
            deepcopy(flatland_scenario)
            for flatland_scenario in cls.flatland_scenarios
        ]

        # Init neural network layers from phenotype weights
        layers = list()
        for layer_weight in phenotype.layer_weights:
            layers.append(NeuronLayer(layer_weight))

        # Init phenotype neural network
        ann = NeuralNetwork(layers)

        # Init phenotype agent
        agent = FlatlandAgent(ann)

        # Init fitness container used for avg computation
        fitness_scenarios = list()

        # Run agent for scenarios
        for flatland_scenario in flatland_scenarios:

            # Init variables for scenario fitness evaluation
            phenotype_timesteps = 1
            poisons = 0
            foods = 0

            while phenotype_timesteps != cls.max_time_steps:

                # Get sensor data [left, front, right]
                cells = flatland_scenario.get_sensible_cells()

                # Let agent choose action based on sensor data
                action = agent.choose_action(cells)

                # Effect of action
                if action != Move.STAND_STILL:
                    cell_value = cells[action.value - 1]
                    if cell_value == Flatland.food:
                        foods += 1
                    elif cell_value == Flatland.poison:
                        poisons += 1

                # Commit action to world
                flatland_scenario.move_agent(action)

                phenotype_timesteps += 1

            # Add fitness evaluation for scenario
            fitness_scenarios.append(cls.fitness_function(foods, poisons))

        # Evaluate fitness of agent and add it to collection
        return sum(fitness_scenarios) / len(fitness_scenarios)
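cls.fitness_function is not shown in this example; a common and deliberately simple choice for Flatland-style agents is to reward food and penalise poison, for instance (a hypothetical stand-in, the real weighting may differ):

def fitness_function_sketch(foods, poisons, poison_weight=2.0):
    # Hypothetical stand-in for cls.fitness_function: food eaten counts
    # positively, poison eaten is penalised by poison_weight.
    return foods - poison_weight * poisons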
Example no. 9
 def test_weights_structure(self):
     ann = NeuralNetwork([5, 3], alpha=1e-5)
     ann.set_data_(self.X, self.y)
     coefs = ann.unflatten_coefs(ann.init_weights_())
     shapes = np.array([coef.shape for coef in coefs])
     np.testing.assert_array_equal(shapes, np.array([[5, 5], [6, 3], [4, 3]]))
Example no. 10
def main():
    size_of_learn_sample = int(len(x) * 0.9)
    print(size_of_learn_sample)

    NN = NeuralNetwork(x, y, 0.5)

    # NN.print_matrices()
    NN.train()
    NN.print_matrices()
Example no. 11
def test_ann(X_train, y_train, X_test, y_test, layers, max_iter=100):
    """ """
    nn = NeuralNetwork(layers, [ Activation() ] * (len(layers) - 1), 
                       # regularization_rate=0.0001,
                       # regularization=Regularization('L2'),
                       cost=Cost('Bernoulli') )

    # print layers, ', ', nn.neurons[1].type, \
    #               ', ', nn.regular.type, \
    #               ', ', nn.cost.type, '\n' # 

    cost, cv_cost = 10., 0.
    accr, cv_accr =  0., 0.
    cv_cost_prev  =  0.

    # tolerance = np.finfo(float).eps
    cost_threshold = 0.1 # min_cost: np.finfo(float).eps
    stop_threshold = 0.05

    total_time = time.time()
    start_time = time.time()

    iter, n_iter_skip = 0, 100
    # iter, max_iter, n_iter_skip = 0, 1000, 100
    # while cost > cost_threshold:
    while iter < max_iter:

        Pt = nn.predict_matrix(X_test)
        # cv_cost = nn.calculate_cost(X_test,  y_test, Pt)
        cv_accr = nn.accuracy(X_test, y_test, Pt)
        
        P = nn.predict_matrix(X_train)
        # cost = nn.calculate_cost(X_train, y_train, P)
        accr = nn.accuracy(X_train, y_train, P)
        
        if not (iter % n_iter_skip):
            print('%4d\ttime=%.3f sec.\tCost(X=%.3f T=%.3f)\tAccr(X=%.3f T=%.3f)' %
                  (iter, time.time() - start_time, cost, cv_cost, accr, cv_accr))
            start_time = time.time()

            # # Early stopping based on the error on the validation set
            # if cv_cost_prev and (cv_cost - cv_cost_prev) > stop_threshold:
            #     break
            # else: cv_cost_prev = cv_cost

        # if iter > max_iter: break
        nn.backprop(X_train, y_train, P)
        iter += 1
    
    print('\n%4d\ttime=%.3f sec.\tCost(X=%.3f T=%.3f)\tAccr(X=%.3f T=%.3f)' %
          (iter, time.time() - total_time, cost, cv_cost, accr, cv_accr))
    return accr, cv_accr # cost, cv_cost
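The commented-out block in the loop above sketches early stopping on the validation cost: stop once it worsens by more than stop_threshold compared to the previous check. Pulled out on its own (a hypothetical helper, not part of the original script):

def should_stop_early(cv_cost, cv_cost_prev, stop_threshold=0.05):
    # No decision can be made before the first recorded validation cost.
    if cv_cost_prev is None:
        return False
    # Stop when the validation cost has risen by more than the threshold.
    return (cv_cost - cv_cost_prev) > stop_threshold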
Example no. 12
def train(net: ann.NeuralNetwork,
          samples: Samples,
          error_function: Callable[[Sample, ann.NeuralNetwork], float],
          goal,
          learning_rate=0.1,
          epoches=10
          ) -> ann.NeuralNetwork:
    """ Back propagation trainer.
    
    Parameters
    ---------
    goal:
        
    
    epoches:
        
    """
    
    result_net = net.copy()
    
    for epoch in range(epoches):
        
        for sample in samples:
            
            reach_the_goal = abs(error_function(sample, result_net)) < goal
            
            if reach_the_goal:
                return result_net
            
            else:
                result_net = update(result_net, sample,
                                    error_function, learning_rate
                                    )
    return result_net
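For this trainer to be usable, error_function has to map a (sample, network) pair to a signed error. A sketch of such a function, assuming a Sample exposes .inputs and .target and the network exposes a feed-forward call (all of these names are assumptions, not part of the ann module shown here):

def sample_error(sample, net):
    # Hypothetical per-sample error: network output minus the desired target.
    predicted = net.feed_forward(sample.inputs)  # assumed method name
    return predicted - sample.target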
    
    
Example no. 13
from ann import NeuralNetwork
import matplotlib.pyplot as plt
import pickle

ground_truth_dataset = [[15, 3, 1], [10, 5, 1], [20, 1, 1],
                        [1, 5, 0], [5, 0, 0], [30, 0, 1], [2, 1, 0], [5, 5, 1],
                        [7, 10, 0], [25, 6, 1]]

n = NeuralNetwork()
# n.w1 = 0.024739923179843564
# n.w2 = 0.782086203350603
# n.w3 = -0.7531968177831182
# n.w4 = -0.1328632433367266
# n.w5 = 0.7521920897144088
# n.w6 = -0.2568310157160393
# n.b1 = -0.5874532717161877
# n.b2 = -0.9232275728505895
# n.b3 = 0.75530633226724
n.w1 = 0.27985946828331204
n.w2 = 1.113212641433306
n.w3 = -1.8082799142509993
n.w4 = 1.572218879949482
n.w5 = 4.689412543076026
n.w6 = -4.6976243908676985
n.b1 = -3.6516812511529557
n.b2 = -0.5814582597051287
n.b3 = -2.1295699805201465

print("""Initial hyperparameters:
n.w1 = {}
n.w2 = {}
Example no. 14
 def test_set_data(self):
     ann = NeuralNetwork([5, 3], alpha=1e-5)
     ann.set_data_(self.X, self.y)
     coefs = ann.init_weights_()
     self.assertEqual(len(coefs), 55)
Example no. 15
import GA
import numpy as np
from ann import NeuralNetwork

sol_per_pop = 8
num_parents_mating = 4
crossover_location = 5

# Define the population size: sol_per_pop chromosomes, each with num_weights genes.
pop_size = sol_per_pop
print(pop_size)
new_population = []

for i in range(sol_per_pop):
    new_network = NeuralNetwork()
    weights = []
    #Input Layer
    input_weights=np.random.rand(4, 6) #weight
    input_biases=np.random.rand(6) #biases
    weights.append(input_weights)
    weights.append(input_biases)
    #Hidden Layers
    hidden_weights=np.random.rand(6, 6) #weight
    hidden_biases=np.random.rand(6) #biases
    weights.append(hidden_weights)
    weights.append(hidden_biases)
    #Output Layer
    output_weights=np.random.rand(6, 3) #weight
    output_biases=np.random.rand(3) #biases
    weights.append(output_weights)
    weights.append(output_biases)
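The per-layer arrays above are convenient for the network, but GA operators such as one-point crossover at crossover_location typically act on a single flat gene vector; a small sketch of the flatten/restore step (helper names are mine, not part of the GA module imported above):

import numpy as np

def flatten_weights(weights):
    # Concatenate per-layer weight/bias arrays into one flat gene vector.
    return np.concatenate([np.asarray(w).ravel() for w in weights])

def unflatten_weights(genes, shapes):
    # Rebuild the per-layer arrays from a flat gene vector, given their shapes.
    out, start = [], 0
    for shape in shapes:
        size = int(np.prod(shape))
        out.append(np.asarray(genes[start:start + size]).reshape(shape))
        start += size
    return out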
Example no. 16
parser.add_argument(
    "--batchSize", help="Batch size count used for training data.", default=1024, type=int)

parser.add_argument("--dropout", help="Percent Dropout",
                    default=0.25, type=float)

parser.add_argument("--train", help="Train dataset", default='dataset/train', type=str)
parser.add_argument("--test", help="Test dataset", default='dataset/test', type=str)

if __name__ == "__main__":
    args = parser.parse_args()

    ann = NeuralNetwork(
        tsSize=args.timeseries,
        lstmSize=args.lstmSize,
        dropout=args.dropout,
    )

    if os.path.isfile(args.weights):
        ann.model.load_weights(args.weights)

    if args.action == "train":
        ann.fit(args.weights, args.train, args.test,
                epochs=args.epochs, batch_size=args.batchSize)

    if args.action == "serve":
        serve(ann, args.test)

    if args.action == "cli":
        while 1:
Example no. 17
 def __init__(self):
     self.network = NeuralNetwork()
     self.fitness = -1
Example no. 18
 def generate_population(self):
     self.population = []
     for i in range(self.population_max_size):
         agent = Agent(i, NeuralNetwork())
         self.population.append(agent)
Example no. 19
    def __init__(self, flatland_scenarios, genotype_agent, time_steps, *args,
                 **kwargs):
        Tk.__init__(self, *args, **kwargs)
        self.configure(background="#2b2b2b")

        # Static view state variables
        self.max_time_steps = time_steps
        self.phenotype_agent = genotype_agent.translate_to_phenotype()
        self.flatland_agent = FlatlandAgent(
            NeuralNetwork(
                [NeuronLayer(w) for w in self.phenotype_agent.layer_weights]))
        self.canvas = Canvas(self,
                             width=FlatlandView.viewport_width,
                             height=FlatlandView.viewport_height,
                             bg="#a0a0a0",
                             highlightbackground="#000000")
        self.agent_polygon_x_y = [
            10, -10, 30, 0, 10, 10, 0, 30, -10, 10, -30, 0, -10, -10
        ]
        self.agent_senors_x_y = [
            30, -10, 40, 0, 30, 10, 20, 0, 0, 40, -10, 30, 0, 20, 10, 30, -30,
            -10, -20, 0, -30, 10, -40, 0
        ]
        self.flatland_length = flatland_scenarios[0].length
        self.agent_start = flatland_scenarios[0].agent_start

        # Dynamic view state variables
        self.flatland_scenarios = flatland_scenarios
        self.current_scenario = 0
        self.current_flatland_scenario = deepcopy(
            self.flatland_scenarios[self.current_scenario])
        self.time_steps = 0
        self.time_step_delay = IntVar(self, 1000)
        self.time_step_stringvar = StringVar(self, str(self.time_steps))
        self.sensor_rotation = 0

        # Draw flatland grid and configure canvas
        self.draw_canvas_lines()
        self.canvas.grid(rowspan=18, columnspan=3)

        self.time_step_num_font = tkFont.Font(family="Helvetica",
                                              size=72,
                                              weight="bold")
        self.time_step_text_font = tkFont.Font(family="Helvetica",
                                               size=18,
                                               weight="bold")
        self.time_step_frame = Frame(self, background="#2b2b2b")

        Label(self.time_step_frame,
              textvariable=self.time_step_stringvar,
              font=self.time_step_num_font,
              background="#2b2b2b",
              foreground="#a9b7c6").pack()
        Label(self.time_step_frame,
              text="time steps",
              font=self.time_step_text_font,
              background="#2b2b2b",
              foreground="#a9b7c6").pack()
        self.time_step_frame.grid(row=0, column=4)

        Scale(self,
              from_=250,
              to=5000,
              resolution=250,
              background="#2b2b2b",
              foreground="#a9b7c6",
              label="Time step delay (ms)",
              variable=self.time_step_delay).grid(row=16, column=4, padx=20)

        Button(self,
               text="New Scenarios",
               width=25,
               command=self.generate_new_scenarios,
               foreground="#a9b7c6",
               background="#2b2b2b",
               highlightbackground="#2b2b2b").grid(row=17, column=4, padx=20)
Example no. 20
class GA(object):
    def __init__(self,
                 n_population,
                 pc,
                 pm,
                 bankruptcy_data,
                 non_bankruptcy_data,
                 clusters_data,
                 cluster_centers,
                 threshold_list,
                 population=None):
        self.threshold_list = threshold_list
        self.bankruptcy_data = bankruptcy_data
        self.non_bankruptcy_data = non_bankruptcy_data
        self.neural_network = NeuralNetwork(n_inputs=6,
                                            n_outputs=2,
                                            n_neurons_to_hl=6,
                                            n_hidden_layers=1)
        self.n_population = n_population
        self.p_crossover = pc  # percent of crossover
        self.p_mutation = pm  # percent of mutation
        self.population = population or self._makepopulation()
        self.saved_cluster_data = clusters_data
        self.cluster_centers = cluster_centers
        self.predict_bankruptcy = []
        self.predict_non_bankruptcy = []
        self.fitness_list = []  # list of [chromosome, fitness] pairs
        self.currentUnderSampling = None
        self.predict_chromosome = None
        self.fitness()

    def init_neural_network(self, chromosome):
        # remove threshold from chromosome list
        primary_weights = chromosome[5:]
        matrix_list = []
        for i in range(0, int(len(primary_weights) / 6) - 1):
            matrix_list.append(primary_weights[i * 6:(i + 1) * 6])

        weights_matrix = array(matrix_list)

        layers = self.neural_network.layers

        i = 0
        for neuron in layers[0].neurons:
            neuron.set_weights(weights_matrix[:, i])
            i += 1

        layers[1].neurons[0].set_weights(primary_weights[-6:])

    def performance_measure(self):
        tp = 0
        fp = 0
        fn = 0
        tn = 0

        for item in self.bankruptcy_data:
            if self.predict(item) > 0.5:
                fp += 1
            else:
                tp += 1
        for item in self.non_bankruptcy_data:
            if self.predict(item) > 0.5:
                tn += 1
            else:
                fn += 1

        sensitivity = tp / (tp + fn)
        specificity = tn / (fp + tn)

        print("TP is : %s" % (str(tp)))
        print("FP is : %s" % (str(fp)))
        print("FN is : %s" % (str(fn)))
        print("TN is : %s" % (str(tn)))
        print("G-MEAN : %s" % (str(math.sqrt(sensitivity * specificity))))

        print("Hit-ratio : %s" % (str((tp + tn) / (tp + fn + fp + tn))))

    def predict(self, data):
        self.init_neural_network(self.predict_chromosome)
        return self.neural_network.update(data)[0]

    def _makepopulation(self):
        pop_list = []
        for i in range(0, self.n_population):
            weights = [random.uniform(-5, 5) for _ in range(0, 36)]
            out_weights = [random.uniform(-5, 5) for _ in range(0, 12)]

            # make threshold list
            threshold1 = [
                random.uniform(threshold[0], threshold[1])
                for threshold in self.threshold_list
            ]
            chromosome = threshold1 + weights + out_weights

            pop_list.append(chromosome)

        return pop_list

    '''
    b   : the number of bankruptcy firms
    BAi : the classification accuracy of the ith instance of bankruptcy firms
    n   : the number of non-bankruptcy firms
    NAj : the classification accuracy of the jth instance of non-bankruptcy firms
    POi : the predicted output of the ith instance of bankruptcy firms
    AOi : the actual output of the ith instance of bankruptcy firms
    POj : the predicted output of the jth instance of non-bankruptcy firms
    AOj : the actual output of the jth instance of non-bankruptcy firms
    '''

    def ba_i(self, poi):
        if poi < 0.5:
            return 1
        return 0

    def na_j(self, poj):
        if poj > 0.5:
            return 1
        return 0

    def cbeus(self, thresholds):
        # the rule structure for the cluster-based undersampling based on GA

        i = 0
        undersampling_clusters = []
        for cluster in self.saved_cluster_data:
            for instance in cluster:
                if euclidean_distances(
                    [instance], [self.cluster_centers[i]]) < thresholds[i]:
                    undersampling_clusters.append(instance)

            i += 1

        return undersampling_clusters

    def fitness(self):

        fitness_sum = 0
        trials = 3

        for index in range(0, trials):

            for item in self.population:
                print("underSampling : Cut off % s" % str(item[:5]))
                self.currentUnderSampling = self.cbeus(item[:5])
                self.init_neural_network(item)
                self.predict_non_bankruptcy = []
                self.predict_bankruptcy = []
                for instance in self.currentUnderSampling:
                    self.predict_non_bankruptcy.append(
                        self.neural_network.update(instance)[0])
                for instance in self.bankruptcy_data:
                    self.predict_bankruptcy.append(
                        self.neural_network.update(instance)[0])

                fitness_value = self.g_mean(len(self.currentUnderSampling))

                self.fitness_list.append([item, fitness_value])
                fitness_sum += fitness_value

            self.fitness_list.sort(key=lambda x: x[1])
            self._select_parents(fitness_sum)
            self.fitness_list.sort(key=lambda x: x[1])
            self.population = []

            for item in self.fitness_list:
                self.population.append(item[0])
                if len(self.population) == 5:
                    break

            if index == trials - 1:
                os.system('cls' if os.name == 'nt' else 'clear')
                print("The Optimization Weights For Predict Is: %s " %
                      str(self.population[0][5:]))
                self.predict_chromosome = self.population[0][5:]

        self.performance_measure()

    def g_mean(self, n):

        b = len(self.bankruptcy_data)

        sum_bankruptcy = 0
        sum_non_bankruptcy = 0

        for item in self.predict_non_bankruptcy:
            sum_non_bankruptcy += self.ba_i(item)

        for item in self.predict_bankruptcy:
            sum_bankruptcy += self.na_j(item)

        return math.sqrt(
            (1 / b) * sum_bankruptcy * (1 / n) * sum_non_bankruptcy)

    def cxOnePoint(self, ind1, ind2):
        """Executes a one point crossover on the input :term:`sequence` individuals.
        The two individuals are modified in place. The resulting individuals will
        respectively have the length of the other.

        :param ind1: The first individual participating in the crossover.
        :param ind2: The second individual participating in the crossover.
        :returns: A tuple of two individuals.
        This function uses the :func:`~random.randint` function from the
        python base :mod:`random` module.
        """
        size = min(len(ind1), len(ind2))
        cxpoint = random.randint(1, size - 1)
        ind1[cxpoint:], ind2[cxpoint:] = ind2[cxpoint:], ind1[cxpoint:]

        return ind1, ind2

    def swapMutation(self, ind1):

        size = len(ind1)
        swpoint1 = random.randint(1, size - 1)
        swpoint2 = random.randint(1, size - 1)

        ind1[swpoint1], ind1[swpoint2] = ind1[swpoint2], ind1[swpoint1]
        return ind1

    def _select_parents(self, fitness_sum):
        """
        Roulette wheel selection
        Selects parents from the given population

        Args :
        population (list) : Current population from which parents will be selected
        fitness_sum (number) : Summation of all fitness value

        Returns :
        parents (IndividualGA, IndividualGA) : selected parents
        """

        probability = []

        for item in self.fitness_list:
            probability.append(item[1] / fitness_sum)
            item.append(item[1] / fitness_sum)

        ncrossover = math.ceil(self.n_population * self.p_crossover /
                               2)  # number of crossover offspring
        nmutation = math.ceil(self.n_population *
                              self.p_mutation)  # number of mutation offspring

        selection_probability = set()

        while len(selection_probability) < ncrossover:
            selection_probability.add(random.uniform(0, 1))

        probability = np.cumsum(probability).tolist()

        def roulette(prob):
            for i in range(0, len(probability)):
                if prob < probability[i]:
                    return self.fitness_list[i][0]

        crossover_list = []

        for item in list(selection_probability):
            crossover_list.append(roulette(item))
            if len(crossover_list) == 2:
                inde1, inde2 = self.cxOnePoint(crossover_list[0][:],
                                               crossover_list[1][:])

                # init the neural network with the individual 1

                self.currentUnderSampling = self.cbeus(inde1[:5])
                self.init_neural_network(inde1)
                self.predict_non_bankruptcy = []
                self.predict_bankruptcy = []
                for instance in self.currentUnderSampling:
                    self.predict_non_bankruptcy.append(
                        self.neural_network.update(instance)[0])
                for instance in self.bankruptcy_data:
                    self.predict_bankruptcy.append(
                        self.neural_network.update(instance)[1])

                fitness_value = self.g_mean(len(self.currentUnderSampling))

                self.fitness_list.append([inde1, fitness_value])

                # init the neural network with the individual 2

                self.currentUnderSampling = self.cbeus(inde2[:5])
                self.init_neural_network(inde2)
                self.predict_non_bankruptcy = []
                self.predict_bankruptcy = []
                for instance in self.currentUnderSampling:
                    self.predict_non_bankruptcy.append(
                        self.neural_network.update(instance)[0])
                for instance in self.bankruptcy_data:
                    self.predict_bankruptcy.append(
                        self.neural_network.update(instance)[1])

                fitness_value = self.g_mean(len(self.currentUnderSampling))

                self.fitness_list.append([inde2, fitness_value])

                crossover_list = []

        # create individual with mutation

        selection_probability = set()
        while len(selection_probability) < nmutation:
            selection_probability.add(random.uniform(0, 1))

        for item in list(selection_probability):

            inde3 = self.swapMutation(roulette(item))
            self.currentUnderSampling = self.cbeus(inde3[:5])
            self.init_neural_network(inde3)
            self.predict_non_bankruptcy = []
            self.predict_bankruptcy = []
            for instance in self.currentUnderSampling:
                self.predict_non_bankruptcy.append(
                    self.neural_network.update(instance)[0])
            for instance in self.bankruptcy_data:
                self.predict_bankruptcy.append(
                    self.neural_network.update(instance)[1])
            fitness_value = self.g_mean(len(self.currentUnderSampling))
            self.fitness_list.append([inde3, fitness_value])
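Written out with the symbols documented in the docstring above, the fitness returned by g_mean is the geometric mean of the two average per-class accuracies:

\[
\text{fitness} = \sqrt{\frac{1}{b}\sum_{i=1}^{b} BA_i \,\cdot\, \frac{1}{n}\sum_{j=1}^{n} NA_j}
\]

with ba_i and na_j supplying the 0/1 accuracy scores for the two classes.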
Example no. 21
import pygame
import pickle
import numpy as np
from game import init, iterate
from ann import NeuralNetwork
import utils

# Architecture (specify the network architecture here.)
network = NeuralNetwork(layers=[7, 14, 14, 7, 1],
                        activations=['sigmoid', 'sigmoid', 'sigmoid', 'tanh'])
lr = 0.1
losses = []

screen, font = init()
# Game Loop / Train Loop
frame_count, score, _, _, x = iterate.iterate(screen, font, 0, 0)
game = True
run = True
prediction = 0
while run:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
    prediction = utils.forward(x, network)
    frame_count, score, game, run, x = iterate.iterate(screen, font,
                                                       frame_count, score,
                                                       game, run, prediction)
    loss = utils.backward(prediction, x, lr, network)
    losses.append(loss)
pygame.quit()
Example no. 22
import pickle
from ann import NeuralNetwork
import matplotlib.pyplot as plt

ground_truth_dataset = [
  [15, 3, 1],
  [10, 5, 1],
  [20, 1, 1],
  [1,  5, 0],
  [5,  0, 0],
  [30, 0, 1],
  [2,  1, 0],
  [5,  5, 1],
  [7, 10, 0],
  [25, 6, 1]
]

n = NeuralNetwork()

stats = n.train(ground_truth_dataset, 100001, 0.001)
epochs = stats[0]
min_losses = stats[1]
avg_losses = stats[2]
max_losses = stats[3]

with open("model.bin", "wb") as f:
  pickle.dump(n, f)

plt.ylabel("Loss")
plt.xlabel("Epoch")
plt.plot(epochs, min_losses, label="Min loss")
plt.plot(epochs, avg_losses, label="Avg loss")
plt.plot(epochs, max_losses, label="Max loss")
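A short continuation sketch, reusing the imports and the model.bin file written above: the figure still needs a legend and a show call, and the pickled model can be loaded back the same way it was dumped.

plt.legend()
plt.show()

# Reload the model saved above.
with open("model.bin", "rb") as f:
    restored = pickle.load(f)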
Example no. 23
class GWO(object):
    def __init__(self,
                 n_population,
                 bankruptcy_data,
                 non_bankruptcy_data,
                 clusters_data,
                 cluster_centers,
                 threshold_list,
                 population=None):
        self.threshold_list = threshold_list
        self.bankruptcy_data = bankruptcy_data
        self.non_bankruptcy_data = non_bankruptcy_data
        self.neural_network = NeuralNetwork(n_inputs=6,
                                            n_outputs=2,
                                            n_neurons_to_hl=6,
                                            n_hidden_layers=1)
        self.n_population = n_population
        self.population = population or self._makepopulation()
        self.saved_cluster_data = clusters_data
        self.cluster_centers = cluster_centers
        self.predict_bankruptcy = []
        self.predict_non_bankruptcy = []
        self.fitness_list = []  # list of [chromosome, fitness] pairs
        self.currentUnderSampling = None
        self.predict_position = None
        self.search_main()

    def init_neural_network(self, chromosome):
        # remove threshold from chromosome list
        primary_weights = chromosome[5:]
        matrix_list = []
        for i in range(0, int(len(primary_weights) / 6) - 1):
            matrix_list.append(primary_weights[i * 6:(i + 1) * 6])

        weights_matrix = array(matrix_list)

        layers = self.neural_network.layers

        i = 0
        for neuron in layers[0].neurons:
            neuron.set_weights(weights_matrix[:, i])
            i += 1

        layers[1].neurons[0].set_weights(primary_weights[-6:])

    def performance_measure(self):
        tp = 0
        fp = 0
        fn = 0
        tn = 0

        for item in self.bankruptcy_data:
            if self.predict(item) > 0.5:
                fp += 1
            else:
                tp += 1
        for item in self.non_bankruptcy_data:
            if self.predict(item) > 0.5:
                tn += 1
            else:
                fn += 1

        sensitivity = tp / (tp + fn)
        specificity = tn / (fp + tn)

        print("TP is : %s" % (str(tp)))
        print("FP is : %s" % (str(fp)))
        print("FN is : %s" % (str(fn)))
        print("TN is : %s" % (str(tn)))
        print("G-MEAN : %s" % (str(math.sqrt(sensitivity * specificity))))

        print("Hit-ratio : %s" % (str((tp + tn) / (tp + fn + fp + tn))))

    def predict(self, data):
        self.init_neural_network(self.predict_position)
        return self.neural_network.update(data)[0]

    def _makepopulation(self):
        pop_list = []
        for i in range(0, self.n_population):
            weights = [random.uniform(-5, 5) for _ in range(0, 36)]
            out_weights = [random.uniform(-5, 5) for _ in range(0, 12)]

            # make threshold list
            threshold1 = [
                random.uniform(threshold[0], threshold[1])
                for threshold in self.threshold_list
            ]
            position = threshold1 + weights + out_weights

            pop_list.append(position)

        return pop_list

    def ba_i(self, poi):
        if poi < 0.5:
            return 1
        return 0

    def na_j(self, poj):
        if poj > 0.5:
            return 1
        return 0

    def cbeus(self, thresholds):
        # the rule structure for the cluster-based undersampling based on GA

        i = 0
        undersampling_clusters = []
        for cluster in self.saved_cluster_data:
            for instance in cluster:
                if euclidean_distances(
                    [instance], [self.cluster_centers[i]]) < thresholds[i]:
                    undersampling_clusters.append(instance)

            i += 1

        return undersampling_clusters

    def search_main(self):
        Max_iter = 3

        for index in range(0, Max_iter):

            for position in self.population:
                print("underSampling : Cut off % s" % str(position[:5]))
                self.currentUnderSampling = self.cbeus(position[:5])
                self.init_neural_network(position)
                self.predict_non_bankruptcy = []
                self.predict_bankruptcy = []
                for instance in self.currentUnderSampling:
                    self.predict_non_bankruptcy.append(
                        self.neural_network.update(instance)[0])
                for instance in self.bankruptcy_data:
                    self.predict_bankruptcy.append(
                        self.neural_network.update(instance)[0])

                fitness_value = self.g_mean(len(self.currentUnderSampling))

                self.fitness_list.append([position, fitness_value])

            self.fitness_list.sort(key=lambda x: x[1])

            # Update Alpha, Beta, and Delta

            Alpha_pos = self.fitness_list[0][0]  # Update alpha

            Beta_pos = self.fitness_list[1][0]  # Update beta

            Delta_pos = self.fitness_list[2][0]  # Update delta

            a = 2 - index * (2 / Max_iter)  # a decreases linearly from 2 to 0

            for position in self.population:
                for j in range(0, len(position)):

                    r1 = random.random()  # r1 is a random number in [0,1]
                    r2 = random.random()  # r2 is a random number in [0,1]

                    A1 = 2 * a * r1 - a  # Equation (3.3)
                    C1 = 2 * r2  # Equation (3.4)

                    D_alpha = abs(C1 * Alpha_pos[j] -
                                  position[j])  # Equation (3.5)-part 1
                    X1 = Alpha_pos[j] - A1 * D_alpha  # Equation (3.6)-part 1

                    r1 = random.random()
                    r2 = random.random()

                    A2 = 2 * a * r1 - a  # Equation (3.3)
                    C2 = 2 * r2  # Equation (3.4)

                    D_beta = abs(C2 * Beta_pos[j] -
                                 position[j])  # Equation (3.5)-part 2
                    X2 = Beta_pos[j] - A2 * D_beta  # Equation (3.6)-part 2

                    r1 = random.random()
                    r2 = random.random()

                    A3 = 2 * a * r1 - a  # Equation (3.3)
                    C3 = 2 * r2  # Equation (3.4)

                    D_delta = abs(C3 * Delta_pos[j] -
                                  position[j])  # Equation (3.5)-part 3
                    X3 = Delta_pos[j] - A3 * D_delta  # Equation (3.6)-part 3

                    position[j] = (X1 + X2 + X3) / 3  # Equation (3.7)

            if index == Max_iter - 1:
                os.system('cls' if os.name == 'nt' else 'clear')
                self.fitness_list.sort(key=lambda x: x[1])
                print("The Optimization Weights For Predict Is: %s " %
                      str(self.fitness_list[0][0][5:]))
                self.predict_position = self.fitness_list[0][0][5:]

        self.performance_measure()

    def g_mean(self, n):

        b = len(self.bankruptcy_data)

        sum_bankruptcy = 0
        sum_non_bankruptcy = 0

        for item in self.predict_non_bankruptcy:
            sum_non_bankruptcy += self.ba_i(item)

        for item in self.predict_bankruptcy:
            sum_bankruptcy += self.na_j(item)

        return math.sqrt(
            (1 / b) * sum_bankruptcy * (1 / n) * sum_non_bankruptcy)
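For reference, the numbered equations cited in the comments of search_main are the standard grey wolf optimizer update rules; with the notation used above ($X$ is the current position and $X_\alpha, X_\beta, X_\delta$ the three best positions found so far):

\[
A = 2a\,r_1 - a, \qquad C = 2\,r_2, \qquad a \text{ decreasing linearly from 2 to 0,}
\]
\[
D_\alpha = \lvert C_1 X_\alpha - X \rvert, \qquad X_1 = X_\alpha - A_1 D_\alpha,
\]
and analogously for $X_2$ (with $X_\beta$) and $X_3$ (with $X_\delta$), giving the new position
\[
X(t+1) = \frac{X_1 + X_2 + X_3}{3}.
\]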