Example #1
def main():
    regular_deck = generate_regular_deck()

    shuffle(regular_deck)

    print("Both Players received equally many cards")
    cards_playerA, cards_playerB = assign_cards(regular_deck)

    i = 0

    while len(cards_playerA) not in [0, 52] and i < 1000:
        print(f"Card Rank of Player A: {cards_playerA[0].rank}")
        print(f"Card Rank of Player B: {cards_playerB[0].rank}")
        if cards_playerA[0].rank > cards_playerB[0].rank:
            print("Card of A won.")
            cards_playerA.append(cards_playerB.pop(0))
            cards_playerA.append(cards_playerA.pop(0))
            print(f"Number of A is now: {len(cards_playerA)}")
            print(f"Number of B is now: {len(cards_playerB)}")
        elif cards_playerA[0].rank < cards_playerB[0].rank:
            print("Card of B won.")
            cards_playerB.append(cards_playerB.pop(0))
            cards_playerB.append(cards_playerA.pop(0))
            print(f"Number of A is now: {len(cards_playerA)}")
            print(f"Number of B is now: {len(cards_playerB)}")
        else:
            print("It is war!!! Both players reveal two more cards")
            # Stop the game if either player cannot field the three cards a war needs.
            if len(cards_playerA) < 3 or len(cards_playerB) < 3:
                break
            if cards_playerA[2].rank > cards_playerB[2].rank:
                print("Player A has won the War.")
                for _ in range(3):
                    cards_playerA.append(cards_playerB.pop(0))
                    cards_playerA.append(cards_playerA.pop(0))
                print(f"Number of A is now: {len(cards_playerA)}")
                print(f"Number of B is now: {len(cards_playerB)}")
            else:
                # A tied war also falls through to here, so ties go to player B.
                print("Player B has won the war.")
                for _ in range(3):
                    cards_playerB.append(cards_playerB.pop(0))
                    cards_playerB.append(cards_playerA.pop(0))
                print(f"Number of A is now: {len(cards_playerA)}")
                print(f"Number of B is now: {len(cards_playerB)}")
        i += 1
    print("-------------------")
    print("SUMMARY")
    print("The game has ended.")
    if len(cards_playerA) == 0:
        print("Player B won.")
    elif len(cards_playerA) == 52:
        print("Player A won.")
    else:
        print(f"The game was stopped at {i} interations.")
        if len(cards_playerA) > len(cards_playerB):
            print(f"Player A has won these {i} rounds.")
        elif len(cards_playerA) < len(cards_playerB):
            print(f"Player B has won these {i} rounds.")
        else:
            print("It was a tie.")
    print(f"The game lasted {i} rounds")
Example #2
    def train(self, data, labels, epochs=1, record_epochs=False, validation_set=None):
        """
        This method runs a simple version of the perceptron algorithm on the data given.
        :param data: numpy array of each data point to be used for training. This array should already be padded with
        a 1's column in order to ensure a bias weight is included.
        :param labels: numpy array specify the labels {-1, 1}
        :return: None
        """
        # Pad the data with an all ones vector.
        p_data = pad(data)

        # Initialize the weights.
        self.weights = np.random.uniform(low=-0.01, high=0.01, size=p_data.shape[1])

        for epoch in range(epochs):

            # Go through each data point.
            for x, y in zip(*shuffle(p_data, labels)):

                # If (w^t*x + b)*y < margin make an update.
                if np.dot(self.weights, x) * y < self.margin:
                    # Calculate the aggressive learning rate.
                    aggressive_learning_rate = (self.margin - (y * np.dot(self.weights, x))) / (np.dot(x, x) + 1)

                    # Update the weights
                    self.weights = self.weights + (aggressive_learning_rate * y * x)

                    # Record update count.
                    self.update_count += 1

            # record epoch specific information if specified
            if record_epochs:
                val_x, val_y = validation_set[0], validation_set[1]
                self.epoch_records[epoch + 1] = {'accuracy': accuracy(self.predict(val_x), val_y),
                                                 'weights': self.weights}
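This and the later perceptron variants (Examples #5 and #6) rely on pad, shuffle, and accuracy helpers that are not shown in the listing. A plausible sketch, offered as an assumption rather than the original implementation:

import numpy as np

def pad(data):
    # Append a ones column so the bias is learned as one more weight.
    return np.hstack([data, np.ones((data.shape[0], 1))])

def shuffle(data, labels):
    # Apply one random permutation to examples and labels together.
    perm = np.random.permutation(len(labels))
    return data[perm], labels[perm]

def accuracy(predictions, labels):
    # Fraction of matching {-1, 1} predictions.
    return float(np.mean(predictions == labels))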
Example #3
    def train(self, iteration):
        # Kuhn poker is played with a three-card deck.
        cards = [1, 2, 3]
        util = 0.0
        for i in range(iteration):
            # One iteration of counterfactual regret minimization (CFR),
            # starting at the root with both reach probabilities set to 1.
            cards = helpers.shuffle(cards)
            util += self.cfr(cards, "", 1.0, 1.0)
        print("Average game value: " + str(util / iteration))
        for key in self.nodeMap:
            print(self.nodeMap[key])
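A hypothetical driver for this trainer; the Trainer class name and the helpers.shuffle import are assumed from the surrounding project:

# Hypothetical usage: self-play CFR on Kuhn poker. With enough
# iterations the printed average game value should approach the
# game's known value of -1/18 (about -0.056) for player 1.
trainer = Trainer()
trainer.train(100000)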
Example #4
def nn_cross_validation(X, y, folds, hidden_layer_size, learning_rate, ratio = 0.2, epochs = 30, callbacks = None, validation_split = None, save_loss = True):
    # Cross validation
    cv = StratifiedKFold(n_splits = folds)
    results = {'accuracy' : [], 'f1-score' : [], 'recall' : [], 'precision' : [], 'space' : []}
    history = []

    for count, (train_index, test_index) in enumerate(cv.split(X, y), 1):
        # Initialize the model
        model = ff.create_sequential(input_size = (82, ), hidden_layer_size = hidden_layer_size, learning_rate = learning_rate, activation = 'relu')

        print("Indici Train: ", train_index, " Indici Test", test_index)
        print("Totale elementi: ", len(X))

        # Select the folds to work on
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        # Balance only the training set (does this modify the original dataset too?)
        X_train, y_train = init.undersample(X_train, y_train, ratio = ratio)

        # Training
        if validation_split is not None:
            X_train, y_train = helpers.shuffle(X_train, y_train)
        history.append(ff.train(model, X_train, y_train, cbs = callbacks, validation_split = validation_split, epochs = epochs))

        # To be changed: the checkpoint path below is hard-coded.
        model = load_model('best_model.h5')

        # Evaluate model size (make location a parameter)
        size = ff.model_size(model, location = f"local_exec/classifier_testing/esperimenti_ffnn/risultati/pesi_modelli_addestrati/ff_{count}.p")
        # Classifier score on the test set
        scores = ff.evaluate(model, X_test, y_test, verbose = False)

        # Update the metric results (std. dev. and size still missing)
        results['accuracy'].append(scores['accuracy']) 
        results['space'].append(size)
        results['f1-score'].append(scores['1']['f1-score'])
        results['precision'].append(scores['1']['precision'])
        results['recall'].append(scores['1']['recall'])

    # Save the loss plot
    if save_loss:
        ff.save_losses_plot(history[:3], f"test_plot_neuron{hidden_layer_size}_lr{learning_rate}", colors = ['r', 'b', 'g'])

    # Average the results over the folds
    mean_results = {key : sum(value) / folds for key, value in results.items()}
    std_results = {key : (sum([((x - mean_results[key]) ** 2) for x in value]) / folds) ** 0.5 for key, value in results.items()}

    return mean_results, std_results
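A hypothetical call, assuming X is a feature matrix with the 82 columns hard-coded into the model above and y is a binary label vector:

# Hypothetical usage: five folds, one hidden layer of 32 neurons.
mean_results, std_results = nn_cross_validation(
    X, y,
    folds=5,
    hidden_layer_size=32,
    learning_rate=0.001)
print(mean_results['f1-score'], '+/-', std_results['f1-score'])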
Example #5
    def train(self, data, labels, epochs=1, record_epochs=False, validation_set=None):
        """
        This method runs a simple version of the perceptron algorithm on the data given.
        :param data: numpy array of each data point to be used for training. This array should already be padded with
        a 1's column in order to ensure a bias weight is included.
        :param labels: numpy array specify the labels {-1, 1}
        :return: None
        """
        # Pad the data with an all ones vector.
        p_data = pad(data)

        # Initialize the weights and average weights.
        self.weights = np.random.uniform(low=-0.01, high=0.01, size=p_data.shape[1])
        self.average_weights = self.weights

        for epoch in range(epochs):

            # Go through each data point.
            for x, y in zip(*shuffle(p_data, labels)):

                # If (w^t*x + b)*y < 0 make an update.
                if np.dot(self.weights, x) * y < 0:
                    # Update the weights
                    self.weights = self.weights + self.learning_rate * y * x

                    # Record update count.
                    self.update_count += 1

                # Increment the average weights even if no misprediction happens.
                self.average_weights = self.average_weights + self.weights

            # record epoch specific information if specified
            if record_epochs:
                val_x, val_y = validation_set[0], validation_set[1]
                # Set current weights to the averaged weights so predict will use them.
                temp_weights = self.weights
                self.weights = self.average_weights / (len(data) * (epoch + 1))
                self.epoch_records[epoch + 1] = {'accuracy': accuracy(self.predict(val_x), val_y),
                                                 'weights': self.weights}
                # Set them back to resume normal algorithm operation.
                self.weights = temp_weights

        # Divide by the total number of examples it has seen.
        self.average_weights = self.average_weights / (len(data) * epochs)
        # Finally set the final weights to the average weights so they will be used for predictions.
        self.weights = self.average_weights
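The epoch records above call self.predict, which the listing does not show. A sketch consistent with the padding convention used in train, offered as an assumption about the class rather than its actual code:

    def predict(self, data):
        # Pad the same way train does, then classify by the sign of w^T x.
        return np.sign(pad(data) @ self.weights)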
Example #6
    def train(self, data, labels, epochs=1, record_epochs=False, validation_set=None):
        """
        This method runs a simple version of the perceptron algorithm on the data given.
        :param data: numpy array of each data point to be used for training. This array should already be padded with
        a 1's column in order to ensure a bias weight is included.
        :param labels: numpy array specify the labels {-1, 1}
        :param epochs: number of epochs to run.
        :record_epochs: If set to true will record weights, and accuracy after each epoch.
        :return: None
        """
        # Pad the data with an all ones vector.
        p_data = pad(data)

        # Initialize the weights.
        self.weights = np.random.uniform(low=-0.01, high=0.01, size=p_data.shape[1])

        # Time step used by the decaying learning rate schedule.
        t = 0

        for epoch in range(epochs):

            # Go through each data point.
            for x, y in zip(*shuffle(p_data, labels)):

                # If (w^t*x + b)*y < 0 make an update.
                if np.dot(self.weights, x) * y < 0:
                    # Calculate the decayed learning rate.
                    decayed_learning_rate = self.learning_rate / (1 + t)

                    # Update the weights
                    self.weights = self.weights + (decayed_learning_rate * y * x)

                    # Record update count.
                    self.update_count += 1

                # Increment t after each example not just mispredictions.
                t += 1

            # record epoch specific information if specified
            if record_epochs:
                val_x, val_y = validation_set[0], validation_set[1]
                self.epoch_records[epoch + 1] = {'accuracy': accuracy(self.predict(val_x), val_y),
                                                 'weights': self.weights}
Example #7
    def fit(self, user_ids, item_ids, ratings, verbose=True):
        user_ids = user_ids.astype(np.int64)
        item_ids = item_ids.astype(np.int64)

        if not self._initialized:
            self._initialize()

        for epoch_num in range(self._n_iter):
            users, items, shuffled_ratings = fn.shuffle(user_ids, item_ids, ratings)

            user_ids_tensor = torch.from_numpy(users)
            item_ids_tensor = torch.from_numpy(items)
            ratings_tensor = torch.from_numpy(shuffled_ratings)
            epoch_loss = 0.0

            for (minibatch_num,
                 (batch_user, batch_item, batch_rating)) in enumerate(
                     fn.generate_mini_batch(self._batch_size, user_ids_tensor,
                                            item_ids_tensor, ratings_tensor)):

                predictions = self._net(batch_user, batch_item)

                self._optimizer.zero_grad()

                loss = self._loss_func(predictions, batch_rating)

                epoch_loss = epoch_loss + loss.data.item()

                loss.backward()
                self._optimizer.step()

            epoch_loss = epoch_loss / (minibatch_num + 1)

            if verbose:
                print('Epoch {}: loss {}'.format(epoch_num, epoch_loss))

            if np.isnan(epoch_loss) or epoch_loss == 0.0:
                raise ValueError(
                    'Degenerate epoch loss: {}'.format(epoch_loss))
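A hypothetical driver for fit on synthetic data; the model class name and its constructor arguments are invented for illustration:

import numpy as np

# Hypothetical recommender wrapping the fit method above.
model = FactorizationModel(n_iter=10, batch_size=256)

user_ids = np.random.randint(0, 100, size=10000)
item_ids = np.random.randint(0, 500, size=10000)
ratings = np.random.uniform(1.0, 5.0, size=10000).astype(np.float32)

model.fit(user_ids, item_ids, ratings, verbose=True)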
Example #8
    def deal(self):
        # Shuffle the three-card deck and hand out the top two cards,
        # one to each player.
        newCards = helpers.shuffle([1, 2, 3])
        self.humanPlayer.card, self.player.card = newCards[0], newCards[1]
Example #9
    def render(self):
        props = []
        bpy.ops.import_scene.obj(
            filepath=os.path.join(self.MODELS_FOLDER_PATH, 'lightning.obj'),
            use_edges=True)
        logo = bpy.context.selected_objects[0]
        logo.location = helpers.rand_location(self.CANVAS_BOUNDARY)
        props.append(logo)

        bpy.ops.mesh.primitive_grid_add(x_subdivisions=100,
                                        y_subdivisions=100,
                                        location=(0, 6, 2))
        display1 = bpy.context.object
        display1.name = 'display_1'
        bpy.ops.mesh.primitive_grid_add(x_subdivisions=100,
                                        y_subdivisions=100,
                                        location=(6, 0, 2))
        display2 = bpy.context.object
        display2.name = 'display_2'

        bpy.data.groups['displays'].objects.link(display1)
        bpy.data.groups['displays'].objects.link(display2)

        display1.rotation_euler.x += math.radians(90)
        display1.rotation_euler.z -= math.radians(90)
        display2.rotation_euler.x += math.radians(90)
        display2.rotation_euler.y += math.radians(90)
        display2.rotation_euler.z += math.radians(120)

        for display in bpy.data.groups['displays'].objects:
            display.rotation_euler.x += math.radians(90)
            display.scale = self.DISPLAY_SCALE
            helpers.texture_object(display, self.TEXTURE_FOLDER_PATH)
            helpers.unwrap_model(display)
            helpers.glitch(display)

        for j in range(0, random.choice(range(5, 40))):
            for i in range(0, random.choice(range(5, 10))):
                new_line = self.create_line(
                    'line' + str(uuid.uuid1()),
                    self.series(30, self.rand_proba(self.FUNCTIONS), 0.3),
                    random.choice(self.COLORS), 0.003, (j, -10, 2))
                bpy.data.groups['lines'].objects.link(new_line)
                new_line.location.z += i / 3
                props.append(new_line)

        ocean = self.add_ocean(10, 20)

        for index in range(1, 5):
            new_object = helpers.spawn_text(self.TEXT_FILE_PATH)
            bpy.data.groups['texts'].objects.link(new_object)
            props.append(new_object)
            helpers.assign_material(
                new_object, helpers.random_material(self.MATERIALS_NAMES))
            text_scale = random.uniform(0.75, 3)
            new_object.scale = (text_scale, text_scale, text_scale)
            new_object.location = helpers.rand_location(self.CANVAS_BOUNDARY)

        for obj in bpy.data.groups['neons'].objects:
            self.wireframize(obj, random.choice(self.COLORS))

        for f in range(self.NUMBER_OF_FRAMES):
            bpy.context.scene.frame_set(f)
            for prop in props:
                helpers.shuffle(prop, self.CANVAS_BOUNDARY)
                helpers.assign_material(
                    prop, helpers.random_material(self.MATERIALS_NAMES))
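Unlike the list-shuffling helpers in the earlier examples, helpers.shuffle here receives a Blender object and a boundary, so it presumably re-randomizes the prop's position on every frame. A guess at its shape, not the original implementation:

import random

def shuffle(obj, boundary):
    # Move the object somewhere random inside the canvas and key the
    # location on the current frame (assumed behavior).
    obj.location = tuple(random.uniform(-boundary, boundary) for _ in range(3))
    obj.keyframe_insert(data_path='location')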