Example #1
    def replay(self, batch_size):
        x_batch, y_batch = [], []
        minibatch = random.sample(self.memory, batch_size)
        for state, action_index, reward, next_state, done in minibatch:

            state = state / 255.0
            if next_state is not None:
                next_state = next_state / 255.0

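            # Bellman target: the reward, plus the discounted max Q-value of the next state when not terminal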
            y_target = self.model.predict(state)
            y_target[0][
                action_index] = reward if done else reward + self.discount * np.max(
                    self.model.predict(next_state)[0])
            x_batch.append(state[0])
            y_batch.append(y_target[0])

        #self.model.fit(np.array(x_batch), np.array(y_batch), batch_size=len(x_batch), verbose=0)
        self.double_model.fit(np.array(x_batch),
                              np.array(y_batch),
                              batch_size=len(x_batch),
                              verbose=0)

        # we update the double network weights to the main network every 100 steps
        if self.count % 100 == 0:
            self.double_model.save_weights("models/double_model.h5")
            self.model.load_weights("models/double_model.h5")

        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
Example #2
    def train(self):
        create_file_and_folders_if_not_exist(self.get_csv_log_file_name())
        _callbacks = [
            callbacks.EarlyStopping(monitor='val_loss',
                                    patience=EPOCH_PATIENCE),
            callbacks.CSVLogger(self.get_csv_log_file_name())
        ]

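        # The first 50 writers' samples are used for training, the next 10 writers' for testing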
        train_x = np.array(self.char_vectors[0 * SAMPLES_PER_WRITER:50 *
                                             SAMPLES_PER_WRITER])
        test_x = np.array(self.char_vectors[50 * SAMPLES_PER_WRITER:60 *
                                            SAMPLES_PER_WRITER])

        train_y = np.array(self.char_labels[0 * SAMPLES_PER_WRITER:50 *
                                            SAMPLES_PER_WRITER])
        test_y = np.array(self.char_labels[50 * SAMPLES_PER_WRITER:60 *
                                           SAMPLES_PER_WRITER])

        self.model.fit(train_x,
                       train_y,
                       batch_size=self.batch_size,
                       epochs=EPOCHS_LIMIT,
                       validation_split=0.1,  # assumed hold-out so EarlyStopping can monitor val_loss
                       callbacks=_callbacks)

        score, acc = self.model.evaluate(test_x, test_y, batch_size=self.batch_size)

        self.save_model()

        print('Score: %f' % score)
        print('Test accuracy: %f%%' % (acc * 100))
Example #3
def main(layers=4):
    dataset = Dataset(config.DATA_PATH, config.LABELS_PATH)
    X_train, X_test, y_train, y_test = dataset.split(ratio=0.7)

    input_layer_size = len(X_train[0])
    output_layer_size = len(dataset.classes())
    hidden_layer_sizes = config.HIDDEN_LAYER_SIZES[layers - 1]

    clf = Sequential()

    clf.add(Dense(hidden_layer_sizes[0], activation='relu', input_dim=input_layer_size))

    for layer_size in hidden_layer_sizes[1:]:
        clf.add(Dense(layer_size, activation='relu'))

    clf.add(Dense(output_layer_size, activation='softmax'))

    clf.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

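    # One-hot encode the integer class labels to match categorical_crossentropy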
    y_train = keras.utils.to_categorical(np.array(y_train), num_classes=output_layer_size)
    y_test = keras.utils.to_categorical(np.array(y_test), num_classes=output_layer_size)

    history = clf.fit(np.array(X_train), y_train, epochs=config.PERCEPTRON_MAX_EPOCHS,
                      batch_size=32, validation_data=(np.array(X_test), y_test))

    print(clf.evaluate(np.array(X_test), y_test, batch_size=32))

    plot_training_accuracy(history, layers)
    plot_validation_accuracy(history, layers)

    create_roc(clf, X_test, y_test, layers, output_layer_size)
Example #4
def create_neural_network(x_train, y_train, path, lr=0.1, epochs=50, batch=50):
    if os.path.isfile(path):
        return load_model(path)

    model = Sequential()

    model.add(Dense(units=8, activation='relu', input_dim=len(x_train[0])))
    model.add(Dropout(0.1))
    model.add(Dense(units=8, activation='tanh'))
    model.add(Dropout(0.1))
    model.add(Dense(units=8, activation='tanh'))
    model.add(Dropout(0.1))
    model.add(Dense(units=len(y_train[0]), activation='relu'))

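    # Two candidate optimizers are instantiated; only sgd is actually passed to compile below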
    sgd = SGD(lr=lr, momentum=0.1, decay=0.0, nesterov=False)
    rmsprop = RMSprop(lr=lr)
    model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['mae'])

    model.fit(np.array(x_train),
              np.array(y_train),
              epochs=epochs,
              batch_size=batch,
              verbose=0)
    # save the trained model to disk so it can be reloaded later
    save_model(model, path)

    return model
Example #5
    def train_5fold(self, training_images, training_labels):
        self.model = InceptionV3(include_top=False, weights='imagenet')
        self.model = self.add_new_last_layer(self.model, nb_classes=2)
        self.model.trainable = True
        self.model.compile(optimizer=Adam(lr=0.0001, beta_1=0.1),
                           loss='categorical_crossentropy',
                           metrics=['categorical_accuracy'])

        # self.model.load_weights("D:\Projects\jiaomo-master\Model\model5_resNet5fold\ResNet_best_weights_fold_0.h5")
        k = 5
        train_datagen = ImageDataGenerator(rotation_range=40,
                                           width_shift_range=0.2,
                                           height_shift_range=0.2,
                                           shear_range=0.2,
                                           zoom_range=0.2,
                                           horizontal_flip=True,
                                           fill_mode='nearest')
        val_datagen = ImageDataGenerator(rotation_range=40,
                                         width_shift_range=0.2,
                                         height_shift_range=0.2,
                                         shear_range=0.2,
                                         zoom_range=0.2,
                                         horizontal_flip=True,
                                         fill_mode='nearest')

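        # Build k cross-validation folds over the training data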
        folds, x_train, y_train = self.load_data_kfold(k, training_images,
                                                       training_labels)
        # print(folds)
        print(len(training_images))
        print(len(x_train))
        for j, (train_idx, val_idx) in enumerate(folds):
            print('\nFold ', j)
            x_train_cv = x_train[train_idx]
            y_train_cv = y_train[train_idx]
            y_train_cv = to_categorical(np.array(y_train_cv))
            print(len(x_train_cv))

            x_valid_cv = x_train[val_idx]
            y_valid_cv = y_train[val_idx]
            print(len(x_valid_cv))

            y_valid_cv = to_categorical(np.array(y_valid_cv))
            steps = int(np.size(x_train_cv, 0) // self.batch_size)
            val_steps = int(np.size(x_valid_cv, 0) // self.batch_size)
            name_weights = "_fold_" + str(j)
            self.init_callbacks(name=str(name_weights))
            self.model.fit_generator(
                generator=train_datagen.flow(x=x_train_cv,
                                             y=y_train_cv,
                                             batch_size=self.batch_size),
                epochs=self.args.epoch,
                steps_per_epoch=steps,
                validation_steps=val_steps,
                verbose=1,
                callbacks=self.callbacks,
                validation_data=val_datagen.flow(x=x_valid_cv,
                                                 y=y_valid_cv,
                                                 batch_size=self.batch_size))
            print(self.model.evaluate(x_valid_cv, y_valid_cv))
Example #6
def load_data(data_path):
    """
    Load the local UCI training and validation data.
    :param data_path: dataset directory
    :return: training and validation data
    """
    train_path = os.path.join(data_path, "train")
    train_X_path = os.path.join(train_path, "Inertial Signals")

    X_trainS1_x = np.loadtxt(os.path.join(train_X_path,
                                          "body_acc_x_train.txt"))
    X_trainS1_y = np.loadtxt(os.path.join(train_X_path,
                                          "body_acc_y_train.txt"))
    X_trainS1_z = np.loadtxt(os.path.join(train_X_path,
                                          "body_acc_z_train.txt"))
    X_trainS1 = np.array([X_trainS1_x, X_trainS1_y, X_trainS1_z])

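    # Rearrange to (samples, timesteps, channels) with the three axes as channels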
    X_trainS1 = X_trainS1.transpose([1, 2, 0])

    Y_train = np.loadtxt(os.path.join(train_path, "y_train.txt"))
    Y_train = to_categorical(Y_train - 1.0)  # labels start from 1

    print("Training data: ")
    print("Sensor 1: %s, sensor 1 x-axis: %s" %
          (str(X_trainS1.shape), str(X_trainS1_x.shape)))
    print("Sensor labels: %s" % str(Y_train.shape))
    print("---------------------------------")

    test_path = os.path.join(data_path, "test")
    test_X_path = os.path.join(test_path, "Inertial Signals")

    X_valS1_x = np.loadtxt(os.path.join(test_X_path, "body_acc_x_test.txt"))
    X_valS1_y = np.loadtxt(os.path.join(test_X_path, "body_acc_y_test.txt"))
    X_valS1_z = np.loadtxt(os.path.join(test_X_path, "body_acc_z_test.txt"))
    X_valS1 = np.array([X_valS1_x, X_valS1_y, X_valS1_z])
    X_valS1 = X_valS1.transpose([1, 2, 0])

    Y_val = np.loadtxt(os.path.join(test_path, "y_test.txt"))
    Y_val = to_categorical(Y_val - 1.0)

    print("Validation data: ")
    print("Sensor 1: %s, sensor 1 x-axis: %s" %
          (str(X_valS1.shape), str(X_valS1_x.shape)))

    print("Sensor labels: %s" % str(Y_val.shape))
    print("\n")

    return X_trainS1, Y_train, X_valS1, Y_val
Example #7
def get_single_frame(image_name):
    image = cv2.imread(image_name, cv2.IMREAD_GRAYSCALE)
    resized_image = cv2.resize(image,
                               None,
                               fx=0.5,
                               fy=0.5,
                               interpolation=cv2.INTER_AREA)
    return np.array(resized_image)
Example #8
 def fragment_gravity_center(self, state, fragment_number, dim):
     """The center of weight of the fragment
     fragment_number: should be in range [0,1]
     dim: should be in ['x','y']"""
     offset = 0 if dim == 'x' else 1
     start = 20 * fragment_number + 2
     up = state[start + offset:start + 20 + offset:4]
     bottom = state[start + offset + 40:start + 60 + offset:4]
     return np.mean(np.array([up, bottom]))
Example #9
 def fragment_velocity(self, state, fragment_number, dim):
     """The velocity of the fragment
     fragment_number: should be in range [0,1,2]
     dim: should be in ['x','y']"""
     offset = 0 if dim == 'x' else 1
     start = 20 * fragment_number + 4
     up = state[start + offset:start + 20 + offset:4]
     bottom = state[start + offset + 40:start + 60 + offset:4]
     return np.mean(np.array([up, bottom]))
Example #10
 def process_frame(self, frame):
     if frame is None:
         return None
     frame = np.array(frame)
     frame = resize(frame, (self.frame_width, self.frame_height))
     frame = np.reshape(
         frame, [1, self.frame_height, self.frame_width, self.state_size])
     frame = 255 * frame
     frame = np.uint8(frame)
     return frame
Example #11
    def replay(self, batch_size):
        x_batch, y_batch = [], []
        minibatch = random.sample(self.memory, batch_size)
        for state, action_index, reward, next_state, done in minibatch:
            y_target = self.model.predict(state)
            y_target[0][
                action_index] = reward if done else reward + self.discount * np.max(
                    self.model.predict(next_state)[0])
            x_batch.append(state[0])
            y_batch.append(y_target[0])

        self.model.fit(np.array(x_batch),
                       np.array(y_batch),
                       batch_size=len(x_batch),
                       verbose=1)

        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
Example #12
 def create_image_dataset(self, directory):
     dataset = []
     filenames = os.listdir(directory)
     for f in filenames:
         if f.endswith(IMAGE_FORMAT):
             image = tiff.imread(directory + f)
             if len(image) == IMAGE_SIZE:
                 dataset.append(image)
     dataset = np.array(dataset, np.float16) #/ 1024.
     return dataset
Example #13
 def learn(self, batch_size):
     if not self.is_learning:
         return
     # Not enough samples
     if len(self.memory) < batch_size:
         return
     minibatch = random.sample(self.memory, batch_size)
     for state, action, reward, next_state, done in minibatch:
         state = np.array([state])
         target = reward + self.gamma * \
                           np.amax(self.model.predict(np.array([next_state]))[0])
         if done:
             target += 10
         target_f = self.model.predict(state)
         target_f[0][action] = target
         self.model.fit(state, target_f, epochs=1, verbose=0)
     if self.epsilon > self.epsilon_min:
         self.epsilon *= self.epsilon_decay
     print(self.epsilon)
Example #14
def create_roc(clf, X_test, y_test, layers, output_layer_size):
    y_score = clf.predict(np.array(X_test))
    # Plot linewidth.
    lw = 2

    # Compute ROC curve and ROC area for each class
    n_classes = output_layer_size
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(n_classes):
        fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])

    # Compute micro-average ROC curve and ROC area
    fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
    roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])

    # Compute macro-average ROC curve and ROC area
    # First aggregate all false positive rates
    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))

    # Then interpolate all ROC curves at this points
    mean_tpr = np.zeros_like(all_fpr)

    for i in range(n_classes):
        mean_tpr += interp(all_fpr, fpr[i], tpr[i])

    # Finally average it and compute AUC
    mean_tpr /= n_classes
    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])

    # Plot all ROC curves
    plt.figure(1)
    plt.plot(fpr["micro"], tpr["micro"], label='micro-average ROC curve (area = {0:0.2f})'.format(roc_auc["micro"]),
             color='deeppink', linestyle=':', linewidth=4)
    plt.plot(fpr["macro"], tpr["macro"], label='macro-average ROC curve (area = {0:0.2f})'.format(roc_auc["macro"]),
             color='navy', linestyle=':', linewidth=4)

    plt.plot([0, 1], [0, 1], 'k--', lw=lw)
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic to bird_classification')
    plt.legend(loc="lower right")
    plt.axes().set_aspect('equal')
    plt.grid(True)

    name = 'plot_roc{}_lbp_radius{}.png'.format(layers, config.LBP_RADIUS)

    plt.savefig(figure_path(name))
    plt.clf()
Example #15
    def act(self, state):
        if np.random.rand() <= self.epsilon:
            # The agent acts randomly
            # print('rand: ', end='')
            return self.random_action()

        # Predict the reward value based on the given state
        # print('net: ', end='')
        act_values = self.model.predict(np.array([state]))
        # print(act_values, end=', ')
        # Pick the action based on the predicted reward
        return np.argmax(act_values[0])
Example #16
    def getAction(self, state):
        """
          Compute the action to take in the current state.  With
          probability self.epsilon, we should take a random action and
          take the best policy action otherwise.  Note that if there are
          no legal actions, which is the case at the terminal state, you
          should choose None as the action.

          HINT: You might want to use util.flipCoin(prob)
          HINT: To pick randomly from a list, use random.choice(list)
        """
        # Pick Action
        legalActions = self.getLegalActions(state)
        # if 'Stop' in legalActions:
        #     legalActions.remove('Stop')

        action = None
        "*** YOUR CODE HERE ***"
        if not self.getLegalActions(state):
            return action  # Terminal State, return None

        self.image = getFrame()
        self.image = np.array(self.image)
        self.image = resize(self.image, (self.frame_width, self.frame_height))
        self.image = np.reshape(self.image,
                                [1, self.frame_height, self.frame_width, 3])
        self.image = np.uint8(self.image)

        #print 'Epsilon value: ', self.epsilon
        if self.epsilon > random.random():
            action = random.choice(legalActions)  # Explore
        else:
            #action = self.computeActionFromQValues(state)  # Exploit
            #state_matrix = self.getStateMatrices(state)
            #state_matrix = np.reshape(np.array(state_matrix), [1, self.state_size])
            #state_matrix = np.reshape(state_matrix, (1, self.frame_width, self.frame_height))

            act_values = self.model.predict(self.image)
            action = PACMAN_ACTIONS[np.argmax(act_values[0])]  # returns action

            if action not in legalActions:
                action = 'Stop'
                #action = random.choice(legalActions)

        self.doAction(state, action)
        return action
Example #17
    def analyze(self, signals_group: SignalsGroup) -> AutoencoderResult:
        if len(signals_group.signals) != self._signals_count:
            raise ValueError(
                f'The model can process exactly {self._signals_count} signals'
            )

        model_path = self._get_model_path(signals_group.name)
        if not os.path.exists(model_path):
            raise FileNotFoundError(
                f'No model was found for signal group {signals_group.name}. Run training first'
            )
        self._model.load_weights(model_path)

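        # Preprocess each signal and stack them column-wise as the autoencoder input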
        data = np.array([
            self._preprocess(signal)
            for signal in signals_group.signals_data.values()
        ])
        data_stacked = np.column_stack(data)
        # samples, sample_len, features
        data_reshaped = data_stacked.reshape(data_stacked.shape[0],
                                             data_stacked.shape[1], 1)

        decoded_data_reshaped = self._model.predict(data_reshaped,
                                                    batch_size=self.BATCH_SIZE)
        decoded_data = decoded_data_reshaped.reshape(data_stacked.shape)
        decoded_data = np.column_stack(decoded_data)

        mse = np.sum([
            squared_error(predictions, targets)
            for predictions, targets in zip(decoded_data, data)
        ],
                     axis=0)
        ewma_mse = ewma(mse,
                        window=self.EWMA_WINDOW_SIZE,
                        alpha=self.EWMA_ALPHA)

        return AutoencoderResult(
            signals=data,
            decoded_signals=decoded_data,
            mse=mse,
            ewma_mse=ewma_mse,
        )
Example #18
 def distance_of_the_middle_segment(self, state):
     """Distance between the middle segment and the target point"""
     x = np.mean(np.array([state[18], state[22], state[62], state[66]]))
     y = np.mean(np.array([state[19], state[23], state[63], state[67]]))
     return self.distance(x, y, self.end_point[0], self.end_point[1])
Example #19
 def distance_of_the_last_segment(self, state):
     """Distance between the last segment and the target point"""
     x = np.mean(np.array([state[34], state[38], state[74], state[78]]))
     y = np.mean(np.array([state[35], state[39], state[75], state[79]]))
     return self.distance(x, y, self.end_point[0], self.end_point[1])
Example #20
    def evaluate(self,
                 generator,
                 iou_threshold=0.3,
                 score_threshold=0.3,
                 max_detections=100,
                 save_path=None):
        """ Evaluate a given dataset using a given model.
        code originally from https://github.com/fizyr/keras-retinanet

        # Arguments
            generator       : The generator that represents the dataset to evaluate.
            model           : The model to evaluate.
            iou_threshold   : The threshold used to consider when a detection is positive or negative.
            score_threshold : The score confidence threshold to use for detections.
            max_detections  : The maximum number of detections to use per image.
            save_path       : The path to save images with visualized detections to.
        # Returns
            A dict mapping class names to mAP scores.
        """
        # gather all detections and annotations
        all_detections = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
        all_annotations = [[None for i in range(generator.num_classes())] for j in range(generator.size())]

        for i in range(generator.size()):
            raw_image = generator.load_image(i)
            raw_height, raw_width, raw_channels = raw_image.shape

            # make the boxes and the labels
            pred_boxes = self.predict(raw_image)

            score = np.array([box.score for box in pred_boxes])
            pred_labels = np.array([box.label for box in pred_boxes])

            if len(pred_boxes) > 0:
                pred_boxes = np.array([[box.xmin * raw_width, box.ymin * raw_height, box.xmax * raw_width,
                                        box.ymax * raw_height, box.score] for box in pred_boxes])
            else:
                pred_boxes = np.array([[]])

            # sort the boxes and the labels according to scores
            score_sort = np.argsort(-score)
            pred_labels = pred_labels[score_sort]
            pred_boxes = pred_boxes[score_sort]

            # copy detections to all_detections
            for label in range(generator.num_classes()):
                all_detections[i][label] = pred_boxes[pred_labels == label, :]

            annotations = generator.load_annotation(i)

            # copy detections to all_annotations
            for label in range(generator.num_classes()):
                all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()

        # compute mAP by comparing all detections and all annotations
        average_precisions = {}

        for label in range(generator.num_classes()):
            false_positives = np.zeros((0,))
            true_positives = np.zeros((0,))
            scores = np.zeros((0,))
            num_annotations = 0.0

            for i in range(generator.size()):
                detections = all_detections[i][label]
                annotations = all_annotations[i][label]
                num_annotations += annotations.shape[0]
                detected_annotations = []

                for d in detections:
                    scores = np.append(scores, d[4])

                    if annotations.shape[0] == 0:
                        false_positives = np.append(false_positives, 1)
                        true_positives = np.append(true_positives, 0)
                        continue

                    overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
                    assigned_annotation = np.argmax(overlaps, axis=1)
                    max_overlap = overlaps[0, assigned_annotation]

                    if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                        false_positives = np.append(false_positives, 0)
                        true_positives = np.append(true_positives, 1)
                        detected_annotations.append(assigned_annotation)
                    else:
                        false_positives = np.append(false_positives, 1)
                        true_positives = np.append(true_positives, 0)

            # no annotations -> AP for this class is 0 (is this correct?)
            if num_annotations == 0:
                average_precisions[label] = 0
                continue

            # sort by score
            indices = np.argsort(-scores)
            false_positives = false_positives[indices]
            true_positives = true_positives[indices]

            # compute false positives and true positives
            false_positives = np.cumsum(false_positives)
            true_positives = np.cumsum(true_positives)

            # compute recall and precision
            recall = true_positives / num_annotations
            precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)

            # compute average precision
            average_precision = compute_ap(recall, precision)
            average_precisions[label] = average_precision

        return average_precisions
Example #21
def train():
    x_data, y_data = read(
        os.path.join(os.getcwd(), "datasets", "2", "parkinsons_updrs.data.csv"))

    reduces = [y[0] for y in y_data]
    x_data = feature_selection(np.array(x_data), np.array(reduces)).tolist()

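    # Normalize the selected features; m and s hold the fitted normalization parameters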
    x_data, m, s = normalize_data(x_data)

    x_train, x_validation, x_test = divide_data_set(x_data)
    y_train, y_validation, y_test = divide_data_set(y_data)

    motor_x_train = np.array([[y[0]] for y in y_train])
    total_x_train = np.array([[y[1]] for y in y_train])
    motor_x_test = np.array([[y[0]] for y in y_test])
    total_x_test = np.array([[y[1]] for y in y_test])
    x_test = np.array(x_test)
    y_test = np.array(y_test)
    print("\t\tFinished divide data...")
    n_x_train = np.array(x_train)
    n_y_train = np.array([[y[0]] for y in y_train])
    n_y_train = np.array(y_train)
    print("\t\tTraining neural network...")
    # Step 6: train network
    path = input("Input output file name for neural network (press Enter to finish): ")
    while path != "":
        for i in range(0, 10):
            net = create_neural_network(n_x_train,
                                        n_y_train,
                                        path=os.getcwd() + "\\" + path,
                                        lr=(i + 1) / 10)
            test(x_test, y_test, net, i)
        path = input(
            "Input output file name for neural network (press Enter to finish): ")
Example #22
                 memory_unit, n_layers_postprocessing, t_pred, mask_value, n_features_pred, regularization):
    netin = Input(batch_shape=(batch_size, t_train + t_pred, n_features_train * n_stations))
    mask = Masking(mask_value=mask_value)(netin)

    preprocessing = create_preprocessing(mask, n_stations, activation, n_layers_preprocessing, width, regularization)
    memory_unit = create_memory(preprocessing, memory_unit, n_layers_memory, int(n_stations / n_layers_preprocessing))
    shift = Lambda(lambda x: x[:, -t_pred:, :])(memory_unit)
    postprocessing = create_postprocessing(shift, activation, n_layers_postprocessing,
                                           int(n_stations / n_layers_preprocessing), regularization)
    out = Dense(n_features_pred)(postprocessing)
    model = Model(netin, out)

    return model


if __name__ == '__main__':
    monday_til_saturday = np.array([
        [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6]],
        [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6]],
        [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6]]
    ])
    sunday = np.array([7, 7, 7])
    model = meijer_net()
    model.compile(optimizer='Adam', loss='mean_squared_error')
    model.fit(x=monday_til_saturday, y=sunday, epochs=1, batch_size=3)
    prediction = model.predict(monday_til_saturday, batch_size=3, verbose=2)
    print(prediction)
Example #23
File: train.py, Project: Zeqiang-Lai/KE103
train_steps, train_generator = utils.batch_iter(x_train,
                                                y_train,
                                                NUM_LABEL,
                                                BATCH_SIZE,
                                                shuffle=SHUFFLE)
model.fit_generator(generator=train_generator,
                    steps_per_epoch=train_steps,
                    epochs=EPOCHS,
                    verbose=VERBOSE)
# model.save(MODEL_PATH)
# model.save_weights(MODEL_PATH)

print('Validation set')
# Score
X_valid = utils.process_data_for_keras(NUM_LABEL, x_valid)
length = np.array([len(sent) for sent in x_valid], dtype='int32')
y_pred = model.predict(X_valid)
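# Take the most likely label per token and trim each sequence back to its true length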
y = np.argmax(y_pred, -1)
y_pred = [iy[:l] for iy, l in zip(y, length)]

true = MultiLabelBinarizer().fit_transform(y_valid)
pred = MultiLabelBinarizer().fit_transform(y_pred)
score = sk_f1_score(true, pred, average='micro')
print('F1(sk-learn): {0}'.format(score))

a = np.array(y_valid).flatten()
b = np.array(y_pred).flatten()
f1_v1 = F1_score_v1(a, b, label2idx, idx2label)
print('F1(Any Overlap OK): {0}'.format(f1_v1))

f1_v2 = F1_score_v2(y_valid, y_pred, label2idx, idx2label)
Example #24
def load_data():
    x_train = []
    y_train = []
    x_test = []
    y_test = []
    ratings = defaultdict(lambda: Rating())
    cutoff = int(len(DATA) * 0.7)
    for i, scene in enumerate(DATA):
        if len(scene['fights']) < 10:
            continue
        x = []
        y = []
        for fight in reversed(scene['fights']):
            # skip if no odds:
            if 'odds' not in fight:
                continue

            f1 = fight['fighters'][0]['name']
            f2 = fight['fighters'][1]['name']

            f1_odds = fight['odds'][f1]
            f2_odds = fight['odds'][f2]
            if not -50 < f1_odds < 50 or not -50 < f2_odds < 50:
                raise ValueError(
                    f'surely these odds are wrong? {f1_odds} {f2_odds}')

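            # Estimate each fighter's win probability from the current ratings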
            win1_prob = win_probability([ratings[f1]], [ratings[f2]])
            win2_prob = win_probability([ratings[f2]], [ratings[f1]])

            # get winner
            fw = fight['winner']['fighter']
            is_win_1 = fw == f1
            fl = f2 if is_win_1 else f1
            if not is_win_1 and fw != f2 and fw is not None:
                raise ValueError(f'unknown winner {fw}')
            drawn = fw is None

            x.extend([
                1 / f1_odds,
                1 / f2_odds,
                win1_prob,
                win2_prob,
            ])

            y.extend([
                fight['odds'][f1] if is_win_1 else 0,
                fight['odds'][f2] if not is_win_1 else 0,
            ])

            # update ratings
            ratings[fw], ratings[fl] = rate_1vs1(ratings[fw],
                                                 ratings[fl],
                                                 drawn=drawn)

            if len(x) == 40:
                break

        # add data and results for rewards
        if i < cutoff:
            x_train.append(x)
            y_train.append(y)
        else:
            x_test.append(x)
            y_test.append(y)

    return np.array(x_train), np.array(y_train), np.array(x_test), np.array(
        y_test)
Example #25
import numpy as np
from keras import Input, Model
from keras.layers import Dense, LSTM

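# Functional API model: a Dense layer applied per timestep feeds a single-unit LSTM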
deep_lstm = Input(shape=(None, 2))
dl = Dense(4)(deep_lstm)
dl = LSTM(1)(dl)

model = Model(inputs=deep_lstm, outputs=dl)
model.compile(optimizer='rmsprop', loss="mse")

x = np.array([[[1, 2], [3, 4]], [[4, 3], [2, 1]]])
y = np.array([1, -1])

model.fit(x, y, epochs=100)

print(model.predict(np.reshape(x[0], (-1, 2, 2))))
Example #26
model = models.Sequential()

# use model.add() to add any layers you like
# read Keras documentation to find which layers you can use:
#           https://keras.io/layers/core/
#           https://keras.io/layers/convolutional/
#           https://keras.io/layers/pooling/
#

model.add(layers.Conv2D(32, (3, 3), input_shape=(128, 128, 3)))
model.add(layers.Activation('relu'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
keras.layers.ActivityRegularization(l1=0.0, l2=0.0)
keras.layers.Masking(mask_value=0.0)
lr = np.array([1e-6, 1e-4, 1e-2])
keras.layers.AveragePooling3D(pool_size=(2, 2, 2),
                              strides=None,
                              padding='valid',
                              data_format=None)

model.add(layers.Conv2D(32, (3, 3)))
model.add(layers.Activation('relu'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))

model.add(layers.Conv2D(32, (3, 3)))
model.add(layers.Activation('relu'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
keras.layers.Dropout(0, noise_shape=None, seed=None)

model.add(layers.Flatten())