Code Example #1
File: baseline.py Project: alexliu2360/ai_stat
 def on_epoch_end(self, epoch, logs=None):
     # Predict class indices for the validation set and log the macro F1 score.
     val_predict = np.argmax(np.asarray(
         self.model.predict(self.validation_data[0])),
                             axis=1)
     val_targ = np.argmax(self.validation_data[1], axis=1)
     _val_f1 = f1_score(val_targ, val_predict, average='macro')
     self.val_f1s.append(_val_f1)
     print(' — val_f1:', _val_f1)
     return
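This on_epoch_end method is normally part of a small callback class. Below is a minimal sketch of how it could be wrapped and attached, assuming the class is named Metrics, subclasses keras.callbacks.Callback, and relies (like the example) on the older Keras behavior where callbacks expose self.validation_data; f1_score is assumed to come from sklearn.metrics.

import numpy as np
from keras.callbacks import Callback
from sklearn.metrics import f1_score


class Metrics(Callback):
    """Track macro F1 on the validation data at the end of every epoch."""

    def on_train_begin(self, logs=None):
        self.val_f1s = []

    def on_epoch_end(self, epoch, logs=None):
        val_predict = np.argmax(self.model.predict(self.validation_data[0]), axis=1)
        val_targ = np.argmax(self.validation_data[1], axis=1)
        self.val_f1s.append(f1_score(val_targ, val_predict, average='macro'))
        print(' - val_f1:', self.val_f1s[-1])


# Usage sketch: pass validation_data to fit() so self.validation_data is populated.
# model.fit(X_train, y_train, validation_data=(X_val, y_val), callbacks=[Metrics()])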
Code Example #2
def confu_matrix_gen(data_dir, validation_generator, model=None):
    # if model ==None:
    #     model = applications.ResNet50(weights=None, input_shape=(img_width, img_height, 3), classes=2)
    #
    y_real = validation_generator.classes
    y_preds = model.predict_generator(validation_generator, verbose=0)
    y_preds = np.argmax(y_preds, axis=1)

    print('real class:', y_real)
    print('predicted class:', y_preds)

    classes = 20
    y_preds_m = []
    y_real_m = []
    for i, j in zip(y_preds, y_real):
        if i < classes and j < classes:
            y_preds_m.append(i)
            y_real_m.append(j)

    cnf_matrix = confusion_matrix(y_real_m, y_preds_m)
    print('----------confusion matrix--------')
    print(cnf_matrix)
    # write into file
    # cnf_mtx_file = open("matrix.txt", "w")
    # cnf_mtx_file.write(str(cnf_matrix))
    # cnf_mtx_file.close()

    labels = np.arange(classes)
    plot_confusion_matrix(cnf_matrix, classes=labels)
    plt.figure()
    plot_confusion_matrix(cnf_matrix, classes=labels, normalize=True)
    np.set_printoptions(precision=2)
    plt.show()
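plot_confusion_matrix is called above but not defined in this snippet. A minimal sketch of such a helper, modeled on the widely used matplotlib recipe, is shown below; the signature (cm, classes, normalize) is an assumption inferred from the calls above.

import itertools

import matplotlib.pyplot as plt
import numpy as np


def plot_confusion_matrix(cm, classes, normalize=False, cmap=plt.cm.Blues):
    """Render a confusion matrix; `classes` holds the tick labels."""
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title('Confusion matrix' + (' (normalized)' if normalize else ''))
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment='center',
                 color='white' if cm[i, j] > thresh else 'black')

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()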
Code Example #3
 def get_next_step(self, state, game):
     next_step = np.random.choice(list(game.action_space))
     if np.random.uniform() <= self.ratio_explotacion:
         q = self.model.predict(self.prepare_state(state))
         idx_action = np.argmax(q[0])
         next_step = list(game.action_space)[idx_action]
     return next_step
Code Example #4
File: baseline.py Project: alexliu2360/ai_stat
def train_CV_CNN(train_x=dealed_train,
                 test_x=dealed_test,
                 val_x=dealed_val,
                 y_cols=y_cols,
                 debug=False,
                 folds=2):
    model = build_model()
    model.compile(optimizer='adam', loss='categorical_crossentropy')
    F1_scores = 0
    F1_score = 0
    metrics = Metrics()
    result = {}  # initialise once, outside the column loop, so every column's predictions are kept
    if debug:
        y_cols = ['location_traffic_convenience']
    for index, col in enumerate(y_cols):
        # train / val are module-level DataFrames; labels are shifted by +2 so they are
        # non-negative for to_categorical (and shifted back by -2 below)
        train_y = train[col] + 2
        val_y = val[col] + 2
        y_val_pred = 0
        y_test_pred = 0
        for i in range(1):  # only a single train/test split is run, even though a folds argument is accepted
            X_train, X_test, y_train, y_test = train_test_split(
                train_x, train_y, test_size=0.2, random_state=100 * i)
            y_train_onehot = to_categorical(y_train)
            y_test_onehot = to_categorical(y_test)
            history = model.fit(X_train,
                                y_train_onehot,
                                epochs=20,
                                batch_size=64,
                                validation_data=(X_test, y_test_onehot),
                                callbacks=[metrics])

            # Predict on the validation and test sets
            y_val_pred = model.predict(val_x)
            y_test_pred += model.predict(test_x)

            y_val_pred = np.argmax(y_val_pred, axis=1)

            F1_score = f1_score(y_val_pred, val_y, average='macro')
            F1_scores += F1_score

            print(col, 'f1_score:', F1_score, 'ACC_score:',
                  accuracy_score(y_val_pred, val_y))
        y_test_pred = np.argmax(y_test_pred, axis=1)
        result[col] = y_test_pred - 2
    print('all F1_score:', F1_scores / len(y_cols))
    return result
Code Example #5
    def getAction(self, state):
        """
          Compute the action to take in the current state.  With
          probability self.epsilon, we should take a random action and
          take the best policy action otherwise.  Note that if there are
          no legal actions, which is the case at the terminal state, you
          should choose None as the action.

          HINT: You might want to use util.flipCoin(prob)
          HINT: To pick randomly from a list, use random.choice(list)
        """
        # Pick Action
        legalActions = self.getLegalActions(state)
        action = None

        "*** YOUR CODE HERE ***"
        if not self.getLegalActions(state):
            return action  # Terminal State, return None

        #if self.image is None: # we only need to compute it the first time. afterwards, well get a copy from nextframe
        if self.new_episode:
            # self.new_episode = True
            self.image = self.process_frame(getFrame())
            self.new_episode = False

        # epsilon greedy: exploit - explore
        if self.epsilon > random.random():
            action = random.choice(legalActions)  # Explore
        else:
            act_values = self.model.predict(self.image / 255.0)  # Exploit

            # Greedy action: take the argmax; if it is illegal, suppress its
            # Q-value so the next-best action is tried on the next pass.
            for value in act_values[0]:
                action = PACMAN_ACTIONS[(np.argmax(act_values[0]))]
                if action not in legalActions:
                    act_values[0][np.argmax(act_values[0])] = -10000
                else:
                    break

            if action not in legalActions:
                print('action was not legal')
                action = 'Stop'

        self.doAction(state, action)
        return action
Code Example #6
def decoder_sequence(input_seq):
    # These mappings must be the character-to-index dictionaries built during
    # preprocessing/training; left empty, the lookups below would fail.
    src_to_index, tar_to_index = {}, {}

    index_to_src = dict((i, char) for char, i in src_to_index.items())
    index_to_tar = dict((i, char) for char, i in tar_to_index.items())

    model = load_model('./model/')
    encoder_inputs = model.input[0]  # input_1
    encoder_outputs, state_h_enc, state_c_enc = model.layers[
        2].output  # lstm_1
    encoder_states = [state_h_enc, state_c_enc]
    encoder_model = Model(encoder_inputs, encoder_states)

    states_value = encoder_model.predict(input_seq)

    decoder_inputs = model.input[1]  # input_2
    decoder_state_input_h = Input(shape=(256, ), name='input_3')
    decoder_state_input_c = Input(shape=(256, ), name='input_4')
    decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
    decoder_lstm = model.layers[3]
    decoder_outputs, state_h_dec, state_c_dec = decoder_lstm(
        decoder_inputs, initial_state=decoder_states_inputs)
    decoder_states = [state_h_dec, state_c_dec]
    decoder_dense = model.layers[4]
    decoder_outputs = decoder_dense(decoder_outputs)

    decoder_model = Model([decoder_inputs] + decoder_states_inputs,
                          [decoder_outputs] + decoder_states)

    max_tar_len = decoder_inputs.shape[1]
    tar_vocab_size = decoder_inputs.shape[2]

    target_seq = np.zeros((1, 1, tar_vocab_size))
    target_seq[0, 0, tar_to_index['\t']] = 1

    stop_condition = False
    decoded_sentence = ""
    while not stop_condition:  # loop until stop_condition becomes True
        output_tokens, h, c = decoder_model.predict([target_seq] +
                                                    states_value)
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_char = index_to_tar[sampled_token_index]
        decoded_sentence += sampled_char

        # Stop when the end-of-sequence character is reached or the maximum length is exceeded.
        if (sampled_char == '\n' or len(decoded_sentence) > max_tar_len):
            stop_condition = True

        # Update the length-1 target sequence.
        target_seq = np.zeros((1, 1, tar_vocab_size))
        target_seq[0, 0, sampled_token_index] = 1.

        # Update the decoder states.
        states_value = [h, c]

    return decoded_sentence
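For reference, a hedged sketch of how input_seq is typically prepared before calling decoder_sequence, assuming max_src_len, src_vocab_size, and the src_to_index mapping come from the same preprocessing used at training time (the helper name is hypothetical):

import numpy as np


def encode_input_sentence(sentence, src_to_index, max_src_len, src_vocab_size):
    # One-hot encode a single source sentence, character by character,
    # into the (1, timesteps, vocab) shape the encoder expects.
    input_seq = np.zeros((1, max_src_len, src_vocab_size))
    for t, char in enumerate(sentence[:max_src_len]):
        input_seq[0, t, src_to_index[char]] = 1.
    return input_seq


# Example (hypothetical values):
# input_seq = encode_input_sentence("hello.", src_to_index, max_src_len, src_vocab_size)
# print(decoder_sequence(input_seq))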
Code Example #7
File: RLAgent.py Project: mrugacz95/octopus-arm-rl
    def act(self, state):
        if np.random.rand() <= self.epsilon:
            # The agent acts randomly
            # print('rand: ', end='')
            return self.random_action()

        # print('net: ', end='')
        # Predict the reward value for each action from the given state
        act_values = self.model.predict(np.array([state]))
        # print(act_values, end=', ')
        # Pick the action based on the predicted reward
        return np.argmax(act_values[0])
Code Example #8
    def getAction(self, state):
        """
          Compute the action to take in the current state.  With
          probability self.epsilon, we should take a random action and
          take the best policy action otherwise.  Note that if there are
          no legal actions, which is the case at the terminal state, you
          should choose None as the action.

          HINT: You might want to use util.flipCoin(prob)
          HINT: To pick randomly from a list, use random.choice(list)
        """
        # Pick Action
        legalActions = self.getLegalActions(state)
        # if 'Stop' in legalActions:
        #     legalActions.remove('Stop')

        action = None
        "*** YOUR CODE HERE ***"
        if not self.getLegalActions(state):
            return action  # Terminal State, return None

        self.image = getFrame()
        self.image = np.array(self.image)
        self.image = resize(self.image, (self.frame_width, self.frame_height))
        self.image = np.reshape(self.image,
                                [1, self.frame_height, self.frame_width, 3])
        self.image = np.uint8(self.image)

        #print 'Epsilon value: ', self.epsilon
        if self.epsilon > random.random():
            action = random.choice(legalActions)  # Explore
        else:
            #action = self.computeActionFromQValues(state)  # Exploit
            #state_matrix = self.getStateMatrices(state)
            #state_matrix = np.reshape(np.array(state_matrix), [1, self.state_size])
            #state_matrix = np.reshape(state_matrix, (1, self.frame_width, self.frame_height))

            act_values = self.model.predict(self.image)
            action = PACMAN_ACTIONS[(np.argmax(
                act_values[0]))]  # returns action

            if action not in legalActions:
                action = 'Stop'
                #action = random.choice(legalActions)

        self.doAction(state, action)
        return action
Code Example #9
File: train.py Project: Zeqiang-Lai/KE103
                                                NUM_LABEL,
                                                BATCH_SIZE,
                                                shuffle=SHUFFLE)
model.fit_generator(generator=train_generator,
                    steps_per_epoch=train_steps,
                    epochs=EPOCHS,
                    verbose=VERBOSE)
# model.save(MODEL_PATH)
# model.save_weights(MODEL_PATH)

print('Validation set')
# Score
X_valid = utils.process_data_for_keras(NUM_LABEL, x_valid)
length = np.array([len(sent) for sent in x_valid], dtype='int32')
y_pred = model.predict(X_valid)
y = np.argmax(y_pred, -1)
y_pred = [iy[:l] for iy, l in zip(y, length)]

true = MultiLabelBinarizer().fit_transform(y_valid)
pred = MultiLabelBinarizer().fit_transform(y_pred)
score = sk_f1_score(true, pred, average='micro')
print('F1(sk-learn): {0}'.format(score))

a = np.array(y_valid).flatten()
b = np.array(y_pred).flatten()
f1_v1 = F1_score_v1(a, b, label2idx, idx2label)
print('F1(Any Overlap OK): {0}'.format(f1_v1))

f1_v2 = F1_score_v2(y_valid, y_pred, label2idx, idx2label)
print('F1(exact match): {0}'.format(f1_v2))
Code Example #10
File: train.py Project: mvanmeerbeck/fruits-detector
evaluation = model.evaluate_generator(test_generator,
                                      steps=test_generator.n //
                                      test_generator.batch_size,
                                      verbose=1)

print(evaluation)
with open(output_dir + '/evaluation.txt', 'w') as f:
    f.write(str(evaluation[0]) + "\n")
    f.write(str(evaluation[1]))

print("prediction time")
test_generator.reset()

pred = model.predict_generator(test_generator,
                               steps=test_generator.n //
                               test_generator.batch_size,
                               verbose=1)

predicted_class_indices = np.argmax(pred, axis=1)

labels = (train_generator.class_indices)
labels = dict((v, k) for k, v in labels.items())
predictions = [labels[k] for k in predicted_class_indices]

filenames = test_generator.filenames
results = pd.DataFrame({"Filename": filenames, "Predictions": predictions})
results.to_csv(output_dir + "/predictions.csv", index=False)

np.save(output_dir + '/class_indices', train_generator.class_indices)
model.save(output_dir + '/model.h5')
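The class indices and model saved above can be reloaded later for stand-alone inference. A minimal sketch, assuming the same output_dir and a batch x preprocessed like the training images:

import numpy as np
from keras.models import load_model

# Restore the label mapping and the trained model saved by the training script.
class_indices = np.load(output_dir + '/class_indices.npy', allow_pickle=True).item()
index_to_label = {v: k for k, v in class_indices.items()}
model = load_model(output_dir + '/model.h5')

# x is assumed to be a batch of images preprocessed the same way as the training data.
probs = model.predict(x)
predicted_labels = [index_to_label[i] for i in np.argmax(probs, axis=1)]
print(predicted_labels)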
Code Example #11
File: test_perceptron_tf.py Project: redcyb/rbm
# model = Sequential([
#     Dense(392, input_shape=(784,)),
#     Activation('sigmoid'),
#     Dense(64),
#     Activation('sigmoid'),
#     Dense(10),
#     Activation('sigmoid'),
# ])
model = Sequential([
    Dense(64, input_shape=(784, )),
    Activation('sigmoid'),
    Dense(10),
    Activation('sigmoid'),
])

model.compile(optimizer='rmsprop', loss='mse')

# Train the model, iterating on the data in batches of 32 samples
model.load_weights("./weights/keras___784_64_10___100")
# model.fit(x_train, y_train, epochs=100, batch_size=32)
# model.save_weights("./weights/keras___784_64_10___100")

predict = model.predict_classes(x_test)
classes = np.argmax(y_test, axis=1)

result = np.vstack((classes, predict))
result = np.vstack((result, predict == classes)).T

match = np.count_nonzero(result[:, 2])

print(result)
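A short follow-up sketch that turns the match count into an accuracy figure (result has one row per test sample, and its third column marks agreement between prediction and label):

# Fraction of test samples whose predicted class matches the true class.
accuracy = match / float(result.shape[0])
print('accuracy:', accuracy)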
Code Example #12
    def evaluate(self,
                 generator,
                 iou_threshold=0.3,
                 score_threshold=0.3,
                 max_detections=100,
                 save_path=None):
        """ Evaluate a given dataset using a given model.
        code originally from https://github.com/fizyr/keras-retinanet

        # Arguments
            generator       : The generator that represents the dataset to evaluate.
            iou_threshold   : The threshold used to consider when a detection is positive or negative.
            score_threshold : The score confidence threshold to use for detections.
            max_detections  : The maximum number of detections to use per image.
            save_path       : The path to save images with visualized detections to.
        # Returns
            A dict mapping class names to mAP scores.
        """
        # gather all detections and annotations
        all_detections = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
        all_annotations = [[None for i in range(generator.num_classes())] for j in range(generator.size())]

        for i in range(generator.size()):
            raw_image = generator.load_image(i)
            raw_height, raw_width, raw_channels = raw_image.shape

            # make the boxes and the labels
            pred_boxes = self.predict(raw_image)

            score = np.array([box.score for box in pred_boxes])
            pred_labels = np.array([box.label for box in pred_boxes])

            if len(pred_boxes) > 0:
                pred_boxes = np.array([[box.xmin * raw_width, box.ymin * raw_height, box.xmax * raw_width,
                                        box.ymax * raw_height, box.score] for box in pred_boxes])
            else:
                pred_boxes = np.array([[]])

            # sort the boxes and the labels according to scores
            score_sort = np.argsort(-score)
            pred_labels = pred_labels[score_sort]
            pred_boxes = pred_boxes[score_sort]

            # copy detections to all_detections
            for label in range(generator.num_classes()):
                all_detections[i][label] = pred_boxes[pred_labels == label, :]

            annotations = generator.load_annotation(i)

            # copy detections to all_annotations
            for label in range(generator.num_classes()):
                all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()

        # compute mAP by comparing all detections and all annotations
        average_precisions = {}

        for label in range(generator.num_classes()):
            false_positives = np.zeros((0,))
            true_positives = np.zeros((0,))
            scores = np.zeros((0,))
            num_annotations = 0.0

            for i in range(generator.size()):
                detections = all_detections[i][label]
                annotations = all_annotations[i][label]
                num_annotations += annotations.shape[0]
                detected_annotations = []

                for d in detections:
                    scores = np.append(scores, d[4])

                    if annotations.shape[0] == 0:
                        false_positives = np.append(false_positives, 1)
                        true_positives = np.append(true_positives, 0)
                        continue

                    overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
                    assigned_annotation = np.argmax(overlaps, axis=1)
                    max_overlap = overlaps[0, assigned_annotation]

                    if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                        false_positives = np.append(false_positives, 0)
                        true_positives = np.append(true_positives, 1)
                        detected_annotations.append(assigned_annotation)
                    else:
                        false_positives = np.append(false_positives, 1)
                        true_positives = np.append(true_positives, 0)

            # no annotations -> AP for this class is 0 (is this correct?)
            if num_annotations == 0:
                average_precisions[label] = 0
                continue

            # sort by score
            indices = np.argsort(-scores)
            false_positives = false_positives[indices]
            true_positives = true_positives[indices]

            # compute false positives and true positives
            false_positives = np.cumsum(false_positives)
            true_positives = np.cumsum(true_positives)

            # compute recall and precision
            recall = true_positives / num_annotations
            precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)

            # compute average precision
            average_precision = compute_ap(recall, precision)
            average_precisions[label] = average_precision

        return average_precisions
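A hedged usage sketch for evaluate(); detector and valid_generator are hypothetical names for an instance of the class above and a dataset generator:

# Report per-class average precision and the mean over classes (mAP).
average_precisions = detector.evaluate(valid_generator, iou_threshold=0.5)
for label, ap in average_precisions.items():
    print('class {}: AP = {:.4f}'.format(label, ap))
print('mAP: {:.4f}'.format(sum(average_precisions.values()) / len(average_precisions)))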
Code Example #13
def mnist_cnnv_datagen():
    """
    Train an MNIST CNN using Keras image augmentation.
    :return:
    """
    batch_size = 128
    nb_classes = 10  # number of classes
    nb_epoch = 12  # number of training epochs
    # input image dimensions
    img_rows, img_cols = 28, 28
    # number of convolutional filters
    nb_filters = 32
    # max-pooling size
    pool_size = (2, 2)
    # convolution kernel size
    kernel_size = (3, 3)

    (X_train, y_train), (X_test, y_test) = mnist.load_data(os.path.join(root_path, "data", "mnist", "mnist.npz"))

    if K.image_dim_ordering() == 'th':
        # Theano ordering: (samples, channels, rows, cols)
        X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
        X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        # TensorFlow ordering: (samples, rows, cols, channels)
        X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
        X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255

    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    model = keras.models.Sequential()

    model.add(Convolution2D(nb_filters, kernel_size, padding='same', input_shape=input_shape,
                            data_format="channels_last", activation="relu"))
    model.add(Convolution2D(nb_filters, kernel_size, padding='same', activation="relu"))
    model.add(Convolution2D(nb_filters, kernel_size, padding='same', activation="relu"))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(0.25))

    model.add(Convolution2D(nb_filters * 2, kernel_size, padding='same', activation="relu"))
    model.add(Convolution2D(nb_filters * 2, kernel_size, padding='same', activation="relu"))
    model.add(Convolution2D(nb_filters * 2, kernel_size, padding='same', activation="relu"))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(128, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, activation="softmax"))

    model.summary()

    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])

    # Image augmentation
    train_datagen = image.ImageDataGenerator(rotation_range=10, width_shift_range=0.2, height_shift_range=0.2,
                                             horizontal_flip=True, vertical_flip=False)
    validation_datagen = image.ImageDataGenerator(rotation_range=10, width_shift_range=0.2, height_shift_range=0.2)

    train_datagen.fit(X_train)
    validation_datagen.fit(X_test)

    train_generate = train_datagen.flow(X_train, Y_train, batch_size=batch_size)
    validation_generate = validation_datagen.flow(X_test, Y_test, batch_size=batch_size)

    model.fit_generator(train_generate, steps_per_epoch=X_train.shape[0] // batch_size, epochs=nb_epoch, verbose=1,
                        validation_data=validation_generate, validation_steps=X_test.shape[0] // batch_size, workers=1,
                        use_multiprocessing=False)

    test_loss, test_acc = model.evaluate_generator(validation_generate, steps=X_test.shape[0] // batch_size, workers=1,
                                                   use_multiprocessing=False)

    logger.info('Test loss: {0}, accuracy: {1}'.format(test_loss, test_acc))

    predictions = model.predict(X_test)
    predictions = np.argmax(predictions, 1)
    labels = np.argmax(Y_test, 1)
    for i in range(10):
        logger.info("predict:{0} , label:{1}".format(predictions[i], labels[i]))
Code Example #14
def mnist_conv():
    """
    Train an image classifier with a convolutional neural network.
    :return:
    """
    batch_size = 128
    nb_classes = 10  # number of classes
    nb_epoch = 20  # number of training epochs
    # input image dimensions
    img_rows, img_cols = 28, 28
    # number of convolutional filters
    nb_filters = 32
    # max-pooling size
    pool_size = (2, 2)
    # convolution kernel size
    kernel_size = (3, 3)

    (X_train, y_train), (X_test, y_test) = mnist.load_data(os.path.join(root_path, "data", "mnist", "mnist.npz"))

    if K.image_dim_ordering() == 'th':
        # Theano ordering: (samples, channels, rows, cols)
        X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
        X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        # TensorFlow ordering: (samples, rows, cols, channels)
        X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
        X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255

    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    model = keras.models.Sequential()
    # Separable convolution (alternative first layer)
    # model.add(SeparableConv2D(nb_filters, kernel_size, padding='valid', activation="relu",
    #                           input_shape=input_shape, data_format="channels_last"))
    model.add(Convolution2D(nb_filters, kernel_size, padding='valid', input_shape=input_shape,
                            data_format="channels_last", activation="relu"))
    model.add(Convolution2D(nb_filters, kernel_size, padding="valid", activation="relu",
                            data_format="channels_last"))
    model.add(MaxPooling2D(pool_size=pool_size))

    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation="relu"))
    model.add(Dropout(0.25))
    model.add(Dense(nb_classes, activation="softmax"))

    model.summary()
    # keras.utils.vis_utils.plot_model(model, to_file="keras_mnist_cnn.png")

    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])

    os.makedirs(os.path.join(root_path, "tmp", "mnist", "logs"), exist_ok=True)
    tensorBoard_callback = keras.callbacks.TensorBoard(os.path.join(root_path, "tmp", "mnist", "logs"),
                                                       batch_size=batch_size, write_images=True, write_graph=True)
    os.makedirs(os.path.join(root_path, "tmp", "mnist", "models"), exist_ok=True)
    model_checkpoint = keras.callbacks.ModelCheckpoint(
        os.path.join(root_path, "tmp", "mnist", "models", "mnist_model_{epoch:02d}-{val_acc:.4f}.h5"),
        save_best_only=False, save_weights_only=False, monitor='val_acc')
    model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch,
              verbose=1, validation_data=(X_test, Y_test), callbacks=[tensorBoard_callback, model_checkpoint])

    test_loss, test_acc = model.evaluate(X_test, Y_test, verbose=1)

    logger.info('Test loss: {0}, accuracy: {1}'.format(test_loss, test_acc))

    predictions = model.predict(X_test)
    predictions = np.argmax(predictions, 1)
    labels = np.argmax(Y_test, 1)
    for i in range(10):
        logger.info("predict:{0} , label:{1}".format(predictions[i], labels[i]))
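The checkpoints written by ModelCheckpoint above can be reloaded for inference. A minimal sketch, where the checkpoint filename is only a hypothetical instance of the mnist_model_{epoch}-{val_acc}.h5 pattern, and X_test, logger, and root_path are assumed to be set up as in the function above:

import os

import keras
import numpy as np

# Load one of the saved checkpoints (the filename is a hypothetical example).
checkpoint_path = os.path.join(root_path, "tmp", "mnist", "models", "mnist_model_20-0.9900.h5")
model = keras.models.load_model(checkpoint_path)

# Classify a single test image; X_test is preprocessed as in mnist_conv (shape (28, 28, 1), scaled to [0, 1]).
sample = X_test[:1]
digit = np.argmax(model.predict(sample), axis=1)[0]
logger.info("predicted digit: {0}".format(digit))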