Example #1
import os

# imports assumed by the helpers below (old Keras 1.x / early-2.x API)
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
def Scaling(path, image_save_dir, aug_number):
    datagen = ImageDataGenerator(zoom_range=0.3, fill_mode='constant')
    img = load_img(path, grayscale=True)  # this is a PIL image
    x = img_to_array(img)  # convert the PIL image to a numpy array
    x = x.reshape((1, ) + x.shape)  # add a leading batch dimension: (1, h, w, 1)
    image_name = os.path.basename(path)
    prefix = image_name.split(".")[0]
    i = 1
    for batch in datagen.flow(x,
                              batch_size=1,
                              save_to_dir=image_save_dir,
                              save_prefix=prefix + "_aug_" + 'Scal',
                              save_format='jpg'):
        i += 1
        if i > aug_number:
            break  # otherwise the generator loops indefinitely
Example #2
def Random_translation(path, image_save_dir, aug_number):
    datagen = ImageDataGenerator(width_shift_range=0.05,
                                 height_shift_range=0.05)
    img = load_img(path, grayscale=True)  # this is a PIL image
    x = img_to_array(img)  # convert the PIL image to a numpy array
    x = x.reshape((1, ) + x.shape)  # add a leading batch dimension: (1, h, w, 1)
    image_name = os.path.basename(path)
    prefix = image_name.split(".")[0]
    i = 1
    for batch in datagen.flow(x,
                              batch_size=1,
                              save_to_dir=image_save_dir,
                              save_prefix=prefix + "_aug_" + 'trans',
                              save_format='jpg'):
        i += 1
        if i > aug_number:
            break  # otherwise the generator loops indefinitely
Example #3
def Random_affine_transform(path, image_save_dir, aug_number):
    datagen = ImageDataGenerator(
        shear_range=3)  # shear transform; shear_range is the shear angle in degrees
    img = load_img(path, grayscale=True)  # this is a PIL image
    x = img_to_array(img)  # convert the PIL image to a numpy array
    x = x.reshape((1, ) + x.shape)  # add a leading batch dimension: (1, h, w, 1)
    image_name = os.path.basename(path)
    prefix = image_name.split(".")[0]
    i = 1
    for batch in datagen.flow(x,
                              batch_size=1,
                              save_to_dir=image_save_dir,
                              save_prefix=prefix + "_aug_" + 'aff',
                              save_format='jpg'):
        i += 1
        if i > aug_number:
            break  # otherwise the generator loops indefinitely
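A minimal driver for the three helpers above (paths and aug_number are hypothetical; each helper writes aug_number augmented copies of one image into image_save_dir, which must already exist):

import glob
import os

save_dir = './augmented'                      # hypothetical output directory
os.makedirs(save_dir, exist_ok=True)          # save_to_dir does not create it
for path in glob.glob('./images/*.jpg'):      # hypothetical input directory
    Scaling(path, save_dir, aug_number=5)
    Random_translation(path, save_dir, aug_number=5)
    Random_affine_transform(path, save_dir, aug_number=5)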
Example #4
    # NOTE: the start of this snippet is missing -- it picks up partway
    # through an ImageDataGenerator(...) call; the zerosquare* arguments are
    # custom (presumably square-region zeroing noise), not stock Keras:
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images horizontally
        vertical_flip=False,  # no vertical flips
        zerosquare=True,  # custom noise options
        zerosquareh=noises,
        zerosquarew=noises,
        zerosquareintern=0.0)
    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    datagen.fit(X_train)
    # fit the model on the batches generated by datagen.flow()

    if weighted:
        model.fit_generator(datagen.flow(X_train,
                                         Y_train,
                                         batch_size=batch_size),
                            samples_per_epoch=X_train.shape[0],
                            nb_epoch=nb_epoch,
                            validation_data=(X_test, Y_test),
                            callbacks=callbacks_list,
                            class_weight=[weights[0], weights[1]])
    else:
        print(Y_train.shape)
        model.fit_generator(datagen.flow(X_train,
                                         Y_train,
                                         batch_size=batch_size),
                            samples_per_epoch=X_train.shape[0],
                            nb_epoch=nb_epoch,
                            validation_data=(X_test, Y_test),
                            callbacks=callbacks_list)
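Passing class_weight as a list works in old Keras, but newer versions expect a dict mapping class index to weight; the equivalent argument would be:

class_weight = {0: weights[0], 1: weights[1]}  # dict form required by newer Keras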
Example #5
    reduce_lr = ReduceLROnPlateau(monitor='val_f1_score',
                                  factor=np.sqrt(0.1),
                                  patience=reduce_lr_patience,
                                  verbose=2,
                                  mode='max',
                                  cooldown=1)

    model.compile(loss=losses[loss_name],
                  optimizer=opt,
                  metrics=[f1_score, precision, recall])
    # Start fitting model
    fold = 50
    epoch = 10000  # must be an int (1e4 is a float); EarlyStopping ends training sooner
    print(" Fine tune " + model_name + ": \n")
    model.fit_generator(datagen.flow(x_train,
                                     '../data/train_data',
                                     width,
                                     y_train,
                                     batch_size=batch_size),
                        steps_per_epoch=len(x_train) / batch_size / fold,
                        validation_data=val_datagen.flow(
                            x_val,
                            '../data/val_data',
                            width,
                            y_val,
                            batch_size=batch_size),
                        validation_steps=len(x_val) / batch_size,
                        epochs=epoch,
                        callbacks=[early_stopping, reduce_lr, checkpointer],
                        workers=4)
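The f1_score, precision, and recall metrics passed to compile() (and monitored as val_f1_score by the callbacks) are custom and not defined in these snippets. A minimal batch-wise sketch using the Keras backend, assuming multi-hot labels and sigmoid outputs:

from keras import backend as K

def precision(y_true, y_pred):
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))  # true positives
    predicted_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return tp / (predicted_pos + K.epsilon())

def recall(y_true, y_pred):
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    return tp / (possible_pos + K.epsilon())

def f1_score(y_true, y_pred):
    p, r = precision(y_true, y_pred), recall(y_true, y_pred)
    return 2 * p * r / (p + r + K.epsilon())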
Example #6
    patience = 5
    early_stopping = EarlyStopping(
        monitor='val_f1_score', patience=patience, verbose=2, mode='max')
    checkpointer = ModelCheckpoint(
        filepath=f'../models/{model_name}_{loss_name}.h5',
        monitor='val_f1_score', verbose=0, mode='max', save_best_only=True)
    reduce_lr = ReduceLROnPlateau(monitor='val_f1_score',
                                  factor=np.sqrt(0.1),
                                  patience=reduce_lr_patience,
                                  verbose=2, mode='max', cooldown=1)

    model.compile(
        loss=losses[loss_name],
        optimizer=opt,
        metrics=[f1_score, precision, recall])
    # Start fitting model
    batch_size = 16
    fold = 50
    epoch = 10000  # must be an int (1e4 is a float)
    print(" Fine tune " + model_name + ": \n")
    model.fit_generator(
        datagen.flow(x_train, '../data/train_data', width,
                     y_train, batch_size=batch_size),
        steps_per_epoch=len(x_train) / batch_size / fold,
        validation_data=val_datagen.flow(
            x_val, '../data/val_data', width, y_val, batch_size=batch_size),
        validation_steps=len(x_val) / batch_size,
        epochs=epoch,
        callbacks=[early_stopping, reduce_lr, checkpointer],
        workers=4)
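Note that datagen.flow(x, directory, width, y, batch_size=...) is not the stock Keras signature (ImageDataGenerator.flow takes arrays only), so the project presumably subclasses ImageDataGenerator to load images from disk by id. A rough, simplified sketch of equivalent behavior as a plain generator (hypothetical name; file naming borrowed from Examples #7/#8, with resizeAndPad and preprocess_input omitted for brevity):

import cv2
import numpy as np

def flow_from_ids(ids, directory, width, y, batch_size=16, shuffle=True):
    """Yield (batch_x, batch_y) forever, reading '<directory>/<id>.jpg'."""
    ids = np.asarray(ids)
    while True:
        order = np.random.permutation(len(ids)) if shuffle else np.arange(len(ids))
        for start in range(0, len(order), batch_size):
            pick = order[start:start + batch_size]
            batch_x = np.zeros((len(pick), width, width, 3), dtype=np.float32)
            for k, idx in enumerate(pick):
                img = cv2.imread(f'{directory}/{ids[idx]}.jpg')   # BGR order
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                batch_x[k] = cv2.resize(img, (width, width))
            yield batch_x, y[pick]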
Example #7
batch_y = y_val
# batch_x and index_array are assumed to be defined earlier, e.g.
# batch_x = np.zeros((len(index_array), width, width, 3))
for i, j in enumerate(tqdm(index_array)):
    s_img = cv2.imread(f'../data/val_data/{j+1}.jpg')  # OpenCV loads BGR
    b, g, r = cv2.split(s_img)  # split the B, G, R channels
    rgb_img = cv2.merge([r, g, b])  # re-merge in RGB order
    x = resizeAndPad(rgb_img, (width, width))
    batch_x[i] = x
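The split/merge pair above is equivalent to a single color-space conversion; a shorter form with the same result:

import cv2

s_img = cv2.imread('../data/val_data/1.jpg')      # BGR order
rgb_img = cv2.cvtColor(s_img, cv2.COLOR_BGR2RGB)  # same as split + merge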

model_names = ['Xception_f1_59', 'Xception_f1_5945']
for model_name in model_names:
    with CustomObjectScope({
            'f1_loss': f1_loss,
            'f1_score': f1_score,
            'precision': precision,
            'recall': recall
    }):
        model = load_model(f'../models/{model_name}.h5')

    # y_pred_val = model.predict(batch_x, verbose=1)
    # print(model_name, f1_score(y_val, y_pred_val))

    val_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
    y_pred_val = model.predict_generator(val_datagen.flow(x_val,
                                                          '../data/val_data',
                                                          width,
                                                          y_val,
                                                          batch_size=3,
                                                          shuffle=False),
                                         verbose=1)
    print(model_name, f1_score_np(y_val, y_pred_val))
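f1_score_np is also not defined in these snippets; a plausible numpy version, assuming a micro-averaged multi-label F1 with a 0.5 threshold:

import numpy as np

def f1_score_np(y_true, y_pred, threshold=0.5, eps=1e-7):
    y_pred = (y_pred > threshold).astype(np.float64)  # binarize probabilities
    tp = (y_true * y_pred).sum()                      # true positives over all labels
    p = tp / (y_pred.sum() + eps)                     # micro precision
    r = tp / (y_true.sum() + eps)                     # micro recall
    return 2 * p * r / (p + r + eps)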
Example #8
    data['imageId'] = data['imageId'].astype(np.uint32)

mlb = MultiLabelBinarizer()
train_label = mlb.fit_transform(train['labelId'])

y_test = np.zeros((39706, 228))
x_test = np.arange(y_test.shape[0]) + 1
width = 224

model_name = 'Xception'
# with CustomObjectScope({'f1_loss': f1_loss, 'f1_score': f1_score, 'precision': precision, 'recall': recall}):
#     model = load_model(f'../models/{model_name}_f1.h5')
test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
y_pred_test = model.predict_generator(test_datagen.flow(x_test,
                                                        '../data/test_data',
                                                        width,
                                                        y_test,
                                                        batch_size=1,
                                                        shuffle=False),
                                      verbose=1)
np.save(f'../data/json/y_pred_{model_name}', y_pred_test)

# y_pred_test_xe = y_pred_test.copy()
# y_pred_test = (y_pred_test_xe + y_pred_test_in) / 2

y_pred_test1 = np.round(y_pred_test)
where_1 = mlb.inverse_transform(y_pred_test1)

file = open('../data/json/test.csv', 'w')
file.write('image_id,label_id\n')
for i in x_test:
    where_one = where_1[i - 1]
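    # (snippet ends here; a plausible completion, assumed, not from the source:)
    # file.write(f'{i},{" ".join(map(str, where_one))}\n')
# file.close()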