Code example #1
0
File: ssd7_trainer.py  Project: SpikeKing/ssd_keras
    def build_ssd7_model(self):
        """Build and compile an SSD7 detector in training mode.

        Clears any existing Keras session, constructs the SSD7 network
        from the instance's configuration (image size, classes, anchor
        scales/ratios, variances), and compiles it with Adam and the
        standard SSD multibox loss.

        Returns:
            The compiled Keras model, ready for training.
        """
        pixel_mean = 127.5   # subtracted from every input pixel
        pixel_scale = 127.5  # every input pixel is divided by this

        # Clear previous models from memory.
        K.clear_session()

        model = build_model(
            image_size=(self.img_height, self.img_width, self.img_channels),
            n_classes=self.n_classes,
            mode='training',
            l2_regularization=0.0005,
            scales=self.scales,
            aspect_ratios_global=self.aspect_ratios,
            aspect_ratios_per_layer=None,
            variances=self.variances,
            normalize_coords=self.normalize_coords,
            subtract_mean=pixel_mean,
            divide_by_stddev=pixel_scale)

        # model.load_weights('./ssd7_weights.h5', by_name=True)  # transfer learning / fine-tune

        optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999,
                         epsilon=1e-08, decay=0.0)
        ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)

        model.compile(optimizer=optimizer, loss=ssd_loss.compute_loss)
        return model
Code example #2
0
def run():
    """Train a 2-D conv model with stratified K-fold CV.

    For each fold: trains with early stopping and checkpointing, reloads
    the best checkpoint, saves train/test predictions as .npy, and writes
    a top-3-label submission CSV into PREDICTION_FOLDER.
    """
    # FIX: removed a dead `get_2d_conv_model(config)` call here whose
    # result was discarded — a fresh model is built inside each fold.
    X_train = prepare_data(train, config, '../input/audio_train/')
    X_test = prepare_data(test, config, '../input/audio_test/')
    y_train = to_categorical(train.label_idx, num_classes=config.n_classes)

    X_train = normalize_data(X_train)
    X_test = normalize_data(X_test)

    PREDICTION_FOLDER = "predictions_2d_conv"
    if not os.path.exists(PREDICTION_FOLDER):
        os.mkdir(PREDICTION_FOLDER)
    # Start each run with a clean TensorBoard log directory.
    if os.path.exists('logs/' + PREDICTION_FOLDER):
        shutil.rmtree('logs/' + PREDICTION_FOLDER)

    # NOTE(review): `StratifiedKFold(y, n_folds=...)` is the pre-0.18
    # sklearn.cross_validation API (removed in sklearn 0.20) — confirm
    # the installed scikit-learn version before upgrading this call.
    skf = StratifiedKFold(train.label_idx, n_folds=config.n_folds)
    for i, (train_split, val_split) in enumerate(skf):
        K.clear_session()  # free the previous fold's graph and memory
        X, y = X_train[train_split], y_train[train_split]
        X_val, y_val = X_train[val_split], y_train[val_split]

        checkpoint = ModelCheckpoint('best_%d.h5' % i, monitor='val_loss',
                                     verbose=1, save_best_only=True)
        early = EarlyStopping(monitor="val_loss", mode="min", patience=5)
        tb = TensorBoard(log_dir='./logs/' + PREDICTION_FOLDER + '/fold_%i' % i,
                         write_graph=True)
        callbacks_list = [checkpoint, early, tb]

        print("#" * 50)
        print("Fold: ", i)

        model = get_2d_conv_model(config)
        # FIX: dropped the unused `history =` binding.
        model.fit(X, y, validation_data=(X_val, y_val),
                  callbacks=callbacks_list,
                  batch_size=64, epochs=config.max_epochs)
        # Reload the best (lowest val_loss) checkpoint before predicting.
        model.load_weights('best_%d.h5' % i)

        # Save train predictions
        predictions = model.predict(X_train, batch_size=64, verbose=1)
        np.save(PREDICTION_FOLDER + "/train_predictions_%d.npy" % i, predictions)

        # Save test predictions
        predictions = model.predict(X_test, batch_size=64, verbose=1)
        np.save(PREDICTION_FOLDER + "/test_predictions_%d.npy" % i, predictions)

        # Make a submission file: top-3 labels per row, space-separated.
        top_3 = np.array(LABELS)[np.argsort(-predictions, axis=1)[:, :3]]
        predicted_labels = [' '.join(list(x)) for x in top_3]
        test['label'] = predicted_labels
        test[['label']].to_csv(PREDICTION_FOLDER + "/predictions_%d.csv" % i)
Code example #3
0
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from keras.layers import K, Activation
from keras.engine import Layer
from keras.layers import Dense, Input, Embedding, Dropout, Bidirectional, GRU, Flatten, SpatialDropout1D

# FIX: removed a dead triple-quoted string that duplicated the
# statements below verbatim (commented-out code).
K.clear_session()  # start from a fresh Keras session/graph

# Model hyper-parameters. Names suggest a capsule network on top of a
# GRU; their usage is outside this view — confirm against the model code.
gru_len = 128
Routings = 5
Num_capsule = 10
Dim_capsule = 16
dropout_p = 0.25
rate_drop_dense = 0.28


def squash(x, axis=-1):
    # s_squared_norm is really small
    # s_squared_norm = K.sum(K.square(x), axis, keepdims=True) + K.epsilon()
    # scale = K.sqrt(s_squared_norm)/ (0.5 + s_squared_norm)
    # return scale * x
    s_squared_norm = K.sum(K.square(x), axis, keepdims=True)
    scale = K.sqrt(s_squared_norm + K.epsilon())
Code example #4
0
File: application.py  Project: maxim-xu/img-process
def feature_extract_TSNE(labels, all_img_list):
    """Extract autoencoder latent features, embed them with t-SNE, and
    return a base64-encoded PNG scatter plot (one series per image group).

    Parameters
    ----------
    labels : sequence of str
        Display name for each group, in the same order as ``all_img_list``.
    all_img_list : sequence of sequences of str
        One inner sequence of image file paths per group.

    Returns
    -------
    str
        Base64-encoded PNG of the 2-D t-SNE scatter plot.
    """
    allimages = []  # image files
    imglabel = []  # image labels
    num_label = 0  # running group index, used as the numeric class label
    for sub_img_list in all_img_list:
        sub_imgs = []
        for image in sub_img_list:
            img = Image.open(image)
            width, height = img.size
            # Downscale anything that isn't already 190x190.
            # NOTE(review): Image.ANTIALIAS was removed in Pillow 10;
            # newer Pillow requires Image.Resampling.LANCZOS here.
            if width != 190 or height != 190:
                img.thumbnail((190, 190), Image.ANTIALIAS)
            # NOTE(review): thumbnail() preserves aspect ratio, so this
            # reshape to (190, 190, 1) assumes source images are square
            # single-channel — confirm upstream guarantees.
            img = np.asarray(img, dtype=np.float32).reshape(190, 190, 1)
            img -= np.mean(img)  # zero-center each image individually
            sub_imgs.append(img)
        # Append one numeric label per image in this group.
        imglabel = np.concatenate((imglabel, np.ones(
            (len(sub_imgs))) * num_label),
                                  axis=0)
        num_label += 1
        allimages.append(sub_imgs)

    # Flatten the per-group lists into a single (N, 190, 190, 1) batch.
    allimages = np.asarray(allimages).reshape(-1, 190, 190, 1)
    imglabel = np.asarray(imglabel)

    autoencoder = ae_encoder()

    # Load weights
    autoencoder.load_weights('h5/ACbin_33x128fl128GA_weights.h5')
    # NOTE(review): _make_predict_function() is a private Keras 1.x/2.x
    # API (thread-safety workaround); it does not exist in tf.keras 2.x.
    autoencoder._make_predict_function()
    batch_size = 10

    # Extract the output of the 'globalAve' layer as the latent features.
    intermediate_layer_model = Model(
        inputs=autoencoder.input,
        outputs=autoencoder.get_layer('globalAve').output)

    # Compiled only so predict() can run; 'sgd'/'mse' are never used
    # for training here.
    intermediate_layer_model.compile('sgd', 'mse')
    # Output the latent layer
    print("before predict")
    intermediate_output = intermediate_layer_model.predict(
        allimages, batch_size=batch_size, verbose=1)
    print("after predict")

    K.clear_session()  # release the Keras graph once features are extracted

    # TSNE: embed the flattened latent vectors into 2-D.
    Y0 = TSNE(n_components=2,
              init='random',
              random_state=0,
              perplexity=30,
              verbose=1).fit_transform(
                  intermediate_output.reshape(intermediate_output.shape[0],
                                              -1))

    # Output scatter plot
    df = pandas.DataFrame(dict(x=Y0[:, 0], y=Y0[:, 1], label=imglabel))
    groups = df.groupby('label')

    # Plot grouped scatter
    fig, ax = plt.subplots()
    ax.margins(0.05)  # Optional, just adds 5% padding to the autoscaling

    i = 0
    for name, group in groups:
        # Replace the numeric group key with the caller-supplied name;
        # relies on groupby iterating groups in ascending label order,
        # which matches the order groups were appended above.
        name = labels[i]
        ax.plot(group.x,
                group.y,
                marker='o',
                linestyle='',
                ms=2,
                label=name,
                alpha=0.5)
        i += 1

    # Plot features
    plt.title('tSNE plot')
    ax.legend()

    # Encode the figure as PNG, then base64 for embedding in a web page.
    png = BytesIO()
    plt.savefig(png, format='png')
    png.seek(0)
    plot_url = base64.b64encode(png.getvalue()).decode()
    return plot_url