import traceback

import numpy as np
from keras.utils import np_utils, generic_utils
from sklearn.metrics import accuracy_score

# The snippets below share module-level globals defined elsewhere:
# one_load_sample_num, batch_size, small_batch_size, need_augment, datagen,
# load_data_from_list and create_pairs.


def valid_model(valid_path_list, model, nb_classes, pic_shape):
    np.random.shuffle(valid_path_list)
    # No need to evaluate every sample; a random subset is enough
    valid_load_num = min(1, len(valid_path_list) // one_load_sample_num)
    # valid_load_num = len(valid_path_list) // one_load_sample_num
    all_acc = []
    if valid_load_num == 0:
        X_valid, y_valid = load_data_from_list(valid_path_list, pic_shape, need_augment=need_augment)
        Y_valid = np_utils.to_categorical(y_valid, nb_classes=nb_classes)
        # if K.image_dim_ordering() != 'th':
        #     X_valid = np.transpose(X_valid, (0, 3, 1, 2))
        Y_predict_batch = model.predict(X_valid, batch_size=batch_size, verbose=1)
        test_acc = accuracy_score(np.argmax(Y_valid, axis=1), np.argmax(Y_predict_batch, axis=1))
        all_acc.append(test_acc)
    else:
        for valid_load_index in range(valid_load_num):
            X_valid, y_valid = load_data_from_list(valid_path_list[
                        valid_load_index*one_load_sample_num:(valid_load_index+1)*one_load_sample_num],
                                                   pic_shape, need_augment=need_augment)
            Y_valid = np_utils.to_categorical(y_valid, nb_classes=nb_classes)
            # if K.image_dim_ordering() != 'th':
            #     X_valid = np.transpose(X_valid, (0, 3, 1, 2))
            Y_predict_batch = model.predict(X_valid, batch_size=batch_size, verbose=1)
            test_acc = accuracy_score(np.argmax(Y_valid, axis=1), np.argmax(Y_predict_batch, axis=1))
            all_acc.append(test_acc)
    mean_acc = np.mean(all_acc)
    return mean_acc
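
Every snippet on this page calls a load_data_from_list helper that is never shown. Below is a minimal sketch of what such a loader could look like, assuming paths of the form <label_dir>/<image_file>, OpenCV for decoding, and pic_shape as (height, width); only the name and signature come from the calls above, everything else is invented for illustration.

import os

import cv2
import numpy as np


def load_data_from_list(path_list, pic_shape, need_augment=False):
    # Hypothetical loader: read each image, resize it to pic_shape and
    # derive the integer class label from the parent directory name.
    X, y = [], []
    for path in path_list:
        img = cv2.imread(path)
        if img is None:
            continue  # skip unreadable files
        img = cv2.resize(img, (pic_shape[1], pic_shape[0]))
        if need_augment and np.random.rand() < 0.5:
            img = cv2.flip(img, 1)  # assumed augmentation: random mirror
        X.append(img)
        y.append(int(os.path.basename(os.path.dirname(path))))
    X = np.asarray(X, dtype='float32') / 255.0
    return X, np.asarray(y, dtype='int32')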
Example #2
def valid_model(valid_path_list, model, nb_classes, pic_shape):
    np.random.shuffle(valid_path_list)
    # No need to evaluate every sample; a random subset is enough
    valid_load_num = min(2, len(valid_path_list) // one_load_sample_num)

    all_loss = []

    for valid_load_index in range(valid_load_num):
        X_valid, y_valid = load_data_from_list(
            valid_path_list[valid_load_index *
                            one_load_sample_num:(valid_load_index + 1) *
                            one_load_sample_num], pic_shape)

        for X_batch, y_batch in datagen.flow(X_valid,
                                             y_valid,
                                             batch_size=batch_size):
            # build pairs from the augmented batch
            valid_pairs, valid_label, X_valid_first, y_valid_first, X_valid_second, \
                y_valid_second = create_pairs(X_batch, y_batch)
            pair_num = valid_pairs.shape[0]

            this_batch_num = pair_num // small_batch_size
            for k in range(this_batch_num):
                # predict_on_batch returns the model outputs; the third
                # output is used as the validation score
                loss = model.predict_on_batch([
                    valid_pairs[k * small_batch_size:(k + 1) *
                                small_batch_size, 0],
                    valid_pairs[k * small_batch_size:(k + 1) *
                                small_batch_size, 1]
                ])
                all_loss.append(loss[2])
            # datagen.flow is an endless generator; stop after one batch
            break

    mean_loss = np.mean(all_loss)
    return mean_loss
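
The Siamese variants above unpack six values from a create_pairs helper that is not shown either: the stacked pairs, a same/different label, and each side of the pairs with its class labels. A minimal sketch consistent with that unpacking, assuming at least two classes per loaded chunk; the pairing strategy (one positive and one negative partner per sample) is invented for illustration.

import numpy as np


def create_pairs(X, y):
    # Hypothetical pair builder matching the six-value unpacking above.
    pairs, labels, first_idx, second_idx = [], [], [], []
    by_class = {c: np.where(y == c)[0] for c in np.unique(y)}
    for i in range(len(X)):
        c = y[i]
        # positive partner: a random sample of the same class
        j = int(np.random.choice(by_class[c]))
        pairs.append([X[i], X[j]])
        labels.append(1)
        first_idx.append(i)
        second_idx.append(j)
        # negative partner: a random sample of a different class
        other = np.random.choice([k for k in by_class if k != c])
        j = int(np.random.choice(by_class[other]))
        pairs.append([X[i], X[j]])
        labels.append(0)
        first_idx.append(i)
        second_idx.append(j)
    pairs = np.asarray(pairs)
    first_idx = np.asarray(first_idx)
    second_idx = np.asarray(second_idx)
    return (pairs, np.asarray(labels),
            pairs[:, 0], y[first_idx],
            pairs[:, 1], y[second_idx])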
def valid_model(valid_path_list, model, nb_classes, pic_shape):
    np.random.shuffle(valid_path_list)
    valid_load_num = len(valid_path_list) // one_load_sample_num
    all_acc = []
    for valid_load_index in range(valid_load_num):
        X_valid, y_valid = load_data_from_list(valid_path_list[
                        valid_load_index*one_load_sample_num:(valid_load_index+1)*one_load_sample_num], pic_shape)
        Y_valid = np_utils.to_categorical(y_valid, nb_classes=nb_classes)
        Y_predict_batch = model.predict(X_valid, batch_size=batch_size, verbose=1)
        test_acc = accuracy_score(np.argmax(Y_valid, axis=1), np.argmax(Y_predict_batch, axis=1))
        all_acc.append(test_acc)

    # evaluate the remaining paths; the tail is capped at index 4096
    X_valid, y_valid = load_data_from_list(valid_path_list[valid_load_num*one_load_sample_num:4096], pic_shape)
    Y_valid = np_utils.to_categorical(y_valid, nb_classes=nb_classes)

    Y_predict_batch = model.predict(X_valid, batch_size=batch_size, verbose=1)

    test_acc = accuracy_score(np.argmax(Y_valid, axis=1), np.argmax(Y_predict_batch, axis=1))
    all_acc.append(test_acc)

    mean_acc = np.mean(all_acc)
    return mean_acc
def train_model(train_path_list, model, nb_classes, pic_shape):
    length = len(train_path_list)
    train_load_num = length // one_load_sample_num
    np.random.shuffle(train_path_list)
    for train_load_index in range(train_load_num):
        try:
            X_train, y_train = load_data_from_list(train_path_list[
                            train_load_index*one_load_sample_num:
                            (train_load_index+1)*one_load_sample_num], pic_shape, need_augment=True)
            Y_train = np_utils.to_categorical(y_train, nb_classes)
            # data augmentation is handled inside load_data
            model.fit(X_train, Y_train, batch_size=batch_size, shuffle=True, nb_epoch=3)
        except Exception:
            traceback.print_exc()
            continue
def train_model(train_path_list, model, nb_classes, pic_shape):
    length = len(train_path_list)
    train_load_num = length // one_load_sample_num + 1
    np.random.shuffle(train_path_list)
    for train_load_index in range(train_load_num):
        try:
            X_train, y_train = load_data_from_list(train_path_list[
                            train_load_index*one_load_sample_num:
                            (train_load_index+1)*one_load_sample_num], pic_shape, need_augment=need_augment)
            Y_train = np_utils.to_categorical(y_train, nb_classes)
            # data augmentation is handled inside load_data
            # if K.image_dim_ordering() != 'th':
            #     X_train = np.transpose(X_train, (0, 3, 1, 2))
            model.fit(X_train, Y_train, batch_size=batch_size, shuffle=True, nb_epoch=1, verbose=1)
        except Exception:
            traceback.print_exc()
            continue
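
None of the snippets show how train_model and valid_model are driven. A plausible top-level loop, assuming the globals above; the function name, the round count and the checkpoint filename are hypothetical.

def run_training(train_path_list, valid_path_list, model, nb_classes,
                 pic_shape, nb_rounds=50):
    # Hypothetical driver: alternate chunked training and validation,
    # checkpointing whenever the validation accuracy improves.
    best_acc = 0.0
    for round_index in range(nb_rounds):
        train_model(train_path_list, model, nb_classes, pic_shape)
        acc = valid_model(valid_path_list, model, nb_classes, pic_shape)
        print('round %d: valid acc %.4f' % (round_index, acc))
        if acc > best_acc:
            best_acc = acc
            model.save_weights('best_model.h5', overwrite=True)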
Example #7
def train_model(train_path_list, model, nb_classes, pic_shape):
    length = len(train_path_list)
    train_load_num = length // one_load_sample_num
    np.random.shuffle(train_path_list)
    progbar = generic_utils.Progbar(length)
    for train_load_index in range(train_load_num):
        try:
            X_train, y_train = load_data_from_list(
                train_path_list[train_load_index *
                                one_load_sample_num:(train_load_index + 1) *
                                one_load_sample_num], pic_shape)

            datagen.fit(X_train, augment=False)
            for X_batch, y_batch in datagen.flow(X_train,
                                                 y_train,
                                                 batch_size=batch_size):
                # build pairs from the augmented batch
                train_pairs, train_label, X_train_first, y_train_first, X_train_second, \
                        y_train_second = create_pairs(X_batch, y_batch)
                y_train_first = np_utils.to_categorical(
                    y_train_first, nb_classes)
                y_train_second = np_utils.to_categorical(
                    y_train_second, nb_classes)
                pair_num = train_pairs.shape[0]

                this_batch_num = pair_num // small_batch_size
                for k in range(this_batch_num):
                    loss = model.train_on_batch([
                        train_pairs[k * small_batch_size:(k + 1) *
                                    small_batch_size, 0],
                        train_pairs[k * small_batch_size:(k + 1) *
                                    small_batch_size, 1]
                    ], [
                        train_label[k * small_batch_size:(k + 1) *
                                    small_batch_size],
                        y_train_first[k * small_batch_size:(k + 1) *
                                      small_batch_size, :],
                        y_train_second[k * small_batch_size:(k + 1) *
                                       small_batch_size, :]
                    ])
                    print(loss)
                    # progbar.add(X_batch.shape[0], values=[('train loss', loss[2])])
                break
        except Exception:
            traceback.print_exc()
            continue
Example #8
def train_model(train_path_list, model, nb_classes, pic_shape):
    length = len(train_path_list)
    train_load_num = length // one_load_sample_num
    np.random.shuffle(train_path_list)
    for train_load_index in range(train_load_num):
        try:
            X_train, y_train = load_data_from_list(
                train_path_list[train_load_index *
                                one_load_sample_num:(train_load_index + 1) *
                                one_load_sample_num],
                pic_shape,
                need_augment=True)
            Y_train = np_utils.to_categorical(y_train, nb_classes)
            # data augmentation is handled inside load_data
            model.fit(X_train,
                      Y_train,
                      batch_size=batch_size,
                      shuffle=True,
                      nb_epoch=3)
        except Exception:
            traceback.print_exc()
            continue
def train_model(train_path_list, model, nb_classes, pic_shape):
    length = len(train_path_list)
    train_load_num = length // one_load_sample_num
    np.random.shuffle(train_path_list)
    progbar = generic_utils.Progbar(length)
    for train_load_index in range(train_load_num):
        try:
            X_train, y_train = load_data_from_list(train_path_list[
                            train_load_index*one_load_sample_num: (train_load_index+1)*one_load_sample_num], pic_shape)
            Y_train = np_utils.to_categorical(y_train, nb_classes)
            print(X_train.shape, Y_train.shape)
            # model.fit(X_train, Y_train, batch_size=batch_size, shuffle=True, validation_split=0.1, nb_epoch=2)
            datagen.fit(X_train, augment=False)
            sample_num = 0
            for X_batch, Y_batch in datagen.flow(X_train, Y_train, batch_size=batch_size):
                loss = model.train_on_batch(X_batch, Y_batch)
                progbar.add(X_batch.shape[0], values=[('train loss', loss)])
                sample_num += X_batch.shape[0]
                if sample_num >= X_train.shape[0]:
                    break
        except Exception:
            traceback.print_exc()
            continue
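
The flow-based variants rely on a module-level datagen. Below is a minimal configuration consistent with the datagen.fit/datagen.flow calls above, using Keras' ImageDataGenerator; the specific augmentation settings are assumptions. Note that datagen.fit(X, augment=False) only has an effect when featurewise statistics such as featurewise_center or zca_whitening are enabled.

from keras.preprocessing.image import ImageDataGenerator

# Hypothetical settings; only the fit()/flow() usage is visible above.
datagen = ImageDataGenerator(
    rotation_range=10,       # assumed: small random rotations
    width_shift_range=0.1,   # assumed: horizontal jitter
    height_shift_range=0.1,  # assumed: vertical jitter
    horizontal_flip=True)    # assumed: random mirroring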
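
The train_on_batch([pair_first, pair_second], [pair_label, y_first, y_second]) calls above imply a two-input, three-output network: a similarity head plus a softmax classifier on each branch. A minimal sketch of such a model, written against the Keras 2 functional API (the snippets themselves use Keras 1 argument names); every layer choice, the input shape and the loss assignment are assumptions.

import keras.backend as K
from keras.layers import Conv2D, Dense, Flatten, Input, Lambda, MaxPooling2D
from keras.models import Model


def build_siamese_model(input_shape, nb_classes):
    # Hypothetical architecture matching the three-output training calls;
    # input_shape is e.g. (64, 64, 3).
    def base_network():
        inp = Input(shape=input_shape)
        x = Conv2D(32, (3, 3), activation='relu')(inp)
        x = MaxPooling2D((2, 2))(x)
        x = Flatten()(x)
        x = Dense(128, activation='relu')(x)
        return Model(inp, x)

    base = base_network()  # weights are shared between the two branches
    input_first = Input(shape=input_shape)
    input_second = Input(shape=input_shape)
    feat_first = base(input_first)
    feat_second = base(input_second)

    # similarity head: Euclidean distance between the two embeddings
    distance = Lambda(lambda t: K.sqrt(
        K.sum(K.square(t[0] - t[1]), axis=1, keepdims=True)))(
        [feat_first, feat_second])
    # one softmax classifier per branch, matching y_first / y_second
    class_first = Dense(nb_classes, activation='softmax')(feat_first)
    class_second = Dense(nb_classes, activation='softmax')(feat_second)

    model = Model([input_first, input_second],
                  [distance, class_first, class_second])
    # 'mse' on the 0/1 pair label stands in for a proper contrastive loss
    model.compile(optimizer='adam',
                  loss=['mse', 'categorical_crossentropy',
                        'categorical_crossentropy'])
    return model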