コード例 #1
0
def preprocess():
    # # Splitting dataset in train,test and validate
    # x_train,y_train,x_test,y_test,valid_set_x,valid_set_y = split(training_per=0.8,test_per=0.1,validation_per=0.1)

    # x_train = x_train.reshape(x_train.shape[0],32,32,1)
    # x_test = x_test.reshape(x_test.shape[0],32,32,1)
    # valid_set_x = valid_set_x.reshape(valid_set_x.shape[0],32,32,1)
    # # print(x_train.shape)
    # # print(x_test.shape)
    # # print(valid_set_x.shape)

    # hot_enc = one_hot_encoding()
    # y_train = [hot_enc[int(x)] for x in y_train]
    # y_train = np.asarray(y_train)
    # y_train = y_train.reshape(x_train.shape[0], 10)

    # y_test = [hot_enc[int(x)] for x in y_test]
    # y_test = np.asarray(y_test)
    # y_test = y_test.reshape(x_test.shape[0], 10)

    # y_val = [hot_enc[int(x)] for x in valid_set_y]
    # y_val = np.asarray(y_val)
    # valid_set_y = y_val.reshape(valid_set_x.shape[0], 10)

    import tflearn.datasets.oxflower17 as oxflower17
    X, Y = oxflower17.load_data(one_hot=True)

    x_train, x_test_pre, y_train, y_test_pre = train_test_split(
        X, Y, test_size=0.20, random_state=42)
    x_test, x_validation, y_test, y_validation = train_test_split(
        x_test_pre, y_test_pre, test_size=0.1)

    return x_train, y_train, x_test, y_test, x_validation, y_validation
コード例 #2
0
ファイル: cnn.py プロジェクト: Neo01010/deep-learning
def alexnet():
    """Build and train AlexNet on the Oxford 17-flowers dataset (227x227)."""
    X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

    # AlexNet topology: conv/pool/LRN stem, three 3x3 conv layers,
    # then two dropout-regularized fully-connected layers and softmax.
    net = input_data(shape=[None, 227, 227, 3])
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    for n_filters in (384, 384, 256):
        net = conv_2d(net, n_filters, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 17, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)

    # Train; snapshot every 200 steps rather than per epoch.
    dnn = tflearn.DNN(net, checkpoint_path='model_alexnet',
                      max_checkpoints=1, tensorboard_verbose=2)
    dnn.fit(X, Y, n_epoch=1000, validation_set=0.1, shuffle=True,
            show_metric=True, batch_size=64, snapshot_step=200,
            snapshot_epoch=False, run_id='alexnet')
コード例 #3
0
def pre_train():
    """Pre-train AlexNet on oxflower17 and save it under models/.

    Skips training entirely when a finished model already exists on disk.
    """
    # TF checkpoints are written as several files; the '.index' file is the
    # reliable marker that a previous save completed.
    if os.path.isfile('models/pre_train.model.index'):
        print("Previous trained model exist.")
        return

    X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))
    net = create_alexnet(17)
    model = tflearn.DNN(net,
                        checkpoint_path='ckps/pre_train.ckp',
                        max_checkpoints=1,
                        tensorboard_verbose=2,
                        tensorboard_dir='tmp/pre_train_logs/')
    # NOTE(review): model.save() below never creates 'models/pre_train.model'
    # as a plain file (only .index/.data/.meta companions), so this resume
    # branch likely never triggers -- confirm whether it should test the
    # '.index' marker like the early-exit check above.
    if os.path.isfile('models/pre_train.model'):
        model.load('models/pre_train.model')
    model.fit(X,
              Y,
              n_epoch=100,
              validation_set=0.1,
              shuffle=True,
              show_metric=True,
              batch_size=64,
              snapshot_step=200,
              snapshot_epoch=False,
              run_id='pre_train')
    # BUG FIX: saving fails when the target directory does not exist yet;
    # create 'models/' before writing the checkpoint.
    os.makedirs('models', exist_ok=True)
    model.save('models/pre_train.model')
コード例 #4
0
def unmatch_data(X_data,
                 y_data,
                 unmatch_per,
                 oriset='cifar10',
                 name='oxflower17'):
    """Mix a fraction of out-of-distribution samples into the training data.

    Keeps (1 - unmatch_per) of `train_data_num` samples from the original
    dataset and fills the rest with samples drawn from the dataset named by
    `name`, resized/re-labeled to fit the original format.

    Returns:
        (X, y): concatenation of the matched and unmatched portions.
    """
    # Matched portion, drawn from the caller's dataset.
    size1 = round(train_data_num * (1 - unmatch_per))
    X1, y1 = random_data(X_data, y_data, size1)

    # Unmatched portion, drawn from the foreign dataset.
    size2 = round(train_data_num * unmatch_per)
    if name == 'FashionMNIST':
        (X_, y_), (X_test1, y_test1) = fashion_mnist.load_data()
        X2, y2 = random_data(X_, y_, size2)
    elif name == 'oxflower17':
        X_, y_ = oxflower17.load_data(dirname="17flowers", one_hot=False)
        X_ = np.array([cv2.resize(i, (32, 32)) for i in X_])
        # Flower labels are discarded and replaced with random classes 0-9.
        y_ = np.array([random.randint(0, 9) for _ in range(y_.shape[0])])
        print("oxflower y shape: ", y_.shape)
        # BUG FIX: X_new/y_new were previously only bound inside the
        # `size2 > y_.shape[0]` branch, raising NameError otherwise.
        X_new = X_
        y_new = y_
        if size2 > y_.shape[0]:
            # Tile the dataset until it holds at least `size2` samples.
            rnd = size2 // y_.shape[0] + 1
            for _ in range(rnd):
                X_new = np.concatenate([X_new, X_])
                y_new = np.concatenate([y_new, y_])
        X2, y2 = random_data(X_new, y_new, size2)
    else:
        # BUG FIX: an unknown name previously fell through to an opaque
        # NameError on X2; fail fast with an explicit message instead.
        raise ValueError("Unsupported unmatch dataset: %s" % name)

    print("Unmatched dataset shape: ", X2.shape)
    return np.concatenate([X1, X2]), np.concatenate([y1, y2])
コード例 #5
0
def vggnet():
    """Build and train a VGG-16-style network on oxflower17 (227x227)."""
    X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

    net = input_data(shape=[None, 227, 227, 3], name='input')

    # Five convolutional stages; each stage is a stack of same-width 3x3
    # convolutions followed by a 2x2 max-pool with stride 2.
    for width, depth in ((64, 2), (128, 2), (256, 3), (512, 3), (512, 3)):
        for _ in range(depth):
            net = conv_2d(net, width, 3, activation='relu')
        net = max_pool_2d(net, 2, strides=2)

    # Classifier head: LRN, two dropout-regularized FC layers, softmax.
    net = local_response_normalization(net)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='relu')
    net = dropout(net, 0.5)
    net = fully_connected(net, 17, activation='softmax')
    # Declare the optimizer, loss function and learning rate.
    net = regression(net,
                     optimizer='rmsprop',
                     loss='categorical_crossentropy',
                     learning_rate=0.01)

    # Training: n_epoch is the number of full passes over the dataset,
    # batch_size the number of samples per parameter update.
    model = tflearn.DNN(net,
                        checkpoint_path='model_vgg',
                        max_checkpoints=10,
                        tensorboard_verbose=2,
                        tensorboard_dir='logs/vgg')
    model.fit(X,
              Y,
              n_epoch=1,
              validation_set=0.1,
              shuffle=True,
              show_metric=True,
              batch_size=64,
              snapshot_step=200,
              snapshot_epoch=False,
              run_id='vgg')
コード例 #6
0
ファイル: utils.py プロジェクト: Asun0204/tensorfow-vgg16
def get_oxflower17_data(num_train=1000, num_validation=180, num_test=180):
    """Load oxflower17 and split it into train/validation/test subsets.

    BUG FIX: the original body returned X_train, y_train, ... without ever
    defining them (the shuffle/subsample steps were only comments), which
    raised NameError.  The splits are now actually computed.

    Returns:
        (X_train, y_train, X_val, y_val, X_test, y_test)
    """
    import numpy as np

    # Load the raw oxflower17 data
    X, y = oxflower17.load_data()

    # Shuffle the data so the class-ordered dataset is mixed before slicing.
    perm = np.random.permutation(X.shape[0])
    X, y = X[perm], y[perm]

    # Subsample the data into consecutive, non-overlapping splits.
    val_end = num_train + num_validation
    test_end = val_end + num_test
    X_train, y_train = X[:num_train], y[:num_train]
    X_val, y_val = X[num_train:val_end], y[num_train:val_end]
    X_test, y_test = X[val_end:test_end], y[val_end:test_end]

    return X_train, y_train, X_val, y_val, X_test, y_test
コード例 #7
0
ファイル: cnnTF.py プロジェクト: Renwoxin/webDL
def vggnet():
    """Build and train a VGG-16-style network on oxflower17 (227x227)."""
    X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

    # Building 'VGG Network': five conv stages, each a stack of same-width
    # 3x3 convolutions followed by a 2x2 max-pool with stride 2.
    network = input_data(shape=[None, 227, 227, 3])
    for width, depth in ((64, 2), (128, 2), (256, 3), (512, 3), (512, 3)):
        for _ in range(depth):
            network = conv_2d(network, width, 3, activation='relu')
        network = max_pool_2d(network, 2, strides=2)

    # Classifier head: two dropout-regularized FC layers and 17-way softmax.
    network = fully_connected(network, 4096, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 17, activation='softmax')

    network = regression(network,
                         optimizer='rmsprop',
                         loss='categorical_crossentropy',
                         learning_rate=0.0001)

    # Training
    model = tflearn.DNN(network,
                        checkpoint_path='model_vgg',
                        max_checkpoints=1,
                        tensorboard_verbose=0)
    model.fit(X, Y, n_epoch=500, shuffle=True, show_metric=True,
              batch_size=32, snapshot_step=500, snapshot_epoch=False,
              run_id='vgg')
コード例 #8
0
def get_data_oxford_flowers():
    """Load oxflower17 (resized to 227x227) and split 80/20 into train/val."""
    import tflearn.datasets.oxflower17 as oxflower17
    X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

    # 80% of the samples go to training, the remainder to validation.
    split_percentage = 80
    split_index = int(X.shape[0] / (100 / split_percentage))

    # Copy each slice into a fresh array so the splits own their data.
    x_train, x_val = np.array(X[:split_index]), np.array(X[split_index:])
    y_train, y_val = np.array(Y[:split_index]), np.array(Y[split_index:])
    return x_train, x_val, y_train, y_val
コード例 #9
0
def train():
    """Train the tflearn AlexNet model on oxflower17 (227x227, one-hot)."""
    features, labels = oxflower17.load_data(one_hot=True,
                                            resize_pics=(227, 227))
    net = tflearn_model()
    # Long training run; snapshots every 200 steps rather than per epoch.
    net.fit(features,
            labels,
            n_epoch=1000,
            validation_set=0.1,
            shuffle=True,
            show_metric=True,
            batch_size=512,
            snapshot_step=200,
            snapshot_epoch=False,
            run_id='alexnet_oxflowers17')
コード例 #10
0
def load_data():
    """Load oxflower17 with one-hot labels and report its dimensions.

    Returns:
        [Xtrain, Ytrain, input_shape, num_classes]
    """
    Xtrain, Ytrain = oxflower17.load_data(one_hot=True)

    input_shape = tuple(Xtrain.shape[1:4])  # (224,224,3)
    num_classes = Ytrain.shape[1]  # 17
    print("Training input %s" % str(Xtrain.shape))
    print("Training output %s" % str(Ytrain.shape))
    print("Input shape: %s" % str(input_shape))
    print("Number of classes: %d" % num_classes)

    return [Xtrain, Ytrain, input_shape, num_classes]
コード例 #11
0
ファイル: ResNet.py プロジェクト: AbdKaan/CNN-Implementations
def load_and_transform_data(test_size=0.20):
    """Load oxflower17 (one-hot labels) and split it into train/test sets.

    Returns:
        (X_train, X_test, Y_train, Y_test)
    """
    X, Y = oxflower17.load_data(one_hot=True)

    print("X's shape: ", X.shape)
    print("Y's shape: ", Y.shape)

    splits = train_test_split(X, Y, test_size=test_size)
    X_train, X_test, Y_train, Y_test = splits
    print("X_train's shape: ", X_train.shape)
    print("Y_train's shape: ", Y_train.shape)

    return X_train, X_test, Y_train, Y_test
コード例 #12
0
def vggnet():
    """Build, compile and train a Keras VGG-16-style network on oxflower17.

    Returns: None
    """
    X, Y = oxflower17.load_data(resize_pics=(227, 227), one_hot=True)

    # Building 'VGG Network'
    inputs = Input(shape=(227, 227, 3))

    # Five conv stages: a stack of same-width 3x3 convolutions followed by
    # a 2x2 max-pool with stride 2.
    network = inputs
    for filters, depth in ((64, 2), (128, 2), (256, 3), (512, 3), (512, 3)):
        for _ in range(depth):
            network = Conv2D(filters, (3, 3), activation='relu')(network)
        network = MaxPool2D(pool_size=(2, 2), strides=2)(network)

    network = Flatten()(network)

    # Classifier head: two dropout-regularized dense layers + 17-way softmax.
    network = Dense(4096, activation='relu')(network)
    network = Dropout(0.5)(network)
    network = Dense(4096, activation='relu')(network)
    network = Dropout(0.5)(network)
    predictions = Dense(17, activation='softmax')(network)

    # Training
    model = Model(inputs=inputs, outputs=predictions)
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # BUG FIX: Keras' fit() has no `n_epoch` argument (that is tflearn's
    # spelling) and raised TypeError; the Keras keyword is `epochs`.
    model.fit(X, Y, epochs=500, batch_size=32, validation_split=0.2)
コード例 #13
0
def main():
    """Split oxflower17 80/20 and train the CNN defined by model()."""
    x, y = oxflower17.load_data(one_hot=True)
    print('input X shape: ', x.shape)
    print('label shape ', y.shape)

    # Fixed seed so the split is reproducible across runs.
    X_train, X_test, Y_train, Y_test = train_test_split(
        x, y, test_size=0.2, random_state=42)

    train_accuracy, test_accuracy, parameters = model(
        X_train, Y_train, X_test, Y_test, num_epochs=10)
コード例 #14
0
def main():
    """Build, compile and fit the Keras AlexNet on oxflower17 (224x224)."""
    features, labels = oxflower17.load_data(one_hot=True)
    net = create_model(img_shape=(224, 224, 3))
    print(net.summary())

    net.compile(loss='categorical_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])

    # 10% of the data is held out for validation during training.
    net.fit(features,
            labels,
            batch_size=64,
            epochs=100,
            verbose=1,
            validation_split=0.1,
            shuffle=True)
コード例 #15
0
    def get_data(self):
        """Load the 17-flowers dataset onto this object.

        Each image is a 224x224 RGB image ([224][224][3] float matrix);
        the dataset is already pre-processed so all values lie in (0, 1).
        Each label is a one-hot [17] float vector assigning the image to
        one of the 17 flower classes.
        """
        self.data_x, self.data_y = oxflower17.load_data(dirname="data/17flowers", one_hot=True)
        # Image dimensions come straight from the loaded (N, W, H, C) array.
        _, self.image_width, self.image_height, self.color_depth = \
            self.data_x.shape
        self.n_classes = self.data_y.shape[1]
        print('[+] Loaded dataset: ')
        print('[+] Data point dimensions: ', self.data_x.shape)
        print('[+] Label dimensions: ', self.data_y.shape)
コード例 #16
0
ファイル: cnn.py プロジェクト: Emersonxuelinux/2book
def vggnet():
    """Build and train a VGG-16-style network on oxflower17 (227x227)."""
    X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

    # Building 'VGG Network': five stages of stacked 3x3 convolutions,
    # each followed by a 2x2 max-pool with stride 2.
    net = input_data(shape=[None, 227, 227, 3])
    for width, depth in ((64, 2), (128, 2), (256, 3), (512, 3), (512, 3)):
        for _ in range(depth):
            net = conv_2d(net, width, 3, activation='relu')
        net = max_pool_2d(net, 2, strides=2)

    # Classifier head: two dropout-regularized FC layers + 17-way softmax.
    net = fully_connected(net, 4096, activation='relu')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='relu')
    net = dropout(net, 0.5)
    net = fully_connected(net, 17, activation='softmax')

    net = regression(net, optimizer='rmsprop',
                     loss='categorical_crossentropy',
                     learning_rate=0.0001)

    # Training
    model = tflearn.DNN(net, checkpoint_path='model_vgg',
                        max_checkpoints=1, tensorboard_verbose=0)
    model.fit(X, Y, n_epoch=500, shuffle=True,
              show_metric=True, batch_size=32, snapshot_step=500,
              snapshot_epoch=False, run_id='vgg')
コード例 #17
0
def get_oxflower17_data(num_training=1000, num_validation=180, num_test=180):
    """Load oxflower17 and return mean-centered train/val/test splits."""
    # Load the raw oxflower17 data
    X, y = oxflower17.load_data()

    # Carve consecutive, non-overlapping slices out of the dataset
    # (copied, so the in-place centering below cannot alias X).
    val_end = num_training + num_validation
    test_end = val_end + num_test
    X_train, y_train = X[:num_training].copy(), y[:num_training]
    X_val, y_val = X[num_training:val_end].copy(), y[num_training:val_end]
    X_test, y_test = X[val_end:test_end].copy(), y[val_end:test_end]

    # Normalize every split by subtracting the training-set mean image.
    mean_image = np.mean(X_train, axis=0)
    for split in (X_train, X_val, X_test):
        split -= mean_image

    return X_train, y_train, X_val, y_val, X_test, y_test
コード例 #18
0
def alexnet():
    """Build, compile and train a Keras AlexNet variant on oxflower17.

    Returns: None
    """
    X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

    # Building 'AlexNet'
    inputs = Input(shape=(227, 227, 3))
    network = Conv2D(96, (11, 11), strides=4, activation='relu')(inputs)
    network = MaxPool2D(pool_size=(2, 2))(network)
    network = BatchNormalization()(network)
    network = Dropout(0.25)(network)
    network = Conv2D(256, (5, 5), activation='relu')(network)
    network = MaxPool2D(pool_size=(3, 3))(network)
    network = BatchNormalization()(network)
    network = Dropout(0.25)(network)
    network = Conv2D(384, (3, 3), activation='relu')(network)
    network = Conv2D(384, (3, 3), activation='relu')(network)
    network = Conv2D(256, (3, 3), activation='relu')(network)
    network = MaxPool2D(pool_size=(3, 3), strides=2)(network)
    network = BatchNormalization()(network)
    network = Dropout(0.25)(network)

    network = Flatten()(network)

    # Classifier head: two dropout-regularized dense layers + 17-way softmax.
    network = Dense(4096, activation='tanh')(network)
    network = Dropout(0.5)(network)
    network = Dense(4096, activation='tanh')(network)
    network = Dropout(0.5)(network)
    predictions = Dense(17, activation='softmax')(network)

    # Training
    model = Model(inputs=inputs, outputs=predictions)
    # BUG FIX: 'momentum' is not a Keras optimizer identifier and raised
    # ValueError at compile time; 'sgd' (SGD, the momentum-family optimizer)
    # is the closest valid built-in identifier.
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    # BUG FIX: Keras' fit() takes `validation_split`, not tflearn's
    # `validation_set`, which raised TypeError.
    model.fit(X, Y, epochs=1000, validation_split=0.1, batch_size=64)
コード例 #19
0
    tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 256, [3, 1], bias=False, name='Conv2d_0c_3x1')))
    tower_mixed = merge([tower_conv, tower_conv1_2], mode='concat', axis=3)
    tower_out = relu(batch_normalization(
        conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
    net += scale * tower_out
    if activation:
        if isinstance(activation, str):
            net = activations.get(activation)(net)
        elif hasattr(activation, '__call__'):
            net = activation(net)
        else:
            raise ValueError("Invalid Activation.")
    return net


X, Y = oxflower17.load_data(one_hot=True, resize_pics=(299, 299))

num_classes = 17
dropout_keep_prob = 0.8

network = input_data(shape=[None, 299, 299, 3])
conv1a_3_3 = relu(batch_normalization(
    conv_2d(network, 32, 3, strides=2, bias=False, padding='VALID', activation=None, name='Conv2d_1a_3x3')))
conv2a_3_3 = relu(
    batch_normalization(conv_2d(conv1a_3_3, 32, 3, bias=False, padding='VALID', activation=None, name='Conv2d_2a_3x3')))
conv2b_3_3 = relu(batch_normalization(conv_2d(conv2a_3_3, 64, 3, bias=False, activation=None, name='Conv2d_2b_3x3')))
maxpool3a_3_3 = max_pool_2d(conv2b_3_3, 3, strides=2, padding='VALID', name='MaxPool_3a_3x3')
conv3b_1_1 = relu(batch_normalization(
    conv_2d(maxpool3a_3_3, 80, 1, bias=False, padding='VALID', activation=None, name='Conv2d_3b_1x1')))
conv4a_3_3 = relu(batch_normalization(
    conv_2d(conv3b_1_1, 192, 3, bias=False, padding='VALID', activation=None, name='Conv2d_4a_3x3')))
コード例 #20
0
# Job id
run_id = 'alexnet_oxflowers17_' + optimizer_name + str(learning_rate)

# Device
gpu = '/gpu:0'
cpu = '/cpu:0'

# Checkpoint & snapshot
check_path = 'model_' + optimizer_name + str(learning_rate)
max_checkpoints = 10
snapshot_step=200
is_snapshot_epoch=False

# Dataset
import tflearn.datasets.oxflower17 as oxflower17
X, Y = oxflower17.load_data(dirname='/home/jiawei/dataset/17flowers/', one_hot=True, resize_pics=(227, 227))

tflearn.config.init_graph(log_device=True, soft_placement=True)

# Building 'AlexNet'
network = input_data(shape=[None, 227, 227, 3])
network = conv_2d(network, 96, 11, strides=4, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 256, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
コード例 #21
0
ファイル: vgg_net.py プロジェクト: kengz/ai-notebook
# Applying VGG 16-layers convolutional network to Oxford's 17 Category Flower
# Dataset classification task.
import tflearn
import os
import tflearn.datasets.oxflower17 as oxflower17
X, Y = oxflower17.load_data(one_hot=True)

def run():
    net = tflearn.input_data(shape=[None, 224, 224, 3])

    net = tflearn.conv_2d(net, 64, 3, activation='relu')
    net = tflearn.conv_2d(net, 64, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)

    net = tflearn.conv_2d(net, 128, 3, activation='relu')
    net = tflearn.conv_2d(net, 128, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)

    net = tflearn.conv_2d(net, 256, 3, activation='relu')
    net = tflearn.conv_2d(net, 256, 3, activation='relu')
    net = tflearn.conv_2d(net, 256, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)

    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)

    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.conv_2d(net, 512, 3, activation='relu')
コード例 #22
0
    return shuffled_dataset, shuffled_labels


# ######################################################
# 定义模型参数
# ######################################################
tf.flags.DEFINE_string('path', r'../17flowers', '经典数据集地址')
tf.flags.DEFINE_float('learning_rate', 0.002, '学习率')
tf.flags.DEFINE_float('dropout', 1, '每层输出DROPOUT的大小')
tf.flags.DEFINE_integer('batch_size', 32, '小批量梯度下降的批量大小')
tf.flags.DEFINE_float('sample', 0.1, '取样的数目')
tf.flags.DEFINE_integer('num_epoch', 1000, '训练几轮')
tf.flags.DEFINE_integer('num_class', 17, '一共多少类')
FLAGS = tf.flags.FLAGS

train_data, train_label = oxflower17.load_data(dirname=FLAGS.path,
                                               one_hot=True)

train_data, train_label = mess_dataset_order(train_data,
                                             train_label,
                                             dimention=train_data.shape[1])

sample = int(FLAGS.sample * train_data.shape[0])
x, y = train_data[:sample, :, :, :], train_label[:sample]

inputs = tf.placeholder(dtype=tf.float32,
                        shape=[None, 224, 224, 3],
                        name='input')
output = tf.placeholder(tf.float32, [None, 17], name='output')
out, _ = inception_v3(inputs=inputs,
                      num_classes=FLAGS.num_class,
                      is_training=True,
コード例 #23
0
    tower_conv1_0 = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 224, [1,3], bias=False, activation=None, name='Conv2d_0b_1x3')))
    tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 256, [3,1], bias=False, name='Conv2d_0c_3x1')))
    tower_mixed = merge([tower_conv,tower_conv1_2], mode='concat', axis=3)
    tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
    net += scale * tower_out
    if activation:
        if isinstance(activation, str):
            net = activations.get(activation)(net)
        elif hasattr(activation, '__call__'):
            net = activation(net)
        else:
            raise ValueError("Invalid Activation.")
    return net

X, Y = oxflower17.load_data(one_hot=True, resize_pics=(299, 299))

num_classes = 17
dropout_keep_prob = 0.8

network = input_data(shape=[None, 299, 299, 3])
conv1a_3_3 = relu(batch_normalization(conv_2d(network, 32, 3, strides=2, bias=False, padding='VALID',activation=None,name='Conv2d_1a_3x3')))
conv2a_3_3 = relu(batch_normalization(conv_2d(conv1a_3_3, 32, 3, bias=False, padding='VALID',activation=None, name='Conv2d_2a_3x3')))
conv2b_3_3 = relu(batch_normalization(conv_2d(conv2a_3_3, 64, 3, bias=False, activation=None, name='Conv2d_2b_3x3')))
maxpool3a_3_3 = max_pool_2d(conv2b_3_3, 3, strides=2, padding='VALID', name='MaxPool_3a_3x3')
conv3b_1_1 = relu(batch_normalization(conv_2d(maxpool3a_3_3, 80, 1, bias=False, padding='VALID',activation=None, name='Conv2d_3b_1x1')))
conv4a_3_3 = relu(batch_normalization(conv_2d(conv3b_1_1, 192, 3, bias=False, padding='VALID',activation=None, name='Conv2d_4a_3x3')))
maxpool5a_3_3 = max_pool_2d(conv4a_3_3, 3, strides=2, padding='VALID', name='MaxPool_5a_3x3')

tower_conv = relu(batch_normalization(conv_2d(maxpool5a_3_3, 96, 1, bias=False, activation=None, name='Conv2d_5b_b0_1x1')))
コード例 #24
0
from tflearn.data_utils import image_preloader

#X, Y = image_preloader(files_list, image_shape=(224, 224), mode='file',
#                       categorical_labels=True, normalize=False,
#                       files_extension=['.jpg', '.png'], filter_channel=True)
# or use the mode 'floder'
# X, Y = image_preloader(data_dir, image_shape=(224, 224), mode='folder',
#                        categorical_labels=True, normalize=True,
#                        files_extension=['.jpg', '.png'], filter_channel=True)

num_classes = 17 # num of your dataset

datapath = '../../data/17flowers224'

import tflearn.datasets.oxflower17 as oxflower17
X, Y = oxflower17.load_data(one_hot=True, dirname=datapath)

# VGG preprocessing
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center(mean=[123.68, 116.779, 103.939],
                                     per_channel=True)
# VGG Network
x = tflearn.input_data(shape=[None, 224, 224, 3], name='input',
                       data_preprocessing=img_prep)
softmax = vgg16(x, num_classes)
regression = tflearn.regression(softmax, optimizer='adam',
                                loss='categorical_crossentropy',
                                learning_rate=0.001, restore=False)

model = tflearn.DNN(regression, checkpoint_path='vnf-ckpt/vgg-finetuning-pre',
                    max_checkpoints=3, tensorboard_verbose=2,
コード例 #25
0
# import dependencies
import tflearn.datasets.oxflower17 as oxflower17
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.layers import BatchNormalization, Dropout

# load data
X, Y = oxflower17.load_data(one_hot=True,
                            dirname='data/17flowers')

# model specification
model = Sequential()

# first conv-pool block
model.add(Conv2D(96, kernel_size=(11, 11), strides=(4, 4), activation='relu', input_shape=(224, 224, 3)))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
model.add(BatchNormalization())

# second conv-pool block
model.add(Conv2D(256, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
model.add(BatchNormalization())

# third conv-pool block
model.add(Conv2D(256, kernel_size=(3, 3), activation='relu'))
model.add(Conv2D(384, kernel_size=(3, 3), activation='relu'))
model.add(Conv2D(384, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
model.add(BatchNormalization())

# dense block
コード例 #26
0
ファイル: Day_03_01_alexnet_quiz.py プロジェクト: redshim/CNN
# Day_03_01_alexnet_quiz.py
import numpy as np
import tensorflow as tf
import tflearn.datasets.oxflower17 as oxflower17

# def load_data(dirname="17flowers_onehot", resize_pics=(224, 224), shuffle=True, one_hot=False):
# 데이터셋 크기 : (1360, 224, 224, 3), (1360, 17)
features, labels = oxflower17.load_data(one_hot=True)
# print(features.shape, labels.shape)
# print(features[:3])
# (1360, 224, 224, 3) (1360, 17)


x_train, x_test = features[:1000], features[1000:]
y_train, y_test = labels[:1000], labels[1000:]

num_labels = 17
batch_size = 32

# [문제 1] shape을 채우세요.
# 일단 여기서 None 대신 batch_size로 대신한다.
# 학습할때만 batch size
x = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
y = tf.placeholder(tf.float32, shape=(None, num_labels))

# ------------------------------------------------------------ #
# 변수 생성

# [문제 2] 아래 숫자들을 채우세요.
w1 = tf.Variable(tf.truncated_normal([11, 11, 3, 96], stddev=0.1))
# 필터 가로, 세로, RGB(채널) 11 x 11 x 3 피처갯수, 96 클래스갯수
コード例 #27
0
ファイル: googlenet.py プロジェクト: EddywardoFTW/tflearn
Links:
    - [GoogLeNet Paper](http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Szegedy_Going_Deeper_With_2015_CVPR_paper.pdf)
    - [Flower Dataset (17)](http://www.robots.ox.ac.uk/~vgg/data/flowers/17/)
"""

from __future__ import division, print_function, absolute_import

import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d, avg_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.merge_ops import merge
from tflearn.layers.estimator import regression

import tflearn.datasets.oxflower17 as oxflower17
X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

network = input_data(shape=[None, 227, 227, 3])
conv1_7_7 = conv_2d(network, 64, 7, strides=2, activation='relu', name='conv1_7_7_s2')
pool1_3_3 = max_pool_2d(conv1_7_7, 3, strides=2)
pool1_3_3 = local_response_normalization(pool1_3_3)
conv2_3_3_reduce = conv_2d(pool1_3_3, 64, 1, activation='relu', name='conv2_3_3_reduce')
conv2_3_3 = conv_2d(conv2_3_3_reduce, 192, 3, activation='relu', name='conv2_3_3')
conv2_3_3 = local_response_normalization(conv2_3_3)
pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2')

# 3a
inception_3a_1_1 = conv_2d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1')
inception_3a_3_3_reduce = conv_2d(pool2_3_3, 96, 1, activation='relu', name='inception_3a_3_3_reduce')
inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce, 128, filter_size=3,  activation='relu', name='inception_3a_3_3')
inception_3a_5_5_reduce = conv_2d(pool2_3_3, 16, filter_size=1, activation='relu', name='inception_3a_5_5_reduce')
コード例 #28
0
ファイル: googlenet.py プロジェクト: MaksimSkavysh/CNN
Links:
    - [GoogLeNet Paper](http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Szegedy_Going_Deeper_With_2015_CVPR_paper.pdf)
    - [Flower Dataset (17)](http://www.robots.ox.ac.uk/~vgg/data/flowers/17/)
"""

from __future__ import division, print_function, absolute_import

import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d, avg_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.merge_ops import merge
from tflearn.layers.estimator import regression

import tflearn.datasets.oxflower17 as oxflower17
# Oxford 17-flowers resized to 227x227 with one-hot labels (17 classes).
X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

# GoogLeNet stem, same as the canonical tflearn example but formatted one
# keyword per line (this excerpt is truncated after the 1x1 reduce conv).
network = input_data(shape=[None, 227, 227, 3])
conv1_7_7 = conv_2d(network,
                    64,
                    7,
                    strides=2,
                    activation='relu',
                    name='conv1_7_7_s2')
pool1_3_3 = max_pool_2d(conv1_7_7, 3, strides=2)
pool1_3_3 = local_response_normalization(pool1_3_3)
conv2_3_3_reduce = conv_2d(pool1_3_3,
                           64,
                           1,
                           activation='relu',
                           name='conv2_3_3_reduce')
コード例 #29
0
    def run(self):
        """Train an AlexNet-style Keras model on the Oxford 17-flowers data.

        Reads hyperparameters from the parent widget's ``hyperparams`` dict,
        builds and fits the model, checkpoints weights after every epoch, and
        finally posts a ``TrainingDoneEvent`` back to the GUI thread.

        Side effects: creates ``output_dir`` and per-epoch HDF5 weight files,
        writes TensorBoard logs under ``tb_logs/alexnet``, stores the Keras
        History object on ``self._parent.history``, and prints a summary.
        """
        hyperparams = self._parent.hyperparams
        output_dir = hyperparams['output_dir']
        epochs = int(hyperparams['epochs'])
        batch_size = int(hyperparams['batch_size'])

        validation_split = float(hyperparams['validation_split'])
        conv2d_1_filters = int(hyperparams['conv2d_1_filters'])
        conv2d_2_filters = int(hyperparams['conv2d_2_filters'])
        conv2d_3_filters = int(hyperparams['conv2d_3_filters'])
        conv2d_4_filters = int(hyperparams['conv2d_4_filters'])
        conv2d_5_filters = int(hyperparams['conv2d_5_filters'])
        dense_1_units = int(hyperparams['dense_1_units'])
        dense_2_units = int(hyperparams['dense_2_units'])
        dense_3_units = int(hyperparams['dense_3_units'])
        drop_1_rate = float(hyperparams['drop_1_rate'])
        drop_2_rate = float(hyperparams['drop_2_rate'])
        fit_verbose = int(hyperparams['fit_verbose'])
        # 17-flowers images are 224x224x3; Y is one-hot, so dense_3_units is
        # expected to be 17 for this dataset — TODO confirm against the GUI.
        X, Y = oxflower17.load_data(one_hot=True)

        # AlexNet-like stack: 5 conv layers (pool+BN after 1, 2 and 5),
        # then three dense layers with dropout between them.
        model = Sequential()
        model.add(
            Conv2D(conv2d_1_filters,
                   kernel_size=(11, 11),
                   strides=(4, 4),
                   activation='relu',
                   input_shape=(224, 224, 3)))
        model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        model.add(BatchNormalization())

        model.add(
            Conv2D(conv2d_2_filters, kernel_size=(5, 5), activation='relu'))
        model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        model.add(BatchNormalization())

        model.add(
            Conv2D(conv2d_3_filters, kernel_size=(3, 3), activation='relu'))
        model.add(
            Conv2D(conv2d_4_filters, kernel_size=(3, 3), activation='relu'))
        model.add(
            Conv2D(conv2d_5_filters, kernel_size=(3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        model.add(BatchNormalization())

        model.add(Flatten())
        model.add(Dense(dense_1_units, activation='tanh'))
        model.add(Dropout(drop_1_rate))
        model.add(Dense(dense_2_units, activation='tanh'))
        model.add(Dropout(drop_2_rate))
        model.add(Dense(dense_3_units, activation='softmax'))

        model.summary()

        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        # Ensure the checkpoint directory exists *before* the callback is
        # built; exist_ok avoids the check-then-create race of the previous
        # `if not os.path.exists(...)` pattern.
        os.makedirs(output_dir, exist_ok=True)
        modelcheckpoint = ModelCheckpoint(filepath=output_dir +
                                          "/weights.{epoch:02d}.hdf5")
        tensorbrd = TensorBoard('tb_logs/alexnet')

        self._parent.history = model.fit(
            X,
            Y,
            batch_size=batch_size,
            epochs=epochs,
            verbose=fit_verbose,
            validation_split=validation_split,
            shuffle=True,
            callbacks=[modelcheckpoint, tensorbrd])

        # ModelCheckpoint's {epoch:02d} filenames are 1-based, hence the +1.
        weights_filepath = output_dir + "/weights.{:02d}.hdf5".format(
            best_epoch(self._parent.history) + 1)

        for cfg in model.get_config():
            print(cfg)
        print("epochs={}, batch_size={} best weights filepath {}".format(
            epochs, batch_size, weights_filepath))
        # Hand control back to the wx main thread.
        evt = TrainingDoneEvent(EVT_WORK_DONE_TYPE, -1)
        wx.PostEvent(self._parent, evt)
コード例 #30
0
ファイル: oxflowers17.py プロジェクト: BigRLab/TFRecordsMaker
def download():
    """Download and unpack the 17-flowers dataset (side effect only).

    Delegates to ``oxflower17.load_data``, which fetches the archive into
    the given directory on first use; the returned arrays are discarded.
    """
    oxflower17.load_data(dirname='Total_Data/flowers17/')
コード例 #31
0
import keras
import tensorflow as tf
import tflearn
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten,\
 Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
import numpy as np
# Fix the NumPy RNG so weight init / shuffling is reproducible.
np.random.seed(1000)

# (2) Get Data
import tflearn.datasets.oxflower17 as oxflower17
# x: (1360, 224, 224, 3) float images; y: (1360, 17) one-hot labels.
x, y = oxflower17.load_data(one_hot=True)

# (3) Create a sequential model
model = Sequential()

# 1st Convolutional Layer
# NOTE(review): classic AlexNet uses 3x3/2 pooling; this variant uses 2x2/2.
model.add(Conv2D(filters=96, input_shape=(224,224,3), kernel_size=(11,11),\
 strides=(4,4), padding='valid'))
model.add(Activation('relu'))
# Pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
# Batch Normalisation before passing it to the next layer
model.add(BatchNormalization())

# 2nd Convolutional Layer
model.add(Conv2D(filters=256, kernel_size=(11,11), strides=(1,1), padding='valid'))
model.add(Activation('relu'))
# Pooling (excerpt is truncated after this layer)
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
コード例 #32
0
    # Fully connected layer 1
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    # Fully connected layer 2
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    # Output layer: softmax over the 17 flower classes
    network = fully_connected(network, 17, activation='softmax')
    # Attach the training op (momentum SGD, cross-entropy loss).
    network = regression(network, optimizer='momentum', loss='categorical_crossentropy', learning_rate=0.001)
    return network


if __name__ == '__main__':
    # Load the data
    dataset = 'alexnet_oxflower17'
    X, Y = oxflower17.load_data(dirname='../datasets/17flowers', one_hot=True, resize_pics=(227, 227))
    # Build the model
    alexnet = alex_net()
    modal = tflearn.DNN(alexnet, checkpoint_path='./model/', max_checkpoints=1, tensorboard_verbose=2)
    # Resume from a previously saved model file, if one exists
    model_file = './model/' + dataset + '.model'
    if os.path.isfile(model_file):
        modal.load(model_file)
    try:
        modal.fit(X, Y, n_epoch=10, validation_set=0.2, shuffle=True,
                  show_metric=True, batch_size=16, snapshot_step=200,
                  snapshot_epoch=True, run_id=dataset)
        modal.save(model_file)
    except KeyboardInterrupt as i:
        print('Closed by an KeyboardInterrupt')
    # NOTE(review): the finally-block body is missing from this excerpt.
    finally:
コード例 #33
0
Links:
    [MNIST Dataset] http://yann.lecun.com/exdb/mnist/

"""
from __future__ import division, print_function, absolute_import

import numpy as np
import matplotlib.pyplot as plt
import tflearn
import tflearn.datasets.oxflower17 as oxflower17
import time

# Data loading and preprocessing
import tflearn.datasets.mnist as mnist

# NOTE(review): the header above mentions MNIST, but the data actually loaded
# is Oxford 17-flowers downscaled to 28x28 RGB.
X, Y = oxflower17.load_data(one_hot=True, resize_pics=(28, 28))
# Flatten each image: 28 * 28 * 3 = 2352 values per sample.
X = np.reshape(X, (len(X), 2352))
# Simple split by position: first 1000 samples train, the rest test
# (presumably the dataset's 1360 samples are pre-shuffled — verify).
testX = X[1000:]
X = X[:1000]
start_time = time.time()

# Building the encoder
encoder = tflearn.input_data(shape=[None, 2352])
encoder = tflearn.fully_connected(encoder, 2100)
encoder = tflearn.fully_connected(encoder, 200)

# Building the decoder
decoder = tflearn.fully_connected(encoder, 2100)
decoder = tflearn.fully_connected(decoder, 2352, activation='sigmoid')

# Regression, with mean square error
コード例 #34
0
    # (continuation of a dataset-selection if/elif chain; the opening branch
    # is outside this excerpt)
    (X, Y), (testX, testY) = cifar10.load_data()
    X, Y = shuffle(X, Y)
    Y = to_categorical(Y)
    testY = to_categorical(testY)
    # shapeToOneD is defined elsewhere in this file — presumably flattens
    # each sample to 1-D for the dense network below; verify.
    X = shapeToOneD(X)
    Y = shapeToOneD(Y)
    testX = shapeToOneD(testX)
    testY = shapeToOneD(testY)
elif jdata.model == "cifar100":  # input 1024 - output 100
    print("https://www.cs.toronto.edu/~kriz/cifar.html")
    from tflearn.datasets import cifar100
    (X, Y), (testX, testY) = cifar100.load_data()
elif jdata.model == "oxflower17.py": # input 50176 - output 17
    print("http://www.robots.ox.ac.uk/~vgg/data/flowers/17/")
    from tflearn.datasets import oxflower17
    # NOTE(review): unlike the other branches, no test split is produced here.
    (X, Y) = oxflower17.load_data()
elif jdata.model == "svhn":  # input 1024 - output 10
    print("http://ufldl.stanford.edu/housenumbers")
    from tflearn.datasets import svhn
    X, Y, testX, testY = svhn.load_data()
else:
    # Unknown model name in the JSON config: abort.
    sys.exit(1)

# Building deep neural network
# inputlayer / innerLayers / jdata come from earlier in the file (not shown).
net = tflearn.input_data(shape=[None, inputlayer], name='input')

# Encode the hidden-layer config into the model filename, one segment per layer.
modelFilename = ""
for i in range(innerLayers):
    modelFilename += str(jdata.layers[i].nodes) + str(jdata.layers[i].activation)[:5]
    net = tflearn.fully_connected(net, int(jdata.layers[i].nodes), bias=jdata.layers[i].bias, activation=str(jdata.layers[i].activation), name='dense'+str(i))
# output layer
# output layer
コード例 #35
0
                 # (the def line and the name/shape parameters of this helper
                 # are outside this excerpt)
                 dtype=tf.float32,
                 initializer=tf.random_normal_initializer(mean=0.0,
                                                          stddev=0.1)):
    """Wrapper for creating w/b variables with random-normal initial values.

    The w shapes correspond to the convolution window sizes.
    :param name: variable name
    :param shape: variable shape
    :param dtype: variable data type
    :param initializer: variable initializer
    :return: the initialized tensor variable
    """
    return tf.get_variable(name, shape, dtype, initializer)


# 一 加载数据
# 1. Load the data
X, Y = oxflower17.load_data(dirname="17flowers", one_hot=True)
print(X.shape)  # (1360,224, 224, 3)
print(Y.shape)  # (1360,17)

# 2. Build the network

# 2.1 Hyperparameters
# 2.2 Placeholders for inputs, labels and the learning rate
x_input = tf.placeholder(shape=[None, 224, 224, 3], dtype=tf.float32)
y_out = tf.placeholder(shape=[None, 17], dtype=tf.float32)
learn_rate = tf.placeholder(dtype=tf.float32)


# 3.VGG网络
def VGG():
    # 第一层卷积层(224,224,64)