def buildNetwork(n, k):
    # Real-time data preprocessing
    img_prep = tflearn.ImagePreprocessing()
    img_prep.add_featurewise_zero_center(per_channel=True)
    img_prep.add_featurewise_stdnorm()

    # Real-time data augmentation
    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_flip_leftright()

    # Building Residual Network
    net = tflearn.input_data(shape=[None, b.IMG_WIDTH, b.IMG_HEIGHT, b.CHANNELS], data_preprocessing=img_prep,
                             data_augmentation=img_aug)
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)

    # wideresnet part
    net = wideresnet_block(net, n, 16, k, downsample=True)
    net = wideresnet_block(net, 1, 32, k, downsample=True)
    net = wideresnet_block(net, n - 1, 32, k, downsample=True)
    net = wideresnet_block(net, 1, 64, k, downsample=True)
    net = wideresnet_block(net, n - 1, 64, k, downsample=True)

    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)

    # Regression
    net = tflearn.fully_connected(net, b.CLASS_3_NUMBER, activation='softmax')
    mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=mom, loss='categorical_crossentropy')
    model = tflearn.DNN(net, tensorboard_verbose=0, clip_gradients=0., tensorboard_dir=b.PATH + 'log')
    return model
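
A minimal training sketch for the model returned by buildNetwork(); the n=5/k=2 values, the arrays X/Y and the run_id are illustrative assumptions, not part of the original example.

# Hypothetical usage: X and Y are image/label arrays shaped to b.IMG_WIDTH x b.IMG_HEIGHT x b.CHANNELS.
model = buildNetwork(n=5, k=2)
model.fit(X, Y, n_epoch=10, shuffle=True, show_metric=True,
          batch_size=128, run_id='wideresnet_example')
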
Example No. 2
def get_input_layer(flags):
    img_prep = tflearn.ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    curr = input_data(shape=[None, flags.inres, flags.inres, flags.inchan],
                      name='input',
                      data_preprocessing=img_prep)

    if flags.rts_aug:
        w, h = curr.get_shape().as_list()[1:3]
        a = -flags.rts_aug_ang + 2 * flags.rts_aug_ang * tf.random_uniform(
            [flags.bs])
        a *= np.pi / 180
        # centralize rot/scale
        y = ((w - 1) - (tf.cos(a) * (w - 1) - tf.sin(a) * (h - 1))) / 2.0
        x = ((h - 1) - (tf.sin(a) * (w - 1) + tf.cos(a) * (h - 1))) / 2.0
        transforms = tf.transpose(
            tf.stack([
                tf.cos(a),
                tf.sin(a), x, -tf.sin(a),
                tf.cos(a), y,
                tf.zeros(flags.bs),
                tf.zeros(flags.bs)
            ]))

        return tf.cond(tflearn.get_training_mode(),
                       lambda: tf.contrib.image.transform(curr, transforms),
                       lambda: curr)
    else:
        return curr
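
A hedged usage sketch for get_input_layer(); the flags object below is a hypothetical stand-in that only defines the attributes the function reads (inres, inchan, rts_aug, rts_aug_ang, bs).

from argparse import Namespace

# Hypothetical flags; rts_aug enables the random rotation branch in training mode.
flags = Namespace(inres=64, inchan=3, rts_aug=True, rts_aug_ang=15.0, bs=32)
net = get_input_layer(flags)  # input tensor of shape [None, 64, 64, 3]
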
Example No. 3
    def build_network(self):
        """
        Build the convnet.
        Input is 48x48
        3072 nodes in fully connected layer
        """
        # Real-time data preprocessing
        img_prep = tflearn.ImagePreprocessing()
        img_prep.add_featurewise_zero_center(
            per_channel=True, mean=[0.53990436, 0.4405486, 0.39328504])

        # Real-time data augmentation
        img_aug = tflearn.ImageAugmentation()
        img_aug.add_random_flip_leftright()
        img_aug.add_random_crop([49, 49], padding=4)

        # Building Residual Network
        self.network = tflearn.input_data(shape=[None, 49, 49, 3],
                                          data_preprocessing=img_prep,
                                          data_augmentation=img_aug)
        self.network = tflearn.conv_2d(self.network,
                                       16,
                                       3,
                                       regularizer='L2',
                                       weight_decay=0.0001)
        self.network = tflearn.resnext_block(self.network, 5, 16, 32)
        self.network = tflearn.resnext_block(self.network,
                                             1,
                                             32,
                                             32,
                                             downsample=True)
        self.network = tflearn.resnext_block(self.network, 4, 32, 32)
        self.network = tflearn.resnext_block(self.network,
                                             1,
                                             64,
                                             32,
                                             downsample=True)
        self.network = tflearn.resnext_block(self.network, 4, 64, 32)
        self.network = tflearn.batch_normalization(self.network)
        self.network = tflearn.activation(self.network, 'relu')
        self.network = tflearn.global_avg_pool(self.network)
        # Regression
        self.network = tflearn.fully_connected(self.network,
                                               11,
                                               activation='softmax')
        opt = tflearn.Momentum(0.1,
                               lr_decay=0.1,
                               decay_step=32000,
                               staircase=True)
        self.network = tflearn.regression(self.network,
                                          optimizer=opt,
                                          loss='categorical_crossentropy')
        # Training
        self.model = tflearn.DNN(self.network,
                                 checkpoint_path='Snapshots/model_resnext',
                                 max_checkpoints=10,
                                 tensorboard_verbose=0,
                                 tensorboard_dir='Logs/',
                                 clip_gradients=0.)
        self.load_model()
Example No. 4
def return2img():

    n = 5

    img_prep = tflearn.ImagePreprocessing()
    img_prep.add_featurewise_zero_center(per_channel=True)

    # Real-time data augmentation
    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_crop([32, 32], padding=4)

    # Building Residual Network
    net = tflearn.input_data(shape=[None, 32, 32, 1],
                             data_preprocessing=img_prep,
                             data_augmentation=img_aug)
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.residual_block(net, n, 16)
    net = tflearn.residual_block(net, 1, 32, downsample=True)
    net = tflearn.residual_block(net, n - 1, 32)
    net = tflearn.residual_block(net, 1, 64, downsample=True)
    net = tflearn.residual_block(net, n - 1, 64)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    # Regression
    net = tflearn.fully_connected(net, 3, activation='softmax')
    mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=mom,
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net)
    model.load("model_resnet_+-------cifar10-6500")
    img = load_img_label('/media/bai/Elements/LiangData_Afterchoose/img1')
    livermask = load_mask('/media/bai/Elements/LiangData_Afterchoose/mask1')
    abc = []
    for i in range(img.shape[0]):
        # last axis: which segmentation method; third axis: the three predicted class scores
        a = np.zeros((512, 512, 3, 3), dtype=np.float32)
        for j in range(0, 3):
            this_mask = livermask[i, :, :, j]
            this_mask_num = int(np.max(this_mask))
            for k in range(1, this_mask_num + 1):
                if os.path.exists('/media/bai/Elements/LiangData_Afterchoose/superpixel/' + str(i) + '_' + str(j) + '_' + str(k) + '.jpg'):
                    this_img = io.imread('/media/bai/Elements/LiangData_Afterchoose/superpixel/' + str(i) + '_' + str(j) + '_' + str(k) + '.jpg')
                    this_img = np.reshape(this_img, (1, 32, 32, 1))
                    this_img = this_img.astype(np.float32)
                    result = model.predict(this_img)
                    # print(result)
                    abc.append(result)
                    thisPartLoc = np.where(this_mask == k)
                    for num in range(len(thisPartLoc[1])):
                        a[thisPartLoc[0][num], thisPartLoc[1][num], 0, j] = result[0, 0]
                        a[thisPartLoc[0][num], thisPartLoc[1][num], 1, j] = result[0, 1]
                        a[thisPartLoc[0][num], thisPartLoc[1][num], 2, j] = result[0, 2]
        b = np.max(a, axis=3)
        final = np.argmax(b, axis=2)
        # TODO: add 3D CRF post-processing
        misc.imsave('/media/bai/Elements/LiangData_Afterchoose/result/' + str(i) + '.jpg', final)
Example No. 5
  def build_network(self):
    # Smaller 'AlexNet'
    # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_flip_leftright()
    # img_aug.add_random_flip_updown()
    img_aug.add_random_crop([SIZE_FACE, SIZE_FACE], padding=4)
    img_aug.add_random_rotation(max_angle=16.0)
    
    img_prep = tflearn.ImagePreprocessing()
    img_prep.add_featurewise_zero_center(per_channel=True)
    img_prep.add_featurewise_stdnorm(per_channel=True)

    print('[+] Building CNN')
    self.network = input_data(shape = [None, SIZE_FACE, SIZE_FACE, 1], data_preprocessing=img_prep, data_augmentation=img_aug)

    self.network = conv_2d(self.network, 64, 3, activation='relu', regularizer='L2')#, weight_decay=0.0001)
    self.network = conv_2d(self.network, 64, 3, activation='relu', regularizer='L2')#, weight_decay=0.0001)
    self.network = local_response_normalization(self.network)
    self.network = max_pool_2d(self.network, 2, strides=2)
    self.network = dropout(self.network, 0.8)

    self.network = conv_2d(self.network, 128, 3, activation='relu', regularizer='L2')#, weight_decay=0.0001)
    self.network = conv_2d(self.network, 128, 3, activation='relu', regularizer='L2')#, weight_decay=0.0001)
    self.network = max_pool_2d(self.network, 2, strides=2)
    self.network = dropout(self.network, 0.8)

    self.network = conv_2d(self.network, 256, 3, activation='relu', regularizer='L2')#, weight_decay=0.0001)
    self.network = conv_2d(self.network, 256, 3, activation='relu', regularizer='L2')#, weight_decay=0.0001)
    self.network = max_pool_2d(self.network, 2, strides=2)
    self.network = dropout(self.network, 0.8)

    self.network = conv_2d(self.network, 512, 3, activation='relu', regularizer='L2')#, weight_decay=0.0001)
    self.network = conv_2d(self.network, 512, 3, activation='relu', regularizer='L2')#, weight_decay=0.0001)
    self.network = max_pool_2d(self.network, 2, strides=2)
    self.network = dropout(self.network, 0.8)

    self.network = fully_connected(self.network, 4096, activation='relu', regularizer='L2')#, weight_decay=0.0001)
    self.network = dropout(self.network, 0.7)
    
    self.network = fully_connected(self.network, 4096, activation='relu', regularizer='L2')#, weight_decay=0.0001)
    self.network = dropout(self.network, 0.7)
    
    self.network = fully_connected(self.network, 1024, activation='relu', regularizer='L2')#, weight_decay=0.0001)
    self.network = dropout(self.network, 0.7)

    self.network = fully_connected(self.network, len(EMOTIONS), activation='softmax')#, restore=False)

    mom = tflearn.optimizers.Momentum(learning_rate=0.02, lr_decay=0.8, decay_step=500)

    self.network = regression(self.network, optimizer=mom, loss='categorical_crossentropy')#, restore=False)

    self.model = tflearn.DNN(
      self.network,
      tensorboard_dir = '../tmp/',
      checkpoint_path = None,
      max_checkpoints = None,
      tensorboard_verbose = 0
    )
Example No. 6
def get_model(model_name):
    # First we load the network
    print("Setting up neural networks...")
    n = 18

    # Real-time data preprocessing
    print("Doing preprocessing...")
    img_prep = tflearn.ImagePreprocessing()
    img_prep.add_featurewise_zero_center(
        per_channel=True, mean=[0.573364, 0.44924123, 0.39455055])

    # Real-time data augmentation
    print("Building augmentation...")
    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_crop([32, 32], padding=4)

    #Build the model (for 32 x 32)
    print("Shaping input data...")
    net = tflearn.input_data(shape=[None, 32, 32, 3],
                             data_preprocessing=img_prep,
                             data_augmentation=img_aug)
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)

    print("Carving Resnext blocks...")
    net = tflearn.resnext_block(net, n, 16, 32)
    net = tflearn.resnext_block(net, 1, 32, 32, downsample=True)
    net = tflearn.resnext_block(net, n - 1, 32, 32)
    net = tflearn.resnext_block(net, 1, 64, 32, downsample=True)
    net = tflearn.resnext_block(net, n - 1, 64, 32)

    print("Erroding Gradient...")
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    net = tflearn.fully_connected(net, 8, activation='softmax')
    opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net,
                             optimizer=opt,
                             loss='categorical_crossentropy')

    print("Structuring model...")
    model = tflearn.DNN(net, tensorboard_verbose=0, clip_gradients=0.)

    # Load the model from checkpoint
    print("Loading the model...")
    model.load(model_name)

    return model
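
A hedged prediction sketch for get_model(); the checkpoint name and the all-zeros input batch are placeholders.

model = get_model('model_resnext-26000')              # hypothetical checkpoint name
sample = np.zeros((1, 32, 32, 3), dtype=np.float32)   # placeholder 32x32 RGB batch
probs = model.predict(sample)                         # softmax scores over the 8 classes
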
Example No. 7
def build(IMAGE_H,
          IMAGE_W,
          IMAGE_C,
          LABELS,
          model_file,
          learning_rate=0.01,
          val_acc_thresh=0.99):
    img_prep = tflearn.ImagePreprocessing()
    # img_prep.add_featurewise_zero_center(per_channel=True)  # subtract the per-channel image mean from the inputs

    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_rotation(max_angle=10.0)  # random rotation of up to 10 degrees
    # img_aug.add_random_blur(sigma_max=5.0)

    # Building Residual Network
    net = tflearn.input_data(shape=[None, IMAGE_H, IMAGE_W, IMAGE_C],
                             data_preprocessing=img_prep,
                             data_augmentation=img_aug,
                             name='input')

    net = tflearn.conv_2d(net,
                          16,
                          3,
                          regularizer='L2',
                          weights_init='variance_scaling',
                          weight_decay=0.0001,
                          name="conv1")  # 卷积处理, 16个卷积,卷积核大小为3,L2 正则化减少过拟合

    net = tflearn.residual_block(net, 1, 16, name="res1")  # 1 residual block, 16 output features
    net = tflearn.residual_block(net, 1, 32, downsample=True,
                                 name="res2")  # 1 residual block, 32 output features, spatial size halved
    net = tflearn.residual_block(net, 1, 64, downsample=True,
                                 name="res3")  # 1 residual block, 64 output features, spatial size halved

    # Regression
    net = tflearn.fully_connected(net, len(LABELS), activation='softmax')
    mom = tflearn.Momentum(learning_rate,
                           lr_decay=0.1,
                           decay_step=32000,
                           staircase=True)
    net = tflearn.regression(net,
                             optimizer=mom,
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, max_checkpoints=1, tensorboard_verbose=3)

    return model
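
A hedged call sketch for build(); the label list, image geometry and model_file value are placeholders (the function keeps its default learning_rate=0.01).

# Hypothetical invocation; 'plates.tfl' and the labels are illustrative only.
LABELS = ['car', 'truck', 'bus']
model = build(IMAGE_H=32, IMAGE_W=64, IMAGE_C=3,
              LABELS=LABELS, model_file='plates.tfl')
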
Example No. 8
def retrain(output_filename):
    num_classes = 120

    # Real-time data preprocessing
    img_prep = tflearn.ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Real-time data augmentation
    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_blur(sigma_max=5.)
    img_aug.add_random_crop((224, 224))
    img_aug.add_random_rotation(max_angle=25.)

    softmax = vgg16(softmax_size=num_classes,
                    restore_softmax=False,
                    data_preprocessing=img_prep,
                    data_augmentation=img_aug)
    regression = tflearn.regression(softmax,
                                    optimizer='rmsprop',
                                    loss='categorical_crossentropy',
                                    learning_rate=0.001)

    model = tflearn.DNN(regression,
                        checkpoint_path=output_filename,
                        max_checkpoints=3,
                        tensorboard_verbose=3)
    # Load pre-existing model, restoring all weights, except softmax layer ones
    model_file = 'vgg/vgg16.tflearn'
    if not os.path.exists(model_file):
        maybe_download(DATA_URL, 'vgg')
    model.load(model_file)

    # Start fine-tuning
    X, Y = grozi120.load_data()
    model.fit(X,
              Y,
              n_epoch=10,
              validation_set=0.1,
              shuffle=True,
              show_metric=True,
              batch_size=64,
              snapshot_step=200,
              snapshot_epoch=False,
              run_id=output_filename)

    model.save(output_filename)
Example No. 9
    def _build_network(self, n, image_size):
        # Define the input to the network.

        img_prep = tflearn.ImagePreprocessing()
        img_prep.add_featurewise_zero_center(per_channel=True)

        # Real-time data augmentation.
        img_aug = tflearn.ImageAugmentation()
        img_aug.add_random_flip_leftright()

        net = tflearn.input_data(shape=[None, image_size[0], image_size[1], 3],
                                 data_preprocessing=img_prep,
                                 data_augmentation=img_aug)

        # Start with a normal convolutional layer.
        net = tflearn.conv_2d(net,
                              64,
                              3,
                              regularizer='L2',
                              weight_decay=0.0001)

        # Since this is a ResNet with <50 layers, we'll use regular residual blocks;
        # otherwise, we'd use residual bottleneck blocks instead.
        net = tflearn.residual_block(net, n, 64)
        net = tflearn.residual_block(net, 1, 128, downsample=True)
        net = tflearn.residual_block(net, n - 1, 128)
        net = tflearn.residual_block(net, 1, 256, downsample=True)
        net = tflearn.residual_block(net, n - 1, 256)

        # Perform batch normalization.
        net = tflearn.batch_normalization(net)

        # Activation at the end of the network pre-FC.
        net = tflearn.activation(net, 'relu')
        net = tflearn.global_avg_pool(net)

        net = tflearn.fully_connected(net, 2, activation='softmax')
        mom = tflearn.Momentum(0.1,
                               lr_decay=0.1,
                               decay_step=32000,
                               staircase=True)
        net = tflearn.regression(net,
                                 optimizer=mom,
                                 loss='categorical_crossentropy')

        return net
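
Because _build_network() returns the regression graph rather than a model, here is a hedged sketch of a companion method that wraps it in tflearn.DNN, mirroring the pattern of the other examples on this page (the method name and defaults are assumptions):

    def build_model(self, n=5, image_size=(64, 64)):
        # Hypothetical wrapper: n=5 gives a 32-layer ResNet (6n + 2 layers).
        net = self._build_network(n, image_size)
        self.model = tflearn.DNN(net, tensorboard_verbose=0, clip_gradients=0.)
        return self.model
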
Example No. 10
def add_layer(net, lay):
    if lay[0] == 'down_conv_2d':
        net = tfl.conv_2d(net,
                          nb_filter=lay[1],
                          strides=[1, 2, 2, 1],
                          filter_size=lay[2],
                          activation=lay[3])
    elif lay[0] == 'conv_2d':
        net = tfl.conv_2d(net,
                          nb_filter=lay[1],
                          filter_size=lay[2],
                          activation=lay[3])
    elif lay[0] == 'flatten':
        # Flatten all non-batch dimensions into a single feature axis.
        net = tf.reshape(net, [tf.shape(net)[0], -1])
    elif lay[0] == 'fully_connected':
        net = tfl.fully_connected(net, n_units=lay[1], activation=lay[2])
    elif lay[0] == 'expand':
        net = tf.reshape(net, [
            tf.shape(net)[0], lay[1], lay[2],
            net.get_shape().as_list()[1] // (lay[1] * lay[2])
        ])
    elif lay[0] == 'up_conv_2d':
        net = tfl.conv_2d_transpose(net,
                                    nb_filter=lay[1],
                                    filter_size=lay[2],
                                    strides=[1, 2, 2, 1],
                                    output_shape=[lay[3], lay[4]],
                                    activation=lay[5],
                                    padding="same")
    elif lay[0] == 'input_layer':
        image_prep = tfl.ImagePreprocessing()
        image_prep.add_featurewise_stdnorm(per_channel=True,
                                           std=0.24051991589344662)
        image_prep.add_featurewise_zero_center(per_channel=True,
                                               mean=0.14699117337640238)

        net = tfl.layers.input_data(shape=[None, lay[1], lay[2]],
                                    data_preprocessing=image_prep)
        net = tf.expand_dims(net, axis=-1)

    if lay[-1] == 'batch_norm':
        net = tfl.batch_normalization(net)

    return net
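
A hedged sketch of driving add_layer() with a layer-spec list; the tuples below follow the layouts the branches above expect, but the particular architecture is illustrative, not from the original source.

# Hypothetical specs for a small 28x28 grayscale classifier.
layers = [
    ('input_layer', 28, 28),                   # input + expand_dims to NHWC
    ('conv_2d', 16, 3, 'relu', 'batch_norm'),  # conv with trailing batch norm
    ('down_conv_2d', 32, 3, 'relu'),           # strided conv, halves the resolution
    ('flatten',),
    ('fully_connected', 10, 'softmax'),
]

net = None
for lay in layers:
    net = add_layer(net, lay)
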
Example No. 11
class ConvNeuralNetwork():
    # Image preprocessing
    imgprep = tflearn.ImagePreprocessing()
    imgprep.add_featurewise_zero_center()
    imgprep.add_featurewise_stdnorm()

    # Image augmentation
    imgaug = tflearn.ImageAugmentation()
    imgaug.add_random_rotation()
    imgaug.add_random_flip_leftright()

    # Input layer
    convnet = input_data(shape=[None, settings.IMG_SIZE, settings.IMG_SIZE, 1],
                         data_preprocessing=imgprep,
                         data_augmentation=imgaug,
                         name='input')

    # Hidden layers
    convnet = conv_2d(convnet, 32, 3, activation='relu')
    convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 64, 3, activation='relu')
    convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 128, 3, activation='relu')
    convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 256, 3, activation='relu')
    convnet = max_pool_2d(convnet, 2)

    # Fully connected layer
    convnet = fully_connected(convnet, 1024, activation='relu')
    convnet = dropout(convnet, settings.DROPOUT_RATE)

    # Output layer
    convnet = fully_connected(convnet, 2, activation='softmax')
    convnet = regression(convnet,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=settings.LEARNING_RATE,
                         name='targets')

    model = tflearn.DNN(convnet, tensorboard_dir='log')
Example No. 12
from tflearn.helpers.regularizer import add_weights_regularizer
from tensorflow.contrib.slim import dataset

tflearn.input_data()
tflearn.variable()
tflearn.conv_2d()
tflearn.single_unit()
tflearn.lstm()
tflearn.embedding()
tflearn.batch_normalization()
tflearn.merge()
tflearn.regression()
tflearn.tanh()
tflearn.softmax_categorical_crossentropy()
tflearn.SGD()
tflearn.initializations.uniform()
tflearn.losses.L1()
tflearn.add_weights_regularizer()
tflearn.metrics.Accuracy()
tflearn.summaries()
tflearn.ImagePreprocessing()
tflearn.ImageAugmentation()
tflearn.init_graph()






Example No. 13
def train(env, epoch_num, learning_rate=0.01, clean_start=False):
    '''
    A function that creates the input, builds the network, trains it and reports results.
    :param env: local/AWS; the environment the system runs on
    :param epoch_num: number of epochs to run
    :param learning_rate: optimizer learning rate
    :param clean_start: if True, regenerate the data and skip loading a saved model
    :return:
    '''

    #----------------------reading constants from config file---------------------------------
    config = configparser.ConfigParser()
    config.read('config.ini')
    conf = config[env]
    image_size = int(conf['window_size'])  # configparser values are strings
    mean_colors = [
        float(conf['mean_r']),
        float(conf['mean_g']),
        float(conf['mean_b'])
    ]
    #-----------------------------------------------------------------------------------------

    #----------------------------------------input layer------------------------------------------
    # read input  http://tflearn.org/data_utils/#build-hdf5-image-dataset
    X_train, Y_train, X_test, Y_test = deep_learning.data_prep(
        conf, clean_start=clean_start)
    #prepare input layer  http://tflearn.org/data_preprocessing/
    img_prep = tflearn.ImagePreprocessing()
    img_prep.add_featurewise_zero_center(mean=mean_colors, per_channel=True)
    # http://tflearn.org/layers/core/#input-data
    input_layer = input_data(shape=[None, image_size, image_size, 3],
                             name='input_layer')
    #,data_preprocessing=img_prep)
    #---------------------------------------------------------------------------------------------

    #-------------------------------create model--------------------------------------------------------
    # network
    softmax = deep_learning.inception(input_layer, 2)
    #softmax = deep_learning.VGGNet(input_layer, 2)
    # estimator layer
    f_score = tflearn.metrics.F2Score()
    network = tflearn.regression(
        softmax,
        optimizer='momentum',
        loss='categorical_crossentropy',
        learning_rate=learning_rate,
        metric=f_score)  # to fine-tune, pass restore=False
    # model  http://tflearn.org/models/dnn/
    model = tflearn.DNN(network,
                        checkpoint_path='model_inception',
                        max_checkpoints=1,
                        tensorboard_verbose=0,
                        tensorboard_dir="./logs2")
    if os.path.isfile('inception_ercis.model') and not clean_start:
        print 'load model learning_rate: ' + str(learning_rate)
        model.load('inception_ercis.model', weights_only=True)

    model.fit(X_train,
              Y_train,
              validation_set=(X_test, Y_test),
              n_epoch=epoch_num,
              shuffle=True,
              show_metric=True,
              batch_size=128,
              snapshot_step=200,
              snapshot_epoch=False,
              run_id='inception_ercis')
    #---------------------------------------------------------------------------------------------

    model.save('inception_ercis.model')
Example No. 14
import tflearn
Example No. 15
def train(env, epoch_num, learning_rate = 0.01, clean_start = True, model_name = None):
    '''
    A function that creates the input, builds the selected network, trains it and reports results.
    :param env: local/AWS; the environment the system runs on
    :param epoch_num: number of epochs to run
    :param model_name: checkpoint name; its prefix selects the architecture (inception/ResNet/VGGNet/NiN)
    :return:
    '''
    print 'model_name: ', model_name
    #----------------------reading constants from config file---------------------------------
    config = configparser.ConfigParser()
    config.read('config.ini')
    conf = config[env]
    image_size = int(conf['window_size'])  # configparser values are strings
    mean_colors = [float(conf['mean_r']), float(conf['mean_g']), float(conf['mean_b'])]
    data_splitting_method = conf['data_splitting_method']
    train_size = conf['train_size']
    #-----------------------------------------------------------------------------------------


    #----------------------------------------input layer------------------------------------------
    # read input  http://tflearn.org/data_utils/#build-hdf5-image-dataset
    X_train, Y_train, X_test, Y_test = data_prep(conf, clean_start = clean_start, method=data_splitting_method, training_size=train_size)
    #prepare input layer  http://tflearn.org/data_preprocessing/
    img_prep = tflearn.ImagePreprocessing()
    img_prep.add_featurewise_zero_center(mean=mean_colors,per_channel=True)
    # http://tflearn.org/layers/core/#input-data
    input_layer = input_data(shape=[None, image_size, image_size, 3], name = 'input_layer')
                             #,data_preprocessing=img_prep)
    #---------------------------------------------------------------------------------------------




    #-------------------------------create model--------------------------------------------------------
    # network
    #softmax = deep_learning.inception(input_layer, 2)
    #softmax = deep_learning.VGGNet(input_layer, 2)
    if model_name.split('.')[0].split('-')[0] == 'inception':
        softmax = deep_learning.inception(input_layer, 2)
    elif model_name.split('.')[0].split('-')[0] == 'ResNet':
        softmax = deep_learning.ResNet(input_layer, 2)
    elif model_name.split('.')[0].split('-')[0] == 'VGGNet':
        softmax = deep_learning.VGGNet(input_layer, 2)
    elif model_name.split('.')[0].split('-')[0] == 'NiN':
        softmax = deep_learning.NiN(input_layer, 2)
    else:
        return
    # estimator layer
    f_score = tflearn.metrics.F2Score()
    momentum = Momentum(learning_rate=0.1, lr_decay=0.9, decay_step=250)
    network = tflearn.regression(softmax, optimizer='adam',
                         loss='categorical_crossentropy', metric=f_score)  # to fine-tune, pass restore=False
    # model  http://tflearn.org/models/dnn/
    model = tflearn.DNN(network, checkpoint_path='./models', best_checkpoint_path='./models/best',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir="./logs", best_val_accuracy=0.0)
    if model_name is not None:
        if os.path.isfile(model_name) and not clean_start:
            print 'load model learning_rate: ' + str(learning_rate) 
            model.load(model_name,weights_only=True)

    model.fit(X_train, Y_train, validation_set = (X_test, Y_test),n_epoch=epoch_num,  shuffle=True,
              show_metric=True, batch_size=128, snapshot_epoch=True, run_id=model_name)
    #---------------------------------------------------------------------------------------------
    print 'saving model.'
    model.save(model_name)
    print 'model: ', model_name, 'saved.'
    return model_name
Example No. 16
def return2img():

    n = 5
    # Real-time data preprocessing
    img_prep = tflearn.ImagePreprocessing()
    img_prep.add_featurewise_zero_center(per_channel=True)

    # Real-time data augmentation
    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_crop([32, 32], padding=4)
    inp = tflearn.input_data(shape=[None, 32, 32, 1],
                             data_preprocessing=img_prep,
                             data_augmentation=img_aug,
                             name='input')

    conv1_1 = tflearn.conv_2d(inp, 64, 3, activation='relu', name="conv1_1")
    conv1_2 = tflearn.conv_2d(conv1_1,
                              64,
                              3,
                              activation='relu',
                              name="conv1_2")
    pool1 = tflearn.max_pool_2d(conv1_2, 2, strides=2)

    conv2_1 = tflearn.conv_2d(pool1, 128, 3, activation='relu', name="conv2_1")
    conv2_2 = tflearn.conv_2d(conv2_1,
                              128,
                              3,
                              activation='relu',
                              name="conv2_2")
    pool2 = tflearn.max_pool_2d(conv2_2, 2, strides=2)

    conv3_1 = tflearn.conv_2d(pool2, 256, 3, activation='relu', name="conv3_1")
    conv3_2 = tflearn.conv_2d(conv3_1,
                              256,
                              3,
                              activation='relu',
                              name="conv3_2")
    conv3_3 = tflearn.conv_2d(conv3_2,
                              256,
                              3,
                              activation='relu',
                              name="conv3_3")
    pool3 = tflearn.max_pool_2d(conv3_3, 2, strides=2)

    conv4_1 = tflearn.conv_2d(pool3, 512, 3, activation='relu', name="conv4_1")
    conv4_2 = tflearn.conv_2d(conv4_1,
                              512,
                              3,
                              activation='relu',
                              name="conv4_2")
    conv4_3 = tflearn.conv_2d(conv4_2,
                              512,
                              3,
                              activation='relu',
                              name="conv4_3")
    pool4 = tflearn.max_pool_2d(conv4_3, 2, strides=2)
    conv5_1 = tflearn.conv_2d(pool4, 512, 3, activation='relu', name="conv5_1")
    conv5_2 = tflearn.conv_2d(conv5_1,
                              512,
                              3,
                              activation='relu',
                              name="conv5_2")
    conv5_3 = tflearn.conv_2d(conv5_2,
                              512,
                              3,
                              activation='relu',
                              name="conv5_3")
    pool5 = tflearn.max_pool_2d(conv5_3, 2, strides=2)

    fc6 = tflearn.fully_connected(pool5, 4096, activation='relu', name="fc6")
    fc6_dropout = tflearn.dropout(fc6, 0.5)

    fc7 = tflearn.fully_connected(fc6_dropout,
                                  4096,
                                  activation='relu',
                                  name="fc7")
    fc7_droptout = tflearn.dropout(fc7, 0.5)

    fc8 = tflearn.fully_connected(fc7_droptout,
                                  3,
                                  activation='softmax',
                                  name="fc8")

    mm = tflearn.Momentum(learning_rate=0.01,
                          momentum=0.9,
                          lr_decay=0.1,
                          decay_step=1000)

    network = tflearn.regression(fc8,
                                 optimizer=mm,
                                 loss='categorical_crossentropy',
                                 restore=False)
    # Training
    model = tflearn.DNN(network)
    model.load("model_resnet_+-------cifar10-14000")
    img = load_img_label('/home/bai/最新数据/验证数据/经过预处理的原始图像')
    livermask = load_mask('/home/bai/最新数据/验证数据/mask')
    abc = []
    for i in range(img.shape[0]):
        # last axis: which segmentation method; third axis: the three predicted class scores
        a = np.zeros((512, 512, 3, 3), dtype=np.float32)
        for j in range(0, 3):
            this_mask = livermask[i, :, :, j]
            this_mask_num = int(np.max(this_mask))
            for k in range(1, this_mask_num + 1):
                if os.path.exists('/home/bai/最新数据/验证数据/超像素块/' + str(i) + '_' +
                                  str(j) + '_' + str(k) + '.jpg'):
                    this_img = io.imread('/home/bai/最新数据/验证数据/超像素块/' + str(i) +
                                         '_' + str(j) + '_' + str(k) + '.jpg')
                    this_img = np.reshape(this_img, (1, 32, 32, 1))
                    this_img = this_img.astype(np.float32)
                    result = model.predict(this_img)
                    # print(result)
                    abc.append(result)
                    thisPartLoc = np.where(this_mask == k)
                    for num in range(len(thisPartLoc[1])):
                        a[thisPartLoc[0][num], thisPartLoc[1][num], 0,
                          j] = result[0, 0]
                        a[thisPartLoc[0][num], thisPartLoc[1][num], 1,
                          j] = result[0, 1]
                        a[thisPartLoc[0][num], thisPartLoc[1][num], 2,
                          j] = result[0, 2]
        b = np.max(a, axis=3)
        final = np.argmax(b, axis=2)
        misc.imsave(
            '/home/bai/PycharmProjects/DeepRESIDUALNETWORKS/result/' + str(i) +
            '.jpg', final)
Example No. 17
def train_nn_tflearn(data_handler, num_epochs=50):

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)

    batch_size = data_handler.mini_batch_size

    img_prep = tflearn.ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25)
    img_aug.add_random_crop([32, 32], padding=4)

    x = tflearn.input_data(shape=[None, 32, 32, 3],
                           dtype='float',
                           data_preprocessing=img_prep,
                           data_augmentation=img_aug)
    # x = tf.placeholder('float', [None, 32, 32, 3])
    #y = tf.placeholder('float', [None, 10])

    # test_data, test_labels = data_handler.get_test_data()
    # test_data = test_data.reshape([-1,32,32,3])

    ntrain = data_handler.train_size
    ntest = data_handler.meta['num_cases_per_batch']

    # from tflearn.datasets import cifar10
    # (X, Y), (X_test, Y_test) = cifar10.load_data(dirname="/home/hamza/meh/bk_fedora24/Documents/tflearn_example/cifar-10-batches-py")
    # X, Y = tflearn.data_utils.shuffle(X, Y)
    # Y = tflearn.data_utils.to_categorical(Y, 10)
    # Y_test = tflearn.data_utils.to_categorical(Y_test, 10)

    X, Y = data_handler.get_all_train_data()

    X, Y = tflearn.data_utils.shuffle(X, Y)

    X = np.dstack((X[:, :1024], X[:, 1024:2048], X[:, 2048:]))

    X = X / 255.0

    X = X.reshape([-1, 32, 32, 3])

    Y = tflearn.data_utils.to_categorical(Y, 10)

    X_test, Y_test = data_handler.get_test_data()

    X_test = np.dstack((X_test[:, :1024], X_test[:, 1024:2048], X_test[:,
                                                                       2048:]))

    X_test = X_test / 255.0

    X_test = X_test.reshape([-1, 32, 32, 3])

    #network = tflearn.regression(net3(x),optimizer='adam',loss='categorical_crossentropy',learning_rate=0.001)
    mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    network = tflearn.regression(resnet1(x),
                                 optimizer=mom,
                                 loss='categorical_crossentropy')

    print np.shape(X)
    print np.shape(Y)
    print network

    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(X,
              Y,
              n_epoch=num_epochs,
              shuffle=True,
              validation_set=(X_test, Y_test),
              show_metric=True,
              batch_size=data_handler.mini_batch_size,
              run_id='cifar10_cnn')
Example No. 18
def test(env, model_name, clean_start=False):
    '''
    A function that creates the input, builds the selected network, loads a trained model and reports test metrics.
    :param env: local/AWS; the environment the system runs on
    :param model_name: checkpoint name; its prefix selects the architecture (inception/ResNet/VGGNet/NiN)
    :param clean_start: if True, skip loading the saved model
    :return:
    '''
    #----------------------reading constants from config file---------------------------------
    config = configparser.ConfigParser()
    config.read('config.ini')
    conf = config[env]
    image_size = int(conf['window_size'])  # configparser values are strings
    mean_colors = [
        float(conf['mean_r']),
        float(conf['mean_g']),
        float(conf['mean_b'])
    ]
    #-----------------------------------------------------------------------------------------

    #----------------------------------------input layer------------------------------------------
    # read input  http://tflearn.org/data_utils/#build-hdf5-image-dataset
    X_train, Y_train, X_test, Y_test = data_prep(conf)
    #prepare input layer  http://tflearn.org/data_preprocessing/
    img_prep = tflearn.ImagePreprocessing()
    img_prep.add_featurewise_zero_center(mean=mean_colors, per_channel=True)
    # http://tflearn.org/layers/core/#input-data
    input_layer = input_data(shape=[None, image_size, image_size, 3],
                             name='input_layer')
    #,data_preprocessing=img_prep)
    #---------------------------------------------------------------------------------------------

    #-------------------------------create model--------------------------------------------------------
    # network
    if model_name.split('.')[0].split('-')[0] == 'inception':
        softmax = deep_learning.inception(input_layer, 2)
    elif model_name.split('.')[0].split('-')[0] == 'ResNet':
        softmax = deep_learning.ResNet(input_layer, 2)
    elif model_name.split('.')[0].split('-')[0] == 'VGGNet':
        softmax = deep_learning.VGGNet(input_layer, 2)
    elif model_name.split('.')[0].split('-')[0] == 'NiN':
        softmax = deep_learning.NiN(input_layer, 2)
    else:
        softmax = deep_learning.inception(input_layer, 2)
    #softmax = deep_learning.inception(input_layer, 2)
    #softmax = deep_learning.VGGNet(input_layer, 2)
    # estimator layer
    f_score = tflearn.metrics.F2Score()
    momentum = Momentum(learning_rate=0.1, lr_decay=0.9, decay_step=250)
    network = tflearn.regression(
        softmax,
        optimizer='adam',
        loss='categorical_crossentropy',
        learning_rate=0.0001,
        metric=f_score)  # to fine-tune, pass restore=False
    # model  http://tflearn.org/models/dnn/
    model = tflearn.DNN(network,
                        checkpoint_path='model_' + model_name,
                        max_checkpoints=1,
                        tensorboard_verbose=0,
                        tensorboard_dir="./logs_test")
    if os.path.isfile(model_name) and not clean_start:
        model.load(model_name)

    #model.fit(X_train, Y_train, validation_set = (X_test, Y_test),n_epoch=epoch_num,  shuffle=True,
    #          show_metric=True, batch_size=100, snapshot_step=100, snapshot_epoch=False, run_id='inception_ercis')
    #---------------------------------------------------------------------------------------------
    #model.save('inception.model')
    temp = []
    counter = 0
    for x in X_test:
        temp.append(model.predict(x.reshape(1, 40, 40, 3))[0])
        counter += 1
        print '{0}\r'.format(counter),
    #print temp
    predict = temp
    with open('results/test_pred_{0}.pik'.format(gettime()), 'w') as f:
        pickle.dump(predict, f)

    target = np.argmax(Y_test[()], axis=1)
    pred = np.argmax(predict, axis=1)

    #print Y_test.shape
    #for x,y in zip(target, pred):
    #    print x, y
    print type(target)
    print target.shape
    print type(pred)
    print pred.shape
    #for y in Y_test:
    #    print y
    #try reporting
    #try:
    print 'accuracy_score: ', accuracy_score(target, pred)
    print 'recall_score: ', recall_score(target, pred)
    print 'precision_score: ', precision_score(target, pred)
    print 'f2_score: ', fbeta_score(target, pred, 2)
    print 'confusion_matrix: '
    print confusion_matrix(target, pred)
    with open('results/results.txt', 'a+') as f:
        f.write('# ' + model_name.split('_')[0] + '_' + gettime() + '\n')
        f.write('accuracy_score: {0}\n'.format(accuracy_score(target, pred)))
        f.write('recall_score: {0}\n'.format(recall_score(target, pred)))
        f.write('precision_score: {0}\n'.format(precision_score(target, pred)))
        f.write('f2_score: {0}\n'.format(fbeta_score(target, pred, 2)))
        f.write('confusion_matrix: \n')
        f.write(str(confusion_matrix(target, pred)))
        f.write('\n\n\n')
import tensorflow as tf
# import data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar100.load_data(label_mode='fine')

# data setting
y_train = y_train.reshape(50000,)
y_test = y_test.reshape(10000,)
train_data = np.asarray(x_train, dtype=np.float32)
train_labels =np.asarray(y_train, dtype=np.float32)
eval_data = np.asarray(x_test, dtype=np.float32)
eval_labels = np.asarray(y_test, dtype=np.float32)
# one hot encoding
y_train = tflearn.data_utils.to_categorical(y_train, 100)
y_test = tflearn.data_utils.to_categorical(y_test, 100)
# image preprocessing and augmentation
image_preprocessing = tflearn.ImagePreprocessing()
image_preprocessing.add_featurewise_zero_center(per_channel=True)
image_argumentation = tflearn.ImageAugmentation()
image_argumentation.add_random_flip_leftright()
image_argumentation.add_random_crop([32, 32], padding=4)
# build a 56-layer residual network (6n + 2 layers with n = 9)
n = 9
net = tflearn.input_data(shape=[None, 32, 32, 3],
                          data_preprocessing=image_preprocessing,
                          data_augmentation=image_argumentation)
net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
net = tflearn.residual_block(net, n, 16)
net = tflearn.residual_block(net, 1, 32, downsample=True)
net = tflearn.residual_block(net, n-1, 32)
net = tflearn.residual_block(net, 1, 64, downsample=True)
net = tflearn.residual_block(net, n-1, 64)
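
The snippet above stops after the residual blocks; a hedged completion of the classifier head and training call, following the same pattern as the other residual-network examples on this page (epoch count and batch size are assumptions):

net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
net = tflearn.global_avg_pool(net)
net = tflearn.fully_connected(net, 100, activation='softmax')
mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
net = tflearn.regression(net, optimizer=mom, loss='categorical_crossentropy')
model = tflearn.DNN(net, tensorboard_verbose=0, clip_gradients=0.)
model.fit(x_train, y_train, n_epoch=10, validation_set=(x_test, y_test),
          show_metric=True, batch_size=128, shuffle=True, run_id='resnet_cifar100')
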
    def image_prep(self, stdn=True, mean=True):
        image_prep = tfl.ImagePreprocessing()
        if stdn:
            image_prep.add_featurewise_stdnorm(per_channel=True)
        if mean:
            image_prep.add_featurewise_zero_center(per_channel=True)
        return image_prep
def cnn_model_ic(features, labels, mode):
        """Model function for CNN."""
        
        # Real-time data preprocessing
        print("Doing preprocessing...")
        img_prep = tflearn.ImagePreprocessing()
        img_prep.add_featurewise_zero_center(per_channel=True, mean=[0.573364,0.44924123,0.39455055])
            
            # Real-time data augmentation
        print("Building augmentation...")
        img_aug = tflearn.ImageAugmentation()
        img_aug.add_random_flip_leftright()
        img_aug.add_random_crop([128, 128], padding=4)
        
        # Input Layer
        # Reshape X to 4-D tensor: [batch_size, width, height, channels]
        # The 61,326 images are 4160x3120 pixels; they are resized to 128x128 with three color channels
        # NOTE: this tflearn input layer is unused; it is immediately overwritten
        # below, and the Estimator graph is fed through features["x"] instead.
        input_layer = tflearn.input_data(shape=[None, 128, 128, 3],
                             data_preprocessing=img_prep,
                             data_augmentation=img_aug)
        input_layer = tf.reshape(features["x"], [-1, 128, 128, 3])
        # Convolutional Layer #1
        # Computes 32 features using a 5x5 filter with ReLU activation.
        # Padding is added to preserve width and height.
        # Input Tensor Shape: [batch_size, 128, 128, 3]
        # Output Tensor Shape: [batch_size, 128, 128, 32]
        conv1 = tf.layers.conv2d(
                inputs=input_layer,
                filters=32,
                kernel_size=[5, 5],
                padding="same",
                activation=tf.nn.relu)
        print "Conv1 is completed"
        # Pooling Layer #1
        # First max pooling layer with a 2x2 filter and stride of 2
        # Input Tensor Shape: [batch_size, 128, 128, 32]
        # Output Tensor Shape: [batch_size, 64, 64, 32]
        pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
        # Convolutional Layer #2
        # Computes 64 features using a 5x5 filter.
        # Padding is added to preserve width and height.
        # Input Tensor Shape: [batch_size, 64, 64, 32]
        # Output Tensor Shape: [batch_size, 64, 64, 64]
        print "Conv2 is in progress"
        conv2 = tf.layers.conv2d(
                inputs=pool1,
                filters=64,
                kernel_size=[5, 5],
                padding="same",
                activation=tf.nn.relu)
        # Pooling Layer #2
        # Second max pooling layer with a 2x2 filter and stride of 2
        # Input Tensor Shape: [batch_size, 64, 64, 64]
        # Output Tensor Shape: [batch_size, 32, 32, 64]
        print "Conv2 is completed"
        pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
        # Flatten tensor into a batch of vectors
        # Input Tensor Shape: [batch_size, 32, 32, 64]
        # Output Tensor Shape: [batch_size, 32 * 32 * 64]
        print "pool2 is in processing"
        pool2_flat = tf.reshape(pool2, [-1, 32 * 32 * 64])
        print "pool2 is completed"
        # Dense Layer
        # Densely connected layer with 1024 neurons
        # Input Tensor Shape: [batch_size, 32 * 32 * 64]
        # Output Tensor Shape: [batch_size, 1024]
        dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
        # Add dropout operation; 0.6 probability that element will be kept
        dropout = tf.layers.dropout(
                inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
        # Logits layer
        # Input Tensor Shape: [batch_size, 1024]
        # Output Tensor Shape: [batch_size, 7]
        logits = tf.layers.dense(inputs=dropout, units=7)
        
        predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
            "classes": tf.argmax(input=logits, axis=1),
        # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
        # `logging_hook`.
            "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
            }
        
        if mode == tf.estimator.ModeKeys.PREDICT:
            return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
        # Calculate Loss (for both TRAIN and EVAL modes)
        loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
        # Configure the Training Op (for TRAIN mode)
        if mode == tf.estimator.ModeKeys.TRAIN:
            optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
            train_op = optimizer.minimize(
                loss=loss,
                global_step=tf.train.get_global_step())
            return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
        # Add evaluation metrics (for EVAL mode)
        eval_metric_ops = {
                "accuracy": tf.metrics.accuracy(
                    labels=labels, predictions=predictions["classes"])}
        return tf.estimator.EstimatorSpec(
                mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
Example No. 22
    def AudioPredict(soundfile, class_test,threshold):
        import argparse
        import tensorflow as tf
        import tflearn
        from tflearn.layers.core import input_data, dropout, fully_connected
        from tflearn.layers.conv import conv_2d, max_pool_2d
        from tflearn.layers.normalization import local_response_normalization
        from tflearn.layers.estimator import regression
        import os
        import numpy as np
        import cv2
        from skimage.util.shape import view_as_blocks
        from skimage.util.shape import view_as_windows
        np.random.seed(1001)
        import librosa
        from pytictoc import TicToc
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' 
        tf.logging.set_verbosity(tf.logging.ERROR)

        patchscore = float(threshold.decode('base64', 'strict'))

        timet = TicToc()
        timet.tic()

        goobeps=0.82
        islandps=0.82
        mlworldps=0.82
        bootps=0.82
        tpcount=2e9
        
        tt=0

        classtestlist=['boot','goobe','island', 'mlworld']
        for p in range(len(classtestlist)):
            if class_test==classtestlist[p]:
                tt+=1
        if tt==0:
        
            print('class_test ', class_test, ' entered is not valid')
            print('acceptable class entries: ', classtestlist)

        print('loading audio file')

        #load sound file, get melspectrogram, crop overlapping tiles and save tensor
        X=np.random.random((1,128,128,1))
        wav1, sr = librosa.core.load(soundfile)
        stepper=int(np.ceil(len(wav1)/sr/6))
        Sxx=librosa.feature.melspectrogram(wav1)
        Sxx2 = np.log10(1+10*abs(Sxx))
        Sxx2norm=255*(Sxx2-Sxx2.min())/(Sxx2.max()-Sxx2.min());
        Sxx2norm=np.round_(Sxx2norm, decimals=0, out=None).astype(int)
        Sxx2norm=Sxx2norm.astype(np.uint8)
     
        if Sxx2norm.shape[1]>=128:
            s1=int(np.floor(Sxx2norm.shape[1]/128)*128)
            B = view_as_windows(Sxx2norm, window_shape=(128, 128),step=stepper) 

            for index in range(B.shape[1]):
                x=B[0][index]
                x = np.expand_dims(x, axis=0)
                x = np.expand_dims(x, axis=3)
                X = np.concatenate((X,x),axis=0)

        # remove random start
        X = X[1:,:,:,:]
        tf.reset_default_graph()

         # number of classes
        y=2
         
         # image preprocessing
        img_prep = tflearn.ImagePreprocessing()
        img_prep.add_featurewise_zero_center()# Zero Center (With mean computed over the whole dataset)
        img_prep.add_featurewise_stdnorm() #STD Normalization (With std computed over the whole dataset)

        # Building 'AlexNet'
        network = input_data(shape=[None, 128, 128, 1],data_preprocessing=img_prep)
        network = conv_2d(network, 96, 11, strides=4, activation='relu')
        network = max_pool_2d(network, 3, strides=2)
        network = local_response_normalization(network)
        network = conv_2d(network, 256, 5, activation='relu')
        network = max_pool_2d(network, 3, strides=2)
        network = local_response_normalization(network)
        network = conv_2d(network, 384, 3, activation='relu')
        network = conv_2d(network, 384, 3, activation='relu')
        network = conv_2d(network, 256, 3, activation='relu')
        network = max_pool_2d(network, 3, strides=2)
        network = local_response_normalization(network)
        network = fully_connected(network, 4096, activation='tanh')
        network = dropout(network, 0.5)
        network = fully_connected(network, 4096, activation='tanh')
        network = dropout(network, 0.5)
        network = fully_connected(network, y, activation='softmax')
        network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.0001)

        model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=2)

        # set up paramaters for model
        if class_test=='boot':
            classmodel='AudioModels/model_Alexnet_bootnet1/Alexnet_bootnet1' 
            patchscore=bootps
            tilepasscount=np.floor(tpcount/(len(wav1)*len(X)))
            if tilepasscount>len(X):
                tilepasscount=np.floor(0.75*len(X))
            
            print('loading boot test')
        elif class_test=='goobe':
            print('loading goobe test')
            classmodel='AudioModels/model_Alexnet_goobenet1/Alexnet_goobenet1'
            patchscore=goobeps
            tilepasscount=np.floor(tpcount/(len(wav1)*len(X)))
            if tilepasscount>len(X):
                tilepasscount=np.floor(0.25*len(X))

        elif class_test=='island':
            print('loading island test')
            classmodel='AudioModels/model_Alexnet_islandnet1/Alexnet_islandnet1'
            patchscore=islandps
            tilepasscount=np.floor(tpcount/(len(wav1)*len(X)))
            if tilepasscount>len(X):
                tilepasscount=np.floor(0.25*len(X))

        elif class_test=='mlworld':
            print('loading mlworld test')
            classmodel='AudioModels/model_Alexnet_mlworldnet1/Alexnet_mlworldnet1'
            patchscore=mlworldps
            tilepasscount=np.floor(tpcount/(len(wav1)*len(X)))
            if tilepasscount>len(X):
                tilepasscount=np.floor(0.25*len(X))

        
        model.load(classmodel)
        
        print("totaltilecount", len(X))
        print("tilepasscount", tilepasscount)
        lastPF=[]

        # Get prediction of tile list
        res=model.predict(X)
        
        print(res)

        # method 1: PASS if any one tile gets 99% or better
        topmatch=0
        for k in range(len(res[:,0])):
            if res[k,0]>=0.99:
                topmatch+=1
        
        # tally consecutive tiles that meet the threshold
        z = np.where((res[:,0]-patchscore)>0)
        z=z[0]
        tally=[0]
        for j in range(len(z)-1):
            if z[j+1]-z[j]==1:
                tally.append(1)
        if(sum(tally)!=0):
            tally.append(1)
        timet.toc()
        
        if(sum(tally)>=tilepasscount or topmatch>=1):
            lastPF.append(1)
            print(sum(tally),'/',len(X))
        else:

            lastPF.append(0)
            print(sum(tally),'/',len(X))
        
        if np.sum(lastPF)>=1:
       
            print('PASS')
            ret ='PASS'
            return ret
        else:
            print('FAIL')
            ret = 'FAIL'
            return ret
Example No. 23
import csv
from os import listdir
from tifffile import imread
from math import ceil, log
import pickle
import tflearn

class_map = {0:'building', 1:'barren_land',2:'trees',3:'grassland',4:'road',5:'water'}

tflearn.config.init_graph(num_cores=1, gpu_memory_fraction=0.3)

# Residual blocks
# 32 layers: n=5, 56 layers: n=9, 110 layers: n=18
n = 5

# Real-time data preprocessing
img_prep = tflearn.ImagePreprocessing()
img_prep.add_featurewise_zero_center([112.3256420256,1256.6802564592,1256.1983022562], per_channel=True)


# Building Residual Network
net = tflearn.input_data(shape=[None, 28, 28, 3], data_preprocessing=img_prep)
net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
net = tflearn.residual_block(net, n, 16)
net = tflearn.residual_block(net, 1, 32, downsample=True)
net = tflearn.residual_block(net, n-1, 32)
net = tflearn.residual_block(net, 1, 64, downsample=True)
net = tflearn.residual_block(net, n-1, 64)
net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
net = tflearn.global_avg_pool(net)
Example No. 24
def main(args):
    # Data loading
    fid = args.fid + "train"
    data = unpickle(fid)
    n_class = args.nclass

    train_feats, train_labs = load_data(args, "train")
    test_feats, test_labs = load_data(args, "test")

    # Real-time data preprocessing
    mean = [129.30416561, 124.0699627, 112.43405006]
    std = [51.20360335, 50.57829831, 51.56057865]
    img_prep = tflearn.ImagePreprocessing()
    img_prep.add_featurewise_zero_center(per_channel=True, mean=mean)
    img_prep.add_featurewise_stdnorm(per_channel=True, std=std)
    # Real-time data augmentation
    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_crop([32, 32], padding=4)

    # DenseNet
    depth = args.depth
    filters = args.growth_rate
    nb_blocks = 3
    #nb_layers_list = [6,12,48,32]
    nb_layers_list = [(depth - (nb_blocks + 1)) // (2 * nb_blocks)
                      for i in range(nb_blocks)]
    print(nb_layers_list)
    net = tflearn.input_data(shape=[None, 32, 32, 3],
                             data_preprocessing=img_prep,
                             data_augmentation=img_aug)
    print("input", net)
    net = tflearn.conv_2d(net,
                          nb_filter=2 * filters,
                          filter_size=3,
                          strides=1,
                          padding='same',
                          activation='linear',
                          bias=False,
                          name='conv0',
                          regularizer='L2',
                          weight_decay=1e-4)
    # net = tflearn.max_pool_2d(net, kernel_size=3, strides=2, padding='valid')
    print("init_layer", net)

    for i in range(nb_blocks - 1):
        net = dense_block(net,
                          filters,
                          nb_layers=nb_layers_list[i],
                          layer_name='dense_' + str(i + 1))
        net = transition_layer(net, scope='trans_' + str(i + 1))

    net = dense_block(net,
                      filters,
                      nb_layers=nb_layers_list[-1],
                      layer_name='dense_final')

    # Global Avg + FC
    net = tflearn.batch_normalization(net, scope='linear_batch')
    net = tf.nn.relu(net)
    net = tflearn.global_avg_pool(net)
    if args.finetune == 1:
        net = tflearn.fully_connected(net,
                                      n_class,
                                      activation='softmax',
                                      regularizer='L2',
                                      weight_decay=1e-4,
                                      restore=False)
    else:
        net = tflearn.fully_connected(net,
                                      n_class,
                                      activation='softmax',
                                      regularizer='L2',
                                      weight_decay=1e-4)

    # Optimizer
    opt = tf.train.MomentumOptimizer(learning_rate=args.lr,
                                     momentum=0.9,
                                     use_nesterov=True)
    epsilon = 1e-4
    learning_rate = 1e-4
    # opt = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=epsilon)

    # Regression
    net = tflearn.regression(net,
                             optimizer=opt,
                             loss='categorical_crossentropy',
                             restore=False)

    # Training
    config = tf.ConfigProto()
    config.allow_soft_placement = True
    config.gpu_options.allow_growth = True
    tf.add_to_collection(tf.GraphKeys.GRAPH_CONFIG, config)
    model = tflearn.DNN(net,
                        checkpoint_path='/data/srd/models/image/model_' +
                        args.model_name + '/model',
                        tensorboard_dir='/data/srd/logs/image/log_' +
                        args.model_name,
                        max_checkpoints=3,
                        tensorboard_verbose=0,
                        clip_gradients=0.0)

    if args.onlyevalue == 1:
        model.load("/data/srd/models/image/" + args.pre_train + "/model.tfl")
        n_test = len(test_feats)
        n_batch = 10
        batch_size = n_test // 10  # integer division so the slices below use int indices
        labsp = model.predict(test_feats[0:batch_size])
        for i in range(1, 10):
            labsp = np.vstack([
                labsp,
                model.predict(test_feats[i * batch_size:(i + 1) * batch_size])
            ])
        print(metrics.classification_report(test_labs, np.argmax(labsp, 1)))
        print("acc:", metrics.accuracy_score(test_labs, np.argmax(labsp, 1)))

        np.argmax(labsp,
                  1).tofile("/data/srd/data/cifar/" + args.pre_train + ".bin")

        return

    # pre-train model
    if args.pre_train:
        model.load("/data/srd/models/image/" + args.pre_train + "/model.tfl",
                   weights_only=True)
    try:
        model.fit(train_feats,
                  train_labs,
                  n_epoch=args.epoch,
                  validation_set=(test_feats, test_labs),
                  snapshot_epoch=False,
                  snapshot_step=500,
                  show_metric=True,
                  batch_size=64,
                  shuffle=True,
                  run_id=args.model_name)
    except KeyboardInterrupt:
        print("Keyboard Interrupt")

    model.save("/data/srd/models/image/" + args.model_name + "/model.tfl")