Example #1
def get_image():
    # Pull the uploaded file out of the Flask request and save it under a
    # unique name so concurrent uploads cannot overwrite each other.
    img = request.files.get('photo')
    path = "static/images/"
    file_path = path + generate_unique_id()
    img.save(file_path)

    return file_path
Example #2
    def rotate(self, path_to_image):
        img = cv2.imread(path_to_image, 1)
        (h, w) = img.shape[:2]
        # Rotate around the image centre by a random angle between 1 and 89 degrees.
        center = (w / 2, h / 2)
        angle = int(np.random.randint(1, 90))
        M = cv2.getRotationMatrix2D(center, angle, 1.0)
        rotated = cv2.warpAffine(img, M, (w, h))
        save_path = os.path.join(self.save_folder, 'rotate_' + str(angle) + '_' + generate_unique_id())
        cv2.imwrite(save_path, rotated)
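Example #2 and the similar methods in Examples #4, #6, #8 and #11-#13 below belong to an augmentation class that writes its output into self.save_folder. The class itself is not part of these examples; a minimal skeleton consistent with the methods (the class name and default folder are assumptions) might look like:

import os


class Augmenter:  # hypothetical name; the project's own class is not shown here
    def __init__(self, save_folder='static/images/augmentation/'):
        self.save_folder = save_folder
        # Make sure the target folder exists before any method writes into it.
        os.makedirs(self.save_folder, exist_ok=True)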
Example #3
def ResNet34(path_to_image):
    img_dir = path_to_image
    img_data = np.array(cv2.imread(img_dir, 1)).astype('float32')
    img_data = cv2.resize(img_data, (224, 224))
    img_data = tf.reshape(img_data,
                          [1, img_data.shape[0], img_data.shape[1], 3])

    x = Conv2d_BN(img_data,
                  nb_filter=64,
                  kernel_size=(7, 7),
                  strides=(2, 2),
                  padding='valid')
    x = tf.layers.MaxPooling2D(pool_size=(3, 3),
                               strides=(2, 2),
                               padding='same')(x)
    # (56,56,64)
    x = Conv_Block(x, nb_filter=64, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=64, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=64, kernel_size=(3, 3))
    # (28,28,128)
    x = Conv_Block(x,
                   nb_filter=128,
                   kernel_size=(3, 3),
                   strides=(2, 2),
                   with_conv_shortcut=True)
    x = Conv_Block(x, nb_filter=128, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=128, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=128, kernel_size=(3, 3))
    # (14,14,256)
    x = Conv_Block(x,
                   nb_filter=256,
                   kernel_size=(3, 3),
                   strides=(2, 2),
                   with_conv_shortcut=True)
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3))
    # (7,7,512)
    x = Conv_Block(x,
                   nb_filter=512,
                   kernel_size=(3, 3),
                   strides=(2, 2),
                   with_conv_shortcut=True)
    x = Conv_Block(x, nb_filter=512, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=512, kernel_size=(3, 3))
    x = tf.layers.AveragePooling2D(pool_size=(7, 7), strides=2)(x)

    feature = x.numpy()
    feature = feature.astype('uint8')
    feature = np.reshape(feature, [32, 16])

    result_dir = 'static/images/extractor/' + 'ResNet34_' + generate_unique_id()
    cv2.imwrite(result_dir, feature)

    return result_dir
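Example #3 and the other feature extractors below (Examples #5, #9, #10, #14 and #15) mix the deprecated tf.layers API with .numpy() calls on the resulting tensors, which only works on TensorFlow 1.x with eager execution switched on. The setup is not shown in these snippets, but presumably amounts to something like:

import tensorflow as tf

# Eager execution is off by default on TF 1.x; it has to be enabled once,
# before any tensors are created, for .numpy() to work in these functions.
tf.enable_eager_execution()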
Example #4
    def saturation(self, path_to_image):
        img = cv2.imread(path_to_image, 1)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        # Randomly decrease or increase the saturation channel.
        if np.random.randint(1, 10) < 5:
            img[:, :, 1] = img[:, :, 1] / 1.55
        else:
            # Clip to avoid uint8 overflow when boosting saturation.
            img[:, :, 1] = np.clip(img[:, :, 1] * 1.55, 0, 255)
        img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)

        save_path = os.path.join(self.save_folder, 'saturation_' + generate_unique_id())
        cv2.imwrite(save_path, img)
Example #5
def ResNet50(path_to_image):
    img_dir = path_to_image
    img_data = np.array(cv2.imread(img_dir, 1)).astype('float32')
    img_data = cv2.resize(img_data, (224, 224))
    img_data = tf.reshape(img_data,
                          [1, img_data.shape[0], img_data.shape[1], 3])

    #x = keras.layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(img_data)
    x = tf.layers.Conv2D(64, (7, 7),
                         strides=(2, 2),
                         padding='valid',
                         kernel_initializer='he_normal',
                         name='conv1')(img_data)
    x = tf.layers.BatchNormalization(axis=3, name='bn_conv1')(x)

    #x = keras.layers.ZeroPadding2D(padding=(1, 1), name='pool1_pad')(x)
    x = tf.layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
    x = tf.layers.AveragePooling2D(name='avg_pool',
                                   pool_size=[7, 7],
                                   strides=[2, 2])(x)

    feature = x.numpy()
    feature = feature.astype('uint8')
    feature = np.reshape(feature, [32, 64])

    result_dir = 'static/images/extractor/' + 'ResNet50_' + generate_unique_id()
    cv2.imwrite(result_dir, feature)

    return result_dir
Example #6
    def contrast_ratio(self, path_to_image):
        img = cv2.imread(path_to_image, 1)

        def gamma_trans(img, gamma):
            # Build a 256-entry lookup table implementing the gamma curve.
            gamma_list = [np.power(x / 255.0, gamma) * 255.0 for x in range(256)]
            gamma_table = np.round(np.array(gamma_list)).astype(np.uint8)
            return cv2.LUT(img, gamma_table)

        # Randomly brighten (gamma < 1) or darken (gamma > 2) the image.
        if np.random.randint(1, 10) < 5:
            img_gamma = gamma_trans(img, np.random.rand())
        else:
            img_gamma = gamma_trans(img, 2 + np.random.rand())

        save_path = os.path.join(self.save_folder, 'contrast_ratio_' + generate_unique_id())
        cv2.imwrite(save_path, img_gamma)
Example #7
File: app.py Project: lighTQ/ADSP
def detect():
    get_image()

    min_threshold = float(request.values.get('threshold'))

    output_dict, result_ad = detection(
        path_to_frozen_model='frozen_model/frozen_inference_graph.pb',
        path_to_labels='frozen_model/label_map.pbtxt',
        path_to_images='static/images/test.jpg',
        path_to_results='static/images/' +
        generate_unique_id.generate_unique_id(),
        min_score_thresh=min_threshold)
    data = analysis(output_dict, 5)
    data['address'] = result_ad
    # Round-trip through JSON with the custom encoder so numpy values become
    # plain Python types the template can consume.
    data = json.dumps(data, cls=encoder)
    data = json.loads(data)

    return render_template('index.html', data=data)
Example #8
    def noise(self, path_to_image):
        # Add salt-and-pepper noise to a random subset of pixels.
        def SaltAndPepper(src, percentage):
            SP_NoiseImg = src.copy()
            SP_NoiseNum = int(percentage * src.shape[0] * src.shape[1])
            for i in range(SP_NoiseNum):
                randX = np.random.randint(0, src.shape[0])
                randY = np.random.randint(0, src.shape[1])
                if np.random.randint(0, 2) == 0:
                    SP_NoiseImg[randX, randY] = 0
                else:
                    SP_NoiseImg[randX, randY] = 255
            return SP_NoiseImg

        img = cv2.imread(path_to_image, 1)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        noise_image = SaltAndPepper(img, 0.01)

        save_path = os.path.join(self.save_folder, 'noise_' + generate_unique_id())
        cv2.imwrite(save_path, noise_image)
Example #9
def LeNet5(path_to_image):
    img_dir = path_to_image
    img_data = np.array(cv2.imread(img_dir, 1)).astype('float32')
    img_data = cv2.resize(img_data, (224, 224))
    img_data = tf.reshape(img_data,
                          [1, img_data.shape[0], img_data.shape[1], 3])

    feature = tf.layers.Conv2D(filters=6,
                               kernel_size=[5, 5],
                               strides=(1, 1),
                               padding='same',
                               activation=tf.nn.relu)(img_data)
    feature = tf.layers.MaxPooling2D(pool_size=(2, 2),
                                     strides=(2, 2),
                                     padding='same')(feature)
    feature = tf.layers.Conv2D(filters=16,
                               kernel_size=[5, 5],
                               strides=(1, 1),
                               padding='valid',
                               activation=tf.nn.relu)(feature)
    feature = tf.layers.MaxPooling2D(pool_size=(2, 2),
                                     strides=(2, 2),
                                     padding='same')(feature)
    feature = tf.layers.Conv2D(filters=120,
                               kernel_size=(5, 5),
                               strides=(1, 1),
                               padding='same',
                               activation=tf.nn.relu)(feature)

    feature = feature.numpy()
    feature = feature.astype('uint8')
    feature = feature[0][:, :, 0]
    result_dir = 'static/images/extractor/' + 'LeNet5_' + generate_unique_id()
    cv2.imwrite(result_dir, feature)

    return result_dir
Example #10
def standard_conv(strides, padding, filter_size, path_to_image):
    data = {
        'path_conv1': [],
        'path_conv2': [],
        'path_conv3': [],
        'path_conv4': [],
        'histogram_1C': [],
        'histogram_2C': [],
        'histogram_3C': []
    }

    img_dir = path_to_image
    img_data = np.array(cv2.imread(img_dir, 1)).astype('float32')
    img_data = tf.reshape(img_data,
                          [1, img_data.shape[0], img_data.shape[1], 3])

    for i in range(4):
        img_data = tf.layers.conv2d(inputs=img_data,
                                    filters=1,
                                    kernel_size=[filter_size, filter_size],
                                    strides=[strides, strides],
                                    padding=padding)

        # Collapse the [1, H, W, 1] tensor into a 2-D array before writing it out.
        img_data = np.squeeze(img_data.numpy())
        img_data = img_data.astype('uint8')

        result_dir = 'static/images/conv/' + str(i) + generate_unique_id()
        cv2.imwrite(result_dir, img_data)
        key = 'path_conv' + str(i + 1)
        # save the path of each feature map
        data[key] = result_dir

        img_data = tf.reshape(img_data,
                              [1, img_data.shape[0], img_data.shape[1], 1])
        img_data = tf.cast(img_data, dtype=tf.float32)
    return data
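A call to standard_conv might look like the following; the argument values are illustrative, only the signature and the returned dictionary come from the example above:

# Hypothetical invocation: four stacked 3x3 convolutions with stride 1 and
# 'same' padding applied to an uploaded image.
data = standard_conv(strides=1,
                     padding='same',
                     filter_size=3,
                     path_to_image='static/images/test.jpg')
print(data['path_conv1'])  # path of the first feature map written to disk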
Example #11
    def mirroring(self, path_to_image):
        img = cv2.imread(path_to_image, 1)
        # flipCode=1 mirrors the image horizontally.
        img_flip = cv2.flip(img, 1)
        save_path = os.path.join(self.save_folder, 'mirroring_' + generate_unique_id())
        cv2.imwrite(save_path, img_flip)
Example #12
    def shear(self, path_to_image):
        img = cv2.imread(path_to_image, 1)
        (h, w) = img.shape[:2]
        # Crop a quarter-sized window out of the image.
        cropped = img[int(h / 4):int(h / 2), int(w / 4):int(w / 2)]
        save_path = os.path.join(self.save_folder, 'shear_' + generate_unique_id())
        cv2.imwrite(save_path, cropped)
Example #13
    def zoom_out(self, path_to_image):
        img = cv2.imread(path_to_image, 1)
        (h, w) = img.shape[:2]
        # Shrink both sides by a random number of pixels (1-29).
        change = int(np.random.randint(1, 30))
        dst_size = (w - change, h - change)  # cv2.resize expects (width, height)
        method = cv2.INTER_NEAREST
        resized = cv2.resize(img, dst_size, interpolation=method)
        save_path = os.path.join(self.save_folder, 'zoom_out_' + str(change) + '_' + generate_unique_id())
        cv2.imwrite(save_path, resized)
Example #14
def VGGNet19(path_to_image):
    img_dir = path_to_image
    img_data = np.array(cv2.imread(img_dir, 1)).astype('float32')
    img_data = cv2.resize(img_data, (224, 224))
    img_data = tf.reshape(img_data, [1, img_data.shape[0], img_data.shape[1], 3])

    # Block 1
    x = tf.layers.Conv2D(64, (3, 3),
                      activation=tf.nn.relu,
                      padding='same',
                      name='block1_conv1')(img_data)
    x = tf.layers.Conv2D(64, (3, 3),
                      activation=tf.nn.relu,
                      padding='same',
                      name='block1_conv2')(x)
    x = tf.layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = tf.layers.Conv2D(128, (3, 3),
                      activation=tf.nn.relu,
                      padding='same',
                      name='block2_conv1')(x)
    x = tf.layers.Conv2D(128, (3, 3),
                      activation=tf.nn.relu,
                      padding='same',
                      name='block2_conv2')(x)
    x = tf.layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3
    x = tf.layers.Conv2D(256, (3, 3),
                      activation=tf.nn.relu,
                      padding='same',
                      name='block3_conv1')(x)
    x = tf.layers.Conv2D(256, (3, 3),
                      activation=tf.nn.relu,
                      padding='same',
                      name='block3_conv2')(x)
    x = tf.layers.Conv2D(256, (3, 3),
                      activation=tf.nn.relu,
                      padding='same',
                      name='block3_conv3')(x)
    x = tf.layers.Conv2D(256, (3, 3),
                      activation=tf.nn.relu,
                      padding='same',
                      name='block3_conv4')(x)
    x = tf.layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4
    x = tf.layers.Conv2D(512, (3, 3),
                      activation=tf.nn.relu,
                      padding='same',
                      name='block4_conv1')(x)
    x = tf.layers.Conv2D(512, (3, 3),
                      activation=tf.nn.relu,
                      padding='same',
                      name='block4_conv2')(x)
    x = tf.layers.Conv2D(512, (3, 3),
                      activation=tf.nn.relu,
                      padding='same',
                      name='block4_conv3')(x)
    x = tf.layers.Conv2D(512, (3, 3),
                      activation=tf.nn.relu,
                      padding='same',
                      name='block4_conv4')(x)
    x = tf.layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    # Block 5
    x = tf.layers.Conv2D(512, (3, 3),
                      activation=tf.nn.relu,
                      padding='same',
                      name='block5_conv1')(x)
    x = tf.layers.Conv2D(512, (3, 3),
                      activation=tf.nn.relu,
                      padding='same',
                      name='block5_conv2')(x)
    x = tf.layers.Conv2D(512, (3, 3),
                      activation=tf.nn.relu,
                      padding='same',
                      name='block5_conv3')(x)
    x = tf.layers.Conv2D(512, (3, 3),
                      activation=tf.nn.relu,
                      padding='same',
                      name='block5_conv4')(x)
    x = tf.layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

    feature = x.numpy()
    feature = feature.astype('uint8')
    feature = feature[0][:, :, 0]
    result_dir = 'static/images/extractor/' + 'VGGNet19_' + generate_unique_id()
    cv2.imwrite(result_dir, feature)

    return result_dir
Example #15
def AlexNet(path_to_image):
    img_dir = path_to_image
    img_data = np.array(cv2.imread(img_dir, 1)).astype('float32')
    img_data = cv2.resize(img_data, (224, 224))
    img_data = tf.reshape(img_data,
                          [1, img_data.shape[0], img_data.shape[1], 3])

    # Define the convolutional layer 1
    conv1 = tf.layers.Conv2D(filters=96,
                             kernel_size=[11, 11],
                             strides=[2, 2],
                             activation=tf.nn.relu,
                             use_bias=True,
                             padding='valid')(img_data)

    # Define the pooling layer 1
    pooling1 = tf.layers.AveragePooling2D(pool_size=[3, 3],
                                          strides=[2, 2],
                                          padding='valid')(conv1)

    # Define the standardization layer 1
    stand1 = tf.layers.BatchNormalization(axis=3)(pooling1)

    # Define the convolutional layer 2
    conv2 = tf.layers.Conv2D(filters=256,
                             kernel_size=[5, 5],
                             strides=[2, 2],
                             activation=tf.nn.relu,
                             use_bias=True,
                             padding='valid')(stand1)

    # Define the pooling layer 2
    pooling2 = tf.layers.AveragePooling2D(pool_size=[3, 3],
                                          strides=[2, 2],
                                          padding='valid')(conv2)

    # Define the standardization layer 2
    stand2 = tf.layers.BatchNormalization(axis=3)(pooling2)

    # Define the convolutional layer 3
    conv3 = tf.layers.Conv2D(filters=384,
                             kernel_size=[3, 3],
                             strides=[1, 1],
                             activation=tf.nn.relu,
                             use_bias=True,
                             padding='valid')(stand2)

    # Define the convolutional layer 4
    conv4 = tf.layers.Conv2D(filters=384,
                             kernel_size=[3, 3],
                             strides=[1, 1],
                             activation=tf.nn.relu,
                             use_bias=True,
                             padding='valid')(conv3)

    # Define the convolutional layer 5
    conv5 = tf.layers.Conv2D(filters=382,
                             kernel_size=[3, 3],
                             strides=[2, 2],
                             activation=tf.nn.relu,
                             use_bias=True,
                             padding='valid')(conv4)

    feature = conv5.numpy()
    feature = feature.astype('uint8')
    feature = feature[0][:, :, 0]

    result_dir = 'static/images/extractor/' + 'AlexNet_' + generate_unique_id()
    cv2.imwrite(result_dir, feature)

    return result_dir
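Every example above relies on a generate_unique_id() helper whose definition is not included on this page. Since its return value is passed straight to cv2.imwrite, it has to end in an image extension; a minimal sketch of such a helper (an assumption, not the project's actual implementation) could be:

import uuid


def generate_unique_id():
    # Collision-resistant file name; the .jpg extension is an assumption,
    # since cv2.imwrite needs an extension to choose the encoder.
    return uuid.uuid4().hex + '.jpg'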