Example #1
def save_bottleneck_features():
    model = MobileNet(include_top=False, weights='imagenet', input_shape=(150, 150, 3))
    print('load model ok')
    datagen = ImageDataGenerator(rescale=1. / 255)
 
    # train set image generator
    train_generator = datagen.flow_from_directory(
        '/data/lebron/data/mytrain',
        target_size=(150, 150),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False
    )
 
    # test set image generator
    test_generator = datagen.flow_from_directory(
        '/data/lebron/data/mytest',
        target_size=(150, 150),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False
    )
 
    # load weights
    model.load_weights(WEIGHTS_PATH_NO_TOP)
    print('load weight ok')
    # get bottleneck features
    bottleneck_features_train = model.predict_generator(train_generator, 10)
    np.save(save_train_path, bottleneck_features_train)
 
    bottleneck_features_validation = model.predict_generator(test_generator, 2)
    np.save(save_test_path, bottleneck_features_validation)
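This snippet relies on module-level names defined elsewhere in the original script. A minimal setup sketch with placeholder values (the real batch size and paths are not shown in the source):

import numpy as np
from keras.applications.mobilenet import MobileNet
from keras.preprocessing.image import ImageDataGenerator

batch_size = 16                                         # assumed value
WEIGHTS_PATH_NO_TOP = 'mobilenet_1_0_224_tf_no_top.h5'  # assumed local weight file
save_train_path = 'bottleneck_features_train.npy'       # assumed
save_test_path = 'bottleneck_features_test.npy'         # assumed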
Example #2
    def __init__(self, input_size):
        input_image = Input(shape=(input_size, input_size, 3))

        mobilenet = MobileNet(input_shape=(224, 224, 3), include_top=False)
        mobilenet.load_weights("data/mobilenet_backend.h5")
        x = mobilenet(input_image)

        self.feature_extractor = Model(input_image, x)
Example #3
    def __init__(self, input_shape):
        input_image = Input(shape=tuple(input_shape))

        mobilenet = MobileNet(input_shape=(224, 224, 3), include_top=False)
        mobilenet.load_weights(MOBILENET_BACKEND_PATH)

        x = mobilenet(input_image)

        self.feature_extractor = Model(input_image, x)
Example #4
    def __init__(self, input_size, weights):
        input_image = Input(shape=(input_size, input_size, 3))

        mobilenet = MobileNet(input_shape=(224,224,3), include_top=False)
        if weights:
            mobilenet.load_weights(weights)

        x = mobilenet(input_image)
        self.feature_extractor = Model(input_image, x)  
Example #5
    def __init__(self, input_size, weights):
        input_image = Input(shape=(input_size, input_size, 3))

        mobilenet = MobileNet(input_shape=(224, 224, 3), include_top=False)
        if weights:
            mobilenet.load_weights(weights)

        x = mobilenet(input_image)
        self.feature_extractor = Model(input_image, x)
Example #6
    def __init__(self, input_size):
        input_image = Input(shape=(input_size, input_size, 3))

        mobilenet = MobileNet(input_shape=(224,224,3), include_top=False)
        mobilenet.load_weights(MOBILENET_BACKEND_PATH)

        x = mobilenet(input_image)

        self.feature_extractor = Model(input_image, x)  
Example #7
    def __init__(self, input_size):
        input_image = Input(shape=(input_size, input_size, 3))

        mobilenet = MobileNet(input_shape=(input_size, input_size, 3),
                              include_top=False)
        mobilenet.load_weights(MOBILENET_BACKEND_PATH, by_name=True)

        x = mobilenet(input_image)

        self.feature_extractor = Model(input_image, x)
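Examples #2 through #7 all wrap the same pattern: run MobileNet's convolutional base over an input tensor and expose it as feature_extractor. A hedged usage sketch (the wrapper class name and input size are illustrative, not taken from the source):

import numpy as np

backend = MobileNetFeature(416)            # hypothetical class name and size
dummy = np.random.rand(1, 416, 416, 3)     # one RGB image with values in [0, 1]
features = backend.feature_extractor.predict(dummy)
print(features.shape)                      # (1, 13, 13, 1024): spatial dims are input_size / 32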
Example #8
    def _build_model(self):
        logger.info("Building mobilenet...")
        print("building mobilenet")
        input_image = Input(shape=(self.input_size, self.input_size, 3))

        mobilenet = MobileNet(input_shape=(224, 224, 3), include_top=False)

        mobilenet.load_weights(self.weight_file)

        x = mobilenet(input_image)

        return Model(input_image, x)
Example #9
    def __init__(self, model_path="weights/vasya_mobilenetv2_keypoints.h5", model_type='mobilenet_v2'):
        if model_type == 'mobilenet':
            model = MobileNet(input_shape=(112, 112, 3), classes=68*2, weights=None)
        else:
            model = MobileNetV2(input_shape=(112, 112, 3), classes=68*2, weights=None)

        out = model.layers[-2].output
        out = Dense(68*2, activation='linear')(out)
        model = Model(inputs=model.input, outputs=out)
        model.compile(optimizer=Adam(), loss='mse')

        with open(model_path, 'rb') as f:
            model.load_weights(f)

        self.model = model
Example #10
    def __init__(self, input_size):
        input_image = Input(shape=input_size)

        mobilenet = MobileNet(input_shape=(224,224,3), include_top=False)
        if input_size[2] == 3:
            try:
                mobilenet.load_weights(MOBILENET_BACKEND_PATH)
            except Exception:
                print("Unable to load backend weights. Using a fresh model")
        else:
            print('Pre-trained weights are only available for the RGB network.')

        x = mobilenet(input_image)

        self.feature_extractor = Model(input_image, x, name='MobileNet_backend')  
Example #11
def mobilenet(weight_dir):
    model = MobileNet(input_shape=(64, 64, 1),
                      alpha=1.0,
                      depth_multiplier=1,
                      dropout=1e-3,
                      include_top=True,
                      weights=None,
                      input_tensor=None,
                      pooling='avg',
                      classes=4)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    model.load_weights(weight_dir)
    return model
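Because this model is built with weights=None and a single-channel 64x64 input, ImageNet weights do not apply; weight_dir must point to weights trained for this exact topology. A hedged usage sketch (the weight path is illustrative):

import numpy as np

model = mobilenet('weights/my_4class_weights.h5')   # hypothetical weight file
probs = model.predict(np.random.rand(1, 64, 64, 1))
print(probs.shape)                                  # (1, 4) softmax scores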
Example #12
class Navigation:
    def __init__(self):
        self.imageSub = message_filters.Subscriber(
            "robot1/camera/rgb/image_raw", Image)
        self.odometrySub = message_filters.Subscriber("robot1/odom", Odometry)
        self.cmd_vel = rospy.Publisher("/robot1/mobile_base/commands/velocity",
                                       Twist,
                                       queue_size=1)

        ts = message_filters.TimeSynchronizer(
            [self.imageSub, self.odometrySub], 10)
        ts.registerCallback(self.callback)

        self.bridge = CvBridge()  # named to match its use in callback()
        self.lastImage = None
        self.OdomLX = None
        self.OdomAZ = None

        self.model = MobileNet()
        self.model.load_weights(
            "./weights/WeightsMobileNetTrain1/results/weights.59-0.01.hdf5")
        self.model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])

    def callback(self, imageMsg, odometryMsg):
        try:
            # Convert the ROS image to an OpenCV array and resize for MobileNet
            # (assumes cv2 is imported; the original shadowed the module name).
            image = self.bridge.imgmsg_to_cv2(imageMsg, "mono8")
            self.lastImage = cv2.resize(image, (224, 224))
            self.OdomLX = odometryMsg.twist.twist.linear.x
            self.OdomAZ = odometryMsg.twist.twist.angular.z
        except Exception as e:
            print(e)

    def run(self):
        batch = [self.lastImage]
        inputModel = np.array(batch)
        output = self.model.predict(inputModel, batch_size=1, verbose=0)
        move_cmd = Twist()
        move_cmd.linear.x = output[0][0]
        move_cmd.angular.z = output[0][1]
        self.cmd_vel.publish(move_cmd)  # publish the predicted velocity command
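Note a mismatch in this example: the default MobileNet() expects (224, 224, 3) inputs, while the callback stores a single-channel mono8 frame. A hedged fix, assuming the network was trained on grayscale frames replicated across channels:

import numpy as np

def to_three_channels(gray_frame):
    # Replicate a (224, 224) mono8 frame into (224, 224, 3) for MobileNet.
    return np.stack([gray_frame] * 3, axis=-1)

run() could then build its input as np.array([to_three_channels(self.lastImage)]).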
Example #13
def main():
    # Parameters
    if len(sys.argv) == 3:
        superclass = sys.argv[1]
        model_weight = sys.argv[2]
    else:
        print('Parameters error')
        exit()

    # The constants
    classNum = {'A': 40, 'F': 40, 'V': 40, 'E': 40, 'H': 24}
    testName = {'A': 'a', 'F': 'a', 'V': 'b', 'E': 'b', 'H': 'b'}
    date = '20180321'

    # Feature extraction model
    base_model = MobileNet(include_top=True, weights=None,
                           input_tensor=None, input_shape=None,
                           pooling=None, classes=classNum[superclass[0]])
    base_model.load_weights(model_weight)
    base_model.summary()
    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer('global_average_pooling2d_1').output)

    imgdir_train = '../zsl_'+testName[superclass[0]]+'_'+str(superclass).lower()+'_train_'+date\
                     +'/zsl_'+testName[superclass[0]]+'_'+str(superclass).lower()+'_train_images_'+date
    imgdir_test = '../zsl_'+testName[superclass[0]]+'_'+str(superclass).lower()+'_test_'+date
    categories = os.listdir(imgdir_train)
    categories.append('test')

    num = 0
    for eachclass in categories:
        if eachclass[0] == '.':
            continue
        if eachclass == 'test':
            classpath = imgdir_test
        else:
            classpath = imgdir_train+'/'+eachclass
        num += len(os.listdir(classpath))

    print('Total image number = '+str(num))

    features_all = np.ndarray((num, 1024))
    labels_all = list()
    images_all = list()
    idx = 0

    # Feature extraction
    for i in tqdm(range(len(categories))):
        eachclass = categories[i]
        if eachclass[0] == '.':
            continue
        if eachclass == 'test':
            classpath = imgdir_test
        else:
            classpath = imgdir_train+'/'+eachclass
        imgs = os.listdir(classpath)

        for eachimg in imgs:
            if eachimg[0] == '.':
                continue

            img_path = classpath+'/'+eachimg
            img = image.load_img(img_path, target_size=(224, 224))
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            feature = model.predict(x)

            features_all[idx, :] = feature
            labels_all.append(eachclass)
            images_all.append(eachimg)
            idx += 1

    features_all = features_all[:idx, :]
    labels_all = labels_all[:idx]
    images_all = images_all[:idx]
    data_all = {'features_all':features_all, 'labels_all':labels_all,
                'images_all':images_all}

    # Save features
    savename = 'features_' + superclass + '.pickle'
    fsave = open(savename, 'wb')
    pickle.dump(data_all, fsave)
    fsave.close()
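The saved pickle can be read back for the downstream steps; a minimal sketch ('Animals' is one valid superclass value):

import pickle

with open('features_Animals.pickle', 'rb') as f:
    data_all = pickle.load(f)
features = data_all['features_all']   # (N, 1024) MobileNet pooling features
labels = data_all['labels_all']       # one class name per row ('test' for test images)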
Example #14
    # x = Dense(512, activation='relu')(x)
    # x = Dropout(rate=0.3)(x)
    # predictions = Dense(2, activation='softmax')(x)
    # d_model = Model(inputs=base_model.input, outputs=predictions)
    #
    # for layer in base_model.layers:
    #     layer.trainable = False
    #
    # for layer in d_model.layers[:]:
    #     print(layer.name, layer.trainable)
    #
    # print(d_model.summary())
    # d_model.compile(optimizer=SGD(lr=1e-6, momentum=0.9), loss=EuiLoss, metrics=[y_t, y_pre, Acc])
    #3 MobileNet============================================================================================================
    base_model = MobileNet(weights=None, include_top=False)
    base_model.load_weights(
        filepath=r'F:\@data_response_2d\weight\3_mobilenet.h5', by_name=True)
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dropout(rate=0.3)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(rate=0.3)(x)
    predictions = Dense(2, activation='softmax')(x)
    d_model = Model(inputs=base_model.input, outputs=predictions)

    for layer in base_model.layers:
        layer.trainable = False

    for layer in d_model.layers[:]:
        print(layer.name, layer.trainable)

    pause()
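The example stops after freezing the base; the commented-out lines above show the author compiling with a custom EuiLoss and metrics that are not defined in this excerpt. A minimal compile sketch with standard stand-ins:

from keras.optimizers import SGD

d_model.compile(optimizer=SGD(lr=1e-6, momentum=0.9),
                loss='categorical_crossentropy',   # stand-in for the custom EuiLoss
                metrics=['accuracy'])              # stand-in for y_t, y_pre, Acc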
Example #15
    params = json.load(json_file)

#Set the path of train and test. Also set the path to save the model

outing = "./"
weightfile = "./NIMA.hdf5"

#Define the base model for loading the structure of the mobilenet architecture.
base_model = MobileNet((224, 224, 3),
                       alpha=1,
                       include_top=False,
                       pooling='avg',
                       weights=None)
#Load the weights in the model from the NIMA architecture. We only require the convolution layers
#of the NIMA architecture
base_model.load_weights(weightfile, by_name=True)

#Define a model cut till the conv_pw_13_relu layer.
modelcut = build_bottleneck_model(base_model, 'conv_pw_13_relu')

#Add extra Depthwise conv blocks
interimoutput = depthwise_conv_block(modelcut.output,
                                     2048,
                                     1,
                                     1,
                                     strides=(2, 2),
                                     block_id=14)

#Do Global Average pooling as intended in the NIMA paper.
interimoutput = GlobalAveragePooling2D()(interimoutput)
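build_bottleneck_model is a project helper not shown in this excerpt. A plausible minimal implementation, assuming it simply truncates the model at a named layer:

from keras.models import Model

def build_bottleneck_model(model, layer_name):
    # Return a sub-model that ends at the given layer.
    return Model(inputs=model.input, outputs=model.get_layer(layer_name).output)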
Example #16
def mobile_u_net(input_size):
    num_classes = 1

    # Build contracting path using pre-trained MobileNet
    contracting_path = MobileNet(input_shape=(input_size, input_size, 3),
                                 include_top=False,
                                 weights=None)
    contracting_path.load_weights('weights/mobilenet_1_0_224_tf_no_top.h5')
    for layer in contracting_path.layers:
        layer.trainable = False

    # Build expanding path based on U-Net structure
    center = contracting_path.output
    up4 = UpSampling2D((2, 2))(center)
    conv_pw_11_relu = contracting_path.get_layer(name='conv_pw_11_relu').output
    up4 = concatenate([up4, conv_pw_11_relu], axis=3)
    up4 = Conv2D(512, (3, 3), padding='same')(up4)
    up4 = BatchNormalization()(up4)
    up4 = PReLU()(up4)
    up4 = Conv2D(512, (3, 3), padding='same')(up4)
    up4 = BatchNormalization()(up4)
    up4 = PReLU()(up4)
    up4 = Conv2D(512, (3, 3), padding='same')(up4)
    up4 = BatchNormalization()(up4)
    up4 = PReLU()(up4)
    # 64

    up3 = UpSampling2D((2, 2))(up4)
    conv_pw_5_relu = contracting_path.get_layer(name='conv_pw_5_relu').output
    up3 = concatenate([up3, conv_pw_5_relu], axis=3)
    up3 = Conv2D(256, (3, 3), padding='same')(up3)
    up3 = BatchNormalization()(up3)
    up3 = PReLU()(up3)
    up3 = Conv2D(256, (3, 3), padding='same')(up3)
    up3 = BatchNormalization()(up3)
    up3 = PReLU()(up3)
    up3 = Conv2D(256, (3, 3), padding='same')(up3)
    up3 = BatchNormalization()(up3)
    up3 = PReLU()(up3)
    # 128

    up2 = UpSampling2D((2, 2))(up3)
    conv_pw_3_relu = contracting_path.get_layer(name='conv_pw_3_relu').output
    up2 = concatenate([up2, conv_pw_3_relu], axis=3)
    up2 = Conv2D(128, (3, 3), padding='same')(up2)
    up2 = BatchNormalization()(up2)
    up2 = PReLU()(up2)
    up2 = Conv2D(128, (3, 3), padding='same')(up2)
    up2 = BatchNormalization()(up2)
    up2 = PReLU()(up2)
    up2 = Conv2D(128, (3, 3), padding='same')(up2)
    up2 = BatchNormalization()(up2)
    up2 = PReLU()(up2)
    # 256

    up1 = UpSampling2D((2, 2))(up2)
    conv_pw_1_relu = contracting_path.get_layer(name='conv_pw_1_relu').output
    up1 = concatenate([up1, conv_pw_1_relu], axis=3)
    up1 = Conv2D(64, (3, 3), padding='same')(up1)
    up1 = BatchNormalization()(up1)
    up1 = PReLU()(up1)
    up1 = Conv2D(64, (3, 3), padding='same')(up1)
    up1 = BatchNormalization()(up1)
    up1 = PReLU()(up1)
    up1 = Conv2D(64, (3, 3), padding='same')(up1)
    up1 = BatchNormalization()(up1)
    up1 = PReLU()(up1)
    # 512

    up0 = UpSampling2D((2, 2))(up1)
    up0 = Conv2D(32, (3, 3), padding='same')(up0)
    up0 = BatchNormalization()(up0)
    up0 = PReLU()(up0)
    up0 = Conv2D(32, (3, 3), padding='same')(up0)
    up0 = BatchNormalization()(up0)
    up0 = PReLU()(up0)
    up0 = Conv2D(32, (3, 3), padding='same')(up0)
    up0 = BatchNormalization()(up0)
    up0 = PReLU()(up0)
    # 1024

    classifier = Conv2D(num_classes, (1, 1), activation='sigmoid')(up0)

    model = Model(inputs=contracting_path.input, outputs=classifier)

    return model
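A hedged usage sketch for the U-Net above; input_size must be divisible by 32 (MobileNet downsamples five times by 2) so the upsampled maps align with the skip connections, and the weight file referenced in the function must be present:

model = mobile_u_net(224)
model.compile(optimizer='adam',
              loss='binary_crossentropy',   # a common choice for a single sigmoid channel
              metrics=['accuracy'])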
Example #17
test_datagen = ImageDataGenerator(
    # rescale=1. / 255,
    horizontal_flip=True,
    fill_mode="nearest",
    zoom_range=0.3,
    width_shift_range=0.3,
    height_shift_range=0.3,
    rotation_range=180)

test_generator = test_datagen.flow_from_directory(
    test_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode="categorical")

#####get model#####
model = MobileNet(weights=None, classes=4)
model.load_weights('/model/mobnet.h5')
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
score, acc = model.evaluate_generator(test_generator, steps=(1050 // batch_size) + 1)
# calculate predictions
# pred = model.predict(test_features)
# print(test_labels.shape,pred.shape)
# print(test_labels[0],pred[0])
target_names = ['blade','gun','others','shuriken']
print("Test score: " + str(score))
print("Test accuracy: " + str(acc))


# print(classification_report(test_labels, pred,target_names=target_names))
Example #18
testSamplesNumber = countFiles(testFolder)

#model = resnet.ResnetBuilder.build_resnet_50((channels,img_width,img_height), numOfClasses) #_18 _34 _50 _101
#model = resnet_activity_reg.ResnetBuilder.build_resnet_50((channels,img_width,img_height), numOfClasses) #_18 _34 _50 _101
model = MobileNet(
    include_top=True,
    weights=None,
    #  input_tensor=Input(shape=input_shape),
    alpha=0.1,
    input_tensor=None,
    input_shape=input_shape,
    pooling='avg',
    classes=2)

if modelContinueFlag:
    model.load_weights(modelContinueWeigthsFile, by_name=False)

model.summary()
model.compile(
    loss='categorical_crossentropy',
    #model.compile(loss='categorical_hinge',
    optimizer=Adam(lr=startingLeraningRate,
                   beta_1=0.9,
                   beta_2=0.999,
                   epsilon=1e-08,
                   decay=0.0),
    #optimizer=RMSprop(lr=startingLeraningRate, rho=0.9, epsilon=None, decay=0.0),
    #optimizer=SGD(lr=startingLeraningRate, decay=1e-6, momentum=0.9, nesterov=True),
    metrics=['accuracy', 'categorical_accuracy']
)  # default lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0 or 0.00005
Example #19
def build_model(image_size,
                n_classes,
                mode='training',
                l2_regularization=0.0,
                min_scale=0.1,
                max_scale=0.9,
                scales=None,
                aspect_ratios_global=[0.5, 1.0, 2.0],
                aspect_ratios_per_layer=None,
                two_boxes_for_ar1=True,
                steps=None,
                offsets=None,
                clip_boxes=False,
                variances=[1.0, 1.0, 1.0, 1.0],
                coords='centroids',
                normalize_coords=False,
                subtract_mean=None,
                divide_by_stddev=None,
                swap_channels=False,
                confidence_thresh=0.01,
                iou_threshold=0.45,
                top_k=200,
                nms_max_output_size=400,
                return_predictor_sizes=False):
    '''
    Build a Keras model with SSD architecture, see references.

    The model consists of convolutional feature layers and a number of convolutional
    predictor layers that take their input from different feature layers.
    The model is fully convolutional.
    '''

    n_predictor_layers = 4  # The number of predictor conv layers in the network
    n_classes += 1  # Account for the background class.
    l2_reg = l2_regularization  # Make the internal name shorter.
    img_height, img_width, img_channels = image_size[0], image_size[1], image_size[2]

    ############################################################################
    # Get a few exceptions out of the way.
    ############################################################################

    if aspect_ratios_global is None and aspect_ratios_per_layer is None:
        raise ValueError(
            "`aspect_ratios_global` and `aspect_ratios_per_layer` cannot both be None. At least one needs to be specified."
        )
    if aspect_ratios_per_layer:
        if len(aspect_ratios_per_layer) != n_predictor_layers:
            raise ValueError(
                "It must be either aspect_ratios_per_layer is None or len(aspect_ratios_per_layer) == {}, but len(aspect_ratios_per_layer) == {}."
                .format(n_predictor_layers, len(aspect_ratios_per_layer)))

    if (min_scale is None or max_scale is None) and scales is None:
        raise ValueError(
            "Either `min_scale` and `max_scale` or `scales` need to be specified."
        )
    if scales:
        if len(scales) != n_predictor_layers + 1:
            raise ValueError(
                "It must be either scales is None or len(scales) == {}, but len(scales) == {}."
                .format(n_predictor_layers + 1, len(scales)))
    else:  # If no explicit list of scaling factors was passed, compute the list of scaling factors from `min_scale` and `max_scale`
        scales = np.linspace(min_scale, max_scale, n_predictor_layers + 1)

    if len(variances) != 4:  # We need one variance value for each of the four box coordinates
        raise ValueError(
            "4 variance values must be passed, but {} values were received.".format(
                len(variances)))
    variances = np.array(variances)
    if np.any(variances <= 0):
        raise ValueError(
            "All variances must be >0, but the variances given are {}".format(
                variances))

    if (not (steps is None)) and (len(steps) != n_predictor_layers):
        raise ValueError(
            "You must provide at least one step value per predictor layer.")

    if (not (offsets is None)) and (len(offsets) != n_predictor_layers):
        raise ValueError(
            "You must provide at least one offset value per predictor layer.")

    ############################################################################
    # Compute the anchor box parameters.
    ############################################################################

    # Set the aspect ratios for each predictor layer. These are only needed for the anchor box layers.
    if aspect_ratios_per_layer:
        aspect_ratios = aspect_ratios_per_layer
    else:
        aspect_ratios = [aspect_ratios_global] * n_predictor_layers

    # Compute the number of boxes to be predicted per cell for each predictor layer.
    # We need this so that we know how many channels the predictor layers need to have.
    if aspect_ratios_per_layer:
        n_boxes = []
        for ar in aspect_ratios_per_layer:
            if (1 in ar) & two_boxes_for_ar1:
                n_boxes.append(len(ar) + 1)  # +1 for the second box for aspect ratio 1
            else:
                n_boxes.append(len(ar))
    else:  # If only a global aspect ratio list was passed, then the number of boxes is the same for each predictor layer
        if (1 in aspect_ratios_global) & two_boxes_for_ar1:
            n_boxes = len(aspect_ratios_global) + 1
        else:
            n_boxes = len(aspect_ratios_global)
        n_boxes = [n_boxes] * n_predictor_layers

    if steps is None:
        steps = [None] * n_predictor_layers
    if offsets is None:
        offsets = [None] * n_predictor_layers

    ############################################################################
    # Define functions for the Lambda layers below.
    ############################################################################

    def identity_layer(tensor):
        return tensor

    def input_mean_normalization(tensor):
        return tensor - np.array(subtract_mean)

    def input_stddev_normalization(tensor):
        return tensor / np.array(divide_by_stddev)

    def input_channel_swap(tensor):
        if len(swap_channels) == 3:
            return K.stack([
                tensor[..., swap_channels[0]], tensor[..., swap_channels[1]],
                tensor[..., swap_channels[2]]
            ],
                           axis=-1)
        elif len(swap_channels) == 4:
            return K.stack([
                tensor[..., swap_channels[0]], tensor[..., swap_channels[1]],
                tensor[..., swap_channels[2]], tensor[..., swap_channels[3]]
            ],
                           axis=-1)

    ############################################################################
    # Build the network.
    ############################################################################
    base_model = MobileNet(input_shape=(img_height, img_width, img_channels),
                           weights=None,
                           include_top=False)
    base_model.load_weights("G:/keras_weights/mobilenet_1_0_224_tf_no_top.h5")
    #base_model.summary()
    x = base_model.input
    #base_model.layers[3].output
    conv4 = base_model.get_layer("conv_pw_4_relu").output

    conv5 = base_model.get_layer("conv_pw_6_relu").output

    conv6 = base_model.get_layer("conv_pw_12_relu").output

    conv7 = base_model.get_layer("conv_pw_13_relu").output

    # Build the convolutional predictor layers on top of conv layers 4, 5, 6, and 7.
    # We build two predictor layers on top of each of these layers: one for class prediction (classification), one for box coordinate prediction (localization)
    # We predict `n_classes` confidence values for each box, hence the `classes` predictors have depth `n_boxes * n_classes`
    # We predict 4 box coordinates for each box, hence the `boxes` predictors have depth `n_boxes * 4`
    # Output shape of `classes`: `(batch, height, width, n_boxes * n_classes)`
    classes4 = Conv2D(n_boxes[0] * n_classes, (3, 3),
                      strides=(1, 1),
                      padding="same",
                      kernel_initializer='he_normal',
                      kernel_regularizer=l2(l2_reg),
                      name='classes4')(conv4)
    classes5 = Conv2D(n_boxes[1] * n_classes, (3, 3),
                      strides=(1, 1),
                      padding="same",
                      kernel_initializer='he_normal',
                      kernel_regularizer=l2(l2_reg),
                      name='classes5')(conv5)
    classes6 = Conv2D(n_boxes[2] * n_classes, (3, 3),
                      strides=(1, 1),
                      padding="same",
                      kernel_initializer='he_normal',
                      kernel_regularizer=l2(l2_reg),
                      name='classes6')(conv6)
    classes7 = Conv2D(n_boxes[3] * n_classes, (3, 3),
                      strides=(1, 1),
                      padding="same",
                      kernel_initializer='he_normal',
                      kernel_regularizer=l2(l2_reg),
                      name='classes7')(conv7)
    # Output shape of `boxes`: `(batch, height, width, n_boxes * 4)`
    boxes4 = Conv2D(n_boxes[0] * 4, (3, 3),
                    strides=(1, 1),
                    padding="same",
                    kernel_initializer='he_normal',
                    kernel_regularizer=l2(l2_reg),
                    name='boxes4')(conv4)
    boxes5 = Conv2D(n_boxes[1] * 4, (3, 3),
                    strides=(1, 1),
                    padding="same",
                    kernel_initializer='he_normal',
                    kernel_regularizer=l2(l2_reg),
                    name='boxes5')(conv5)
    boxes6 = Conv2D(n_boxes[2] * 4, (3, 3),
                    strides=(1, 1),
                    padding="same",
                    kernel_initializer='he_normal',
                    kernel_regularizer=l2(l2_reg),
                    name='boxes6')(conv6)
    boxes7 = Conv2D(n_boxes[3] * 4, (3, 3),
                    strides=(1, 1),
                    padding="same",
                    kernel_initializer='he_normal',
                    kernel_regularizer=l2(l2_reg),
                    name='boxes7')(conv7)

    # Generate the anchor boxes
    # Output shape of `anchors`: `(batch, height, width, n_boxes, 8)`
    anchors4 = AnchorBoxes(img_height,
                           img_width,
                           this_scale=scales[0],
                           next_scale=scales[1],
                           aspect_ratios=aspect_ratios[0],
                           two_boxes_for_ar1=two_boxes_for_ar1,
                           this_steps=steps[0],
                           this_offsets=offsets[0],
                           clip_boxes=clip_boxes,
                           variances=variances,
                           coords=coords,
                           normalize_coords=normalize_coords,
                           name='anchors4')(boxes4)
    anchors5 = AnchorBoxes(img_height,
                           img_width,
                           this_scale=scales[1],
                           next_scale=scales[2],
                           aspect_ratios=aspect_ratios[1],
                           two_boxes_for_ar1=two_boxes_for_ar1,
                           this_steps=steps[1],
                           this_offsets=offsets[1],
                           clip_boxes=clip_boxes,
                           variances=variances,
                           coords=coords,
                           normalize_coords=normalize_coords,
                           name='anchors5')(boxes5)
    anchors6 = AnchorBoxes(img_height,
                           img_width,
                           this_scale=scales[2],
                           next_scale=scales[3],
                           aspect_ratios=aspect_ratios[2],
                           two_boxes_for_ar1=two_boxes_for_ar1,
                           this_steps=steps[2],
                           this_offsets=offsets[2],
                           clip_boxes=clip_boxes,
                           variances=variances,
                           coords=coords,
                           normalize_coords=normalize_coords,
                           name='anchors6')(boxes6)
    anchors7 = AnchorBoxes(img_height,
                           img_width,
                           this_scale=scales[3],
                           next_scale=scales[4],
                           aspect_ratios=aspect_ratios[3],
                           two_boxes_for_ar1=two_boxes_for_ar1,
                           this_steps=steps[3],
                           this_offsets=offsets[3],
                           clip_boxes=clip_boxes,
                           variances=variances,
                           coords=coords,
                           normalize_coords=normalize_coords,
                           name='anchors7')(boxes7)

    # Reshape the class predictions, yielding 3D tensors of shape `(batch, height * width * n_boxes, n_classes)`
    # We want the classes isolated in the last axis to perform softmax on them
    classes4_reshaped = Reshape((-1, n_classes),
                                name='classes4_reshape')(classes4)
    classes5_reshaped = Reshape((-1, n_classes),
                                name='classes5_reshape')(classes5)
    classes6_reshaped = Reshape((-1, n_classes),
                                name='classes6_reshape')(classes6)
    classes7_reshaped = Reshape((-1, n_classes),
                                name='classes7_reshape')(classes7)
    # Reshape the box coordinate predictions, yielding 3D tensors of shape `(batch, height * width * n_boxes, 4)`
    # We want the four box coordinates isolated in the last axis to compute the smooth L1 loss
    boxes4_reshaped = Reshape((-1, 4), name='boxes4_reshape')(boxes4)
    boxes5_reshaped = Reshape((-1, 4), name='boxes5_reshape')(boxes5)
    boxes6_reshaped = Reshape((-1, 4), name='boxes6_reshape')(boxes6)
    boxes7_reshaped = Reshape((-1, 4), name='boxes7_reshape')(boxes7)
    # Reshape the anchor box tensors, yielding 3D tensors of shape `(batch, height * width * n_boxes, 8)`
    anchors4_reshaped = Reshape((-1, 8), name='anchors4_reshape')(anchors4)
    anchors5_reshaped = Reshape((-1, 8), name='anchors5_reshape')(anchors5)
    anchors6_reshaped = Reshape((-1, 8), name='anchors6_reshape')(anchors6)
    anchors7_reshaped = Reshape((-1, 8), name='anchors7_reshape')(anchors7)

    # Concatenate the predictions from the different layers and the associated anchor box tensors
    # Axis 0 (batch) and axis 2 (n_classes or 4, respectively) are identical for all layer predictions,
    # so we want to concatenate along axis 1
    # Output shape of `classes_concat`: (batch, n_boxes_total, n_classes)
    classes_concat = Concatenate(axis=1, name='classes_concat')([
        classes4_reshaped, classes5_reshaped, classes6_reshaped,
        classes7_reshaped
    ])

    # Output shape of `boxes_concat`: (batch, n_boxes_total, 4)
    boxes_concat = Concatenate(axis=1, name='boxes_concat')(
        [boxes4_reshaped, boxes5_reshaped, boxes6_reshaped, boxes7_reshaped])

    # Output shape of `anchors_concat`: (batch, n_boxes_total, 8)
    anchors_concat = Concatenate(axis=1, name='anchors_concat')([
        anchors4_reshaped, anchors5_reshaped, anchors6_reshaped,
        anchors7_reshaped
    ])

    # The box coordinate predictions will go into the loss function just the way they are,
    # but for the class predictions, we'll apply a softmax activation layer first
    classes_softmax = Activation('softmax',
                                 name='classes_softmax')(classes_concat)

    # Concatenate the class and box coordinate predictions and the anchors to one large predictions tensor
    # Output shape of `predictions`: (batch, n_boxes_total, n_classes + 4 + 8)
    predictions = Concatenate(axis=2, name='predictions')(
        [classes_softmax, boxes_concat, anchors_concat])

    if mode == 'training':
        model = Model(inputs=x, outputs=predictions)
    elif mode == 'inference':
        decoded_predictions = DecodeDetections(
            confidence_thresh=confidence_thresh,
            iou_threshold=iou_threshold,
            top_k=top_k,
            nms_max_output_size=nms_max_output_size,
            coords=coords,
            normalize_coords=normalize_coords,
            img_height=img_height,
            img_width=img_width,
            name='decoded_predictions')(predictions)
        model = Model(inputs=x, outputs=decoded_predictions)
    elif mode == 'inference_fast':
        decoded_predictions = DecodeDetectionsFast(
            confidence_thresh=confidence_thresh,
            iou_threshold=iou_threshold,
            top_k=top_k,
            nms_max_output_size=nms_max_output_size,
            coords=coords,
            normalize_coords=normalize_coords,
            img_height=img_height,
            img_width=img_width,
            name='decoded_predictions')(predictions)
        model = Model(inputs=x, outputs=decoded_predictions)
    else:
        raise ValueError(
            "`mode` must be one of 'training', 'inference' or 'inference_fast', but received '{}'."
            .format(mode))

    if return_predictor_sizes:
        # The spatial dimensions are the same for the `classes` and `boxes` predictor layers.
        predictor_sizes = np.array([
            classes4._keras_shape[1:3], classes5._keras_shape[1:3],
            classes6._keras_shape[1:3], classes7._keras_shape[1:3]
        ])
        return model, predictor_sizes
    else:
        return model
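A hedged example call for the builder above (the class count and scales are illustrative, and the hard-coded weight path inside build_model must exist):

ssd, predictor_sizes = build_model(image_size=(224, 224, 3),
                                   n_classes=2,
                                   mode='training',
                                   scales=[0.1, 0.3, 0.5, 0.7, 0.9],
                                   return_predictor_sizes=True)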
Example #20
    our_model = None

    if args.ref_model == "mobilenet":
        input_shape = (224, 224, 3)
        input_tensor = Input(input_shape)
        ref_model = MobileNet(weights=args.ref_model_weights,
                              include_top=False,
                              input_shape=input_shape,
                              input_tensor=input_tensor)
    else:
        assert args.ref_model_weights

        ref_model, helper = build_model(args.ref_model,
                                        args=None,
                                        for_training=False)
        ref_model.load_weights(args.ref_model_weights)

    if args.our_model:
        our_model, helper = build_model(args.our_model,
                                        args=None,
                                        for_training=False)

        if args.our_model_weights:
            print("Restore weights from '{}' for our model!".format(
                args.our_model_weights))
            our_model.load_weights(args.our_model_weights)
        else:
            print("Don't restore any weights for our model!")

    if args.method == "diff":
        print("Difference between:")
Example #21
def main():
    # Parameters
    if len(sys.argv) == 4:
        superclass = sys.argv[1]
        imgmove = sys.argv[2]
        if imgmove == 'False':
            imgmove = False
        else:
            imgmove = True
        lr = float(sys.argv[3])
    else:
        print('Parameters error')
        exit()

    # The constants
    classNum = {'A': 40, 'F': 40, 'V': 40, 'E': 40, 'H': 24}
    testName = {'A': 'a', 'F': 'a', 'V': 'b', 'E': 'b', 'H': 'b'}
    date = '20180321'

    trainpath = 'trainval_' + superclass + '/train'
    valpath = 'trainval_' + superclass + '/val'

    if not os.path.exists('model'):
        os.mkdir('model')

    # Train/validation data preparation
    if imgmove:
        if not os.path.exists('trainval_' + superclass):
            os.mkdir('trainval_' + superclass)
            os.mkdir(trainpath)
            os.mkdir(valpath)
            sourcepath = '../zsl_'+testName[superclass[0]]+'_'+str(superclass).lower()+'_train_'+date\
                         +'/zsl_'+testName[superclass[0]]+'_'+str(superclass).lower()+'_train_images_'+date
            categories = os.listdir(sourcepath)
            for eachclass in categories:
                if eachclass[0] == superclass[0]:
                    print(eachclass)
                    os.mkdir(trainpath + '/' + eachclass)
                    os.mkdir(valpath + '/' + eachclass)
                    imgs = os.listdir(sourcepath + '/' + eachclass)
                    idx = 0
                    for im in imgs:
                        if idx % 8 == 0:
                            shutil.copyfile(
                                sourcepath + '/' + eachclass + '/' + im,
                                valpath + '/' + eachclass + '/' + im)
                        else:
                            shutil.copyfile(
                                sourcepath + '/' + eachclass + '/' + im,
                                trainpath + '/' + eachclass + '/' + im)
                        idx += 1

    # Train and validation ImageDataGenerator
    batchsize = 32

    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       rotation_range=15,
                                       width_shift_range=5,
                                       height_shift_range=5,
                                       horizontal_flip=True)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(trainpath,
                                                        target_size=(224, 224),
                                                        batch_size=batchsize)

    valid_generator = test_datagen.flow_from_directory(valpath,
                                                       target_size=(224, 224),
                                                       batch_size=batchsize)

    # Train MobileNet
    model = MobileNet(include_top=True,
                      weights=None,
                      input_tensor=None,
                      input_shape=None,
                      pooling=None,
                      classes=classNum[superclass[0]])
    model.summary()
    model.compile(optimizer=SGD(lr=lr, momentum=0.9),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    steps_per_epoch = int(train_generator.n / batchsize)
    validation_steps = int(valid_generator.n / batchsize)
    visual = TensorBoard(log_dir="model/",
                         histogram_freq=0,
                         batch_size=batchsize,
                         write_graph=True)
    weightname = 'model/mobile_' + superclass + '_wgt-{epoch:02d}-{val_loss:.4f}-{val_acc:.3f}.h5'

    checkpointer = ModelCheckpoint(weightname,
                                   monitor='val_loss',
                                   verbose=0,
                                   save_best_only=True,
                                   save_weights_only=True,
                                   mode='auto',
                                   period=1)

    model.load_weights('model/mobile_Animals_wgt.h5')
    model.fit_generator(train_generator,
                        steps_per_epoch=steps_per_epoch,
                        epochs=100,
                        validation_data=valid_generator,
                        validation_steps=validation_steps,
                        callbacks=[checkpointer, visual])
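From the argument parsing at the top of main(), the script takes a superclass name, an image-move flag, and a learning rate. A hypothetical invocation (the script name is assumed):

python train_mobilenet.py Animals True 0.001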
Example #22
app = Flask(__name__)

app.config['MAX_CONTENT_LENGTH'] = 1 * 1024 * 1024
with open('./model/imagenet_class_index.json', encoding="utf-8") as f:
    zisyo = json.load(f)
    # "zisyo" (dictionary): map English ImageNet class names to Japanese labels
    zisyo = {item["en"]: item["ja"] for item in zisyo}
model = MobileNet(input_shape=(128, 128, 3),
                  alpha=1.0,
                  depth_multiplier=1,
                  dropout=1e-3,
                  include_top=True,
                  weights=None,
                  input_tensor=None,
                  pooling=None,
                  classes=1000)
model.load_weights("./model/kerasmobilenet.h5")
bunrui = imagenet(model)


@app.route("/")
def index():
    return render_template('index.html')


@app.route('/send', methods=['post'])
def posttest():
    img_file = request.files['img_file']
    fileName = img_file.filename
    root, ext = os.path.splitext(fileName)
    ext = ext.lower()
    gazouketori = set([