Code example #1
File: visualize.py Project: olest/dsde-deep-learning
def model_from_args(args):
	'''Return a model as specified by the command-line or default arguments'''
	K.set_learning_phase(0)

	if K.image_data_format() == 'channels_first':
		args.input_shape = (args.channels, args.height, args.width)
	else:
		args.input_shape = (args.height, args.width, args.channels)
		
	input_image = Input(shape=args.input_shape, name='input_image')		
	
	if os.path.exists(args.weights):
		model = load_model(args.weights)
	elif 'inception' == args.model:
		if args.height == 299 and args.width == 299:
			model = inception_v3.InceptionV3(input_tensor=input_image, input_shape=args.input_shape, include_top=True)
		else:
			model = inception_v3.InceptionV3(input_tensor=input_image, input_shape=args.input_shape, include_top=False, pooling='max')

	elif 'vgg' == args.model:
		if args.height == 224 and args.width == 224:
			model = vgg16.VGG16(input_tensor=input_image, include_top=True)
		else:
			model = vgg16.VGG16(input_tensor=input_image, input_shape=args.input_shape, include_top=False, pooling='max')
	else:
		raise ValueError('Unknown model architecture: {}'.format(args.model))

	if args.convert_kernels:
		convert_all_kernels_in_model(model)

	model.summary()
	return model
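A minimal usage sketch for model_from_args. The Namespace defaults below are hypothetical (the project's real argparse setup is not shown); they only mirror the attributes the function reads:

import argparse

args = argparse.Namespace(model='inception', weights='', height=299, width=299,
                          channels=3, convert_kernels=False)
model = model_from_args(args)  # builds InceptionV3 with include_top=True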
Code example #2
File: custom_models.py Project: vkk800/neural-fun
def custom_inception(input_tensor, num_classes, weights=None, optimizer=keras.optimizers.SGD(), fc=0):
    model_base = inception_v3.InceptionV3(include_top=False, input_tensor=input_tensor,
                                          weights='imagenet', pooling='avg')

    for ll in model_base.layers[1:]:
        ll.trainable = False
        ll.name += '_inc'

    inp_tensor = model_base.input
    x = model_base.output
    for i in range(fc):
        x = Dense(256, activation='relu', name='top_dense'+str(i)+'_inc')(x)
        x = Dropout(0.1)(x)
    x = Dense(num_classes, activation='softmax', name='top_predictions_inc')(x)

    model = keras.Model(inputs=inp_tensor, outputs=x)
    try:
        model.load_weights(weights, by_name=False)
    except Exception:
        print("Weight file {} could not be loaded. Using ImageNet weights.".format(weights))

    model.preprocessor = inception_v3.preprocess_input

    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
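A hedged usage sketch for custom_inception: build a 10-class fine-tuning head with one extra fully connected layer. The names below are illustrative; passing weights=None simply falls through to the ImageNet weights via the except branch:

from keras.layers import Input

inp = Input(shape=(299, 299, 3))
model = custom_inception(inp, num_classes=10, weights=None, fc=1)
model.summary()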
Code example #3
def main():
    labels = pd.read_csv(data_dir + 'labels.csv')
    num_classes = len(labels.groupby('breed'))
    selected_labels = labels.groupby('breed').count().sort_values(
        by='id', ascending=False).head(num_classes).index.values
    labels = labels[labels['breed'].isin(selected_labels)]
    labels['target'] = 1
    labels['rank'] = labels.groupby('breed').rank()['id']
    labels_pivot = labels.pivot('id', 'breed', 'target').reset_index().fillna(
        0)  # pivot values are the target values keyed by (id, breed)
    np.random.seed(SEED)
    y_train = labels_pivot[selected_labels].values
    ytr = y_train

    x_train = np.zeros((len(labels), INPUT_SIZE, INPUT_SIZE, 3),
                       dtype='float32')
    for i, img_id in tqdm(enumerate(labels['id'])):
        # print i, img_id
        img = read_img(img_id, 'train', (INPUT_SIZE, INPUT_SIZE))
        x = xception.preprocess_input(np.expand_dims(img.copy(), axis=0))
        x_train[i] = x
    print('Train Images shape: {} size: {:,}'.format(x_train.shape,
                                                     x_train.size))

    test_files = listdir(data_dir + '/test/')
    num_tests = len(test_files)
    x_test = np.zeros((num_tests, INPUT_SIZE, INPUT_SIZE, 3), dtype='float32')
    test_id = []
    for i in range(num_tests):
        img_file_name = test_files[i]
        img_id = img_file_name[0:len(img_file_name) - 4]  # strip the extension
        img = read_img(img_id, 'test', (INPUT_SIZE, INPUT_SIZE))
        x = xception.preprocess_input(np.expand_dims(img.copy(), axis=0))
        x_test[i] = x
        test_id.append(img_id)

    xtr = x_train
    xception_bottleneck = xception.Xception(weights='imagenet',
                                            include_top=False,
                                            pooling=POOLING)
    train_x_bf = xception_bottleneck.predict(xtr, batch_size=32, verbose=1)
    valid_x_bf = xception_bottleneck.predict(x_test, batch_size=32, verbose=1)

    inception_bottleneck = inception_v3.InceptionV3(weights='imagenet',
                                                    include_top=False,
                                                    pooling=POOLING)
    train_i_bf = inception_bottleneck.predict(xtr, batch_size=32, verbose=1)
    valid_i_bf = inception_bottleneck.predict(x_test, batch_size=32, verbose=1)

    train_x = np.hstack([train_x_bf, train_i_bf])
    test_x = np.hstack([valid_x_bf, valid_i_bf])
    data = {
        'train_x': train_x,
        'train_y': ytr,
        'test_x': test_x,
        "num_class": num_classes,
        "selected_labels": selected_labels,
        "test_id": test_id
    }
    with open('xicpt_data.pickle', 'wb') as handle:
        pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
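A sketch of how the pickled bottleneck features might be consumed downstream; the logistic-regression head here is an assumption, not part of the original pipeline:

import pickle
from sklearn.linear_model import LogisticRegression

with open('xicpt_data.pickle', 'rb') as handle:
    data = pickle.load(handle)

clf = LogisticRegression(multi_class='multinomial', solver='lbfgs')
clf.fit(data['train_x'], data['train_y'].argmax(axis=1))  # one-hot -> class index
probs = clf.predict_proba(data['test_x'])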
Code example #4
def inception_model(train_data, train_label, test_data, test_label):
    input_tensor = Input(shape=(299,299,3))
    base_model = inception_v3.InceptionV3(input_tensor=input_tensor,include_top=False, weights='imagenet')
    x = base_model.output
    x = AveragePooling2D(pool_size=(8,8))(x)
    x = Flatten()(x)
    #x = Dense(10, activation='relu')(x)
    predictions = Dense(5,activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    for layer in model.layers[0:-1]:
        layer.trainable = False
    model.compile(optimizer=Adam(lr=0.0001),loss='categorical_crossentropy',metrics=['acc'])
    #model.compile(optimizer=SGD(lr=0.0001,momentum=0.9),loss='categorical_crossentropy',metrics=['acc'])
    
    if not data_augmentation:
        print('Not using data augmentation.')
        model.fit(train_data, train_label, batch_size=batch_size,
                  epochs=epochs, validation_data=(test_data, test_label),
                  shuffle=True, verbose=2)
    else:
        print('Using real-time data augmentation.')
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            zca_epsilon=1e-06,  # epsilon for ZCA whitening
            rotation_range=45,  # randomly rotate images in the range (degrees, 0 to 180)
            # randomly shift images horizontally (fraction of total width)
            width_shift_range=0.1,
            # randomly shift images vertically (fraction of total height)
            height_shift_range=0.1,
            shear_range=0.,  # set range for random shear
            zoom_range=[0.8, 1.2],  # set range for random zoom
            channel_shift_range=0.,  # set range for random channel shifts
            # set mode for filling points outside the input boundaries
            fill_mode='nearest',
            cval=0.,  # value used for fill_mode = "constant"
            horizontal_flip=True,  # randomly flip images
            vertical_flip=True,  # randomly flip images
            # set rescaling factor (applied before any other transformation)
            rescale=None,
            # set function that will be applied on each input
            preprocessing_function=None,
            # image data format, either "channels_first" or "channels_last"
            data_format=None,
            # fraction of images reserved for validation (strictly between 0 and 1)
            validation_split=0.0
        )

        datagen.fit(train_data)

        # Fit the model on the batches generated by datagen.flow().
        model.fit_generator(datagen.flow(train_data, train_label,
                                         batch_size=batch_size),
                            epochs=epochs,
                            validation_data=(test_data, test_label),steps_per_epoch=50,verbose=2)

    model.save("5.h5")
Code example #5
def img_confi_worker(img_files, model, path):
    if model == 'resnet50':
        import keras.applications.resnet50 as resnet50
        model = resnet50.ResNet50(weights='imagenet')
        pak = resnet50
    elif model == 'xception':
        import keras.applications.xception as xception
        model = xception.Xception(weights='imagenet')
        pak = xception
    elif model == 'inception_v3':
        import keras.applications.inception_v3 as inception_v3
        model = inception_v3.InceptionV3(weights='imagenet')
        pak = inception_v3
    else:
        raise RuntimeError("don't have this model")
    from img_tool import image_classify

    img_confi = []

    for i, img_f in tqdm(enumerate(img_files)):
        img_feat = []
        if isinstance(img_f, str):
            img_feat += list(image_classify(model, pak, path + img_f + '.jpg'))
        img_confi.append(img_feat)

    return img_confi
Code example #6
def judge(path):
    # Lazily create the model and label map on first call and cache them
    # as function attributes
    if not hasattr(judge, 'inception_model'):
        judge.inception_model = inception_v3.InceptionV3(weights='imagenet')
    if not hasattr(judge, 'label'):
        with open(os.path.join(base_dir, 'utils/ImageNet_Label.json'),
                  'r',
                  encoding='utf-8') as file:
            judge.label = json.load(file)

    original = load_img(path, target_size=(299, 299))
    numpy_image = img_to_array(original)
    image_batch = np.expand_dims(numpy_image, axis=0)
    processed_image = inception_v3.preprocess_input(image_batch.copy())
    preds = judge.inception_model.predict(processed_image)
    print(imagenet_utils.decode_predictions(preds)[0])

    result = [
        judge.label.get(item[0], '其他')
        for item in imagenet_utils.decode_predictions(preds)[0]
    ]
    return result
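Hypothetical call (assumes 'utils/ImageNet_Label.json' maps ImageNet synset ids to display labels, and that 'cat.jpg' exists):

labels = judge('cat.jpg')
print(labels)  # top-5 mapped labels; '其他' ("other") for unmapped ids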
Code example #7
def save_bottleneck_features():
    # build the Inception V3 network
    model = inception_v3.InceptionV3(include_top=False,
                                     weights='imagenet',
                                     input_tensor=None,
                                     input_shape=None,
                                     pooling='avg')

    # Save the bottleneck features for the training data set
    datagen = ImageDataGenerator(preprocessing_function=
                                 inception_v3.preprocess_input)
    generator = datagen.flow_from_directory(train_data_dir,
                                            target_size=(img_width, img_height),
                                            batch_size=batch_size,
                                            class_mode='sparse',
                                            shuffle=False)
    features = model.predict_generator(generator, nb_train_samples // batch_size)
    labels = np.eye(generator.num_classes, dtype='uint8')[generator.classes]
    labels = labels[0:(nb_train_samples // batch_size) * batch_size]
    np.save(open(output_dir+'bottleneck_features_train.npy', 'wb'), features)
    np.save(open(output_dir+'bottleneck_labels_train.npy', 'wb'), labels)

    # Save the bottleneck features for the validation data set
    generator = datagen.flow_from_directory(validation_data_dir,
                                            target_size=(img_width, img_height),
                                            batch_size=batch_size,
                                            class_mode=None,
                                            shuffle=False)
    features = model.predict_generator(generator, nb_validation_samples // batch_size)
    labels = np.eye(generator.num_classes, dtype='uint8')[generator.classes]
    labels = labels[0:(nb_validation_samples // batch_size) * batch_size]
    np.save(open(output_dir+'bottleneck_features_validation.npy', 'wb'), features)
    np.save(open(output_dir+'bottleneck_labels_validation.npy', 'wb'), labels)
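A minimal sketch of training a small classifier on the saved bottleneck features; output_dir and batch_size are assumed to match the globals used by save_bottleneck_features:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

train_data = np.load(open(output_dir + 'bottleneck_features_train.npy', 'rb'))
train_labels = np.load(open(output_dir + 'bottleneck_labels_train.npy', 'rb'))

top_model = Sequential([
    Dense(256, activation='relu', input_shape=train_data.shape[1:]),
    Dense(train_labels.shape[1], activation='softmax'),
])
top_model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
                  metrics=['accuracy'])
top_model.fit(train_data, train_labels, epochs=10, batch_size=batch_size)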
Code example #8
    def build_model():
        base_model = inception_v3.InceptionV3(include_top=False,
                                              weights='imagenet',
                                              input_shape=(299, 299, 3))
        # base_model = VGG16(include_top=False, weights='imagenet', input_shape=(299, 299, 3))

        # Freeze all layers except the last 2: the current task is assumed to
        # be very similar to the base model's pre-trained classes
        for layer in base_model.layers[:-2]:
            layer.trainable = False

        # check that the right number of layers are frozen
        for layer in base_model.layers:
            print(layer, layer.trainable)

        model = Sequential([
            base_model,
            Flatten(),
            Dense(64, activation='relu', name='dense_1'),
            Dropout(0.5),
            Dense(3, activation='softmax', name='dense_2')
        ])
        model.summary()

        return model
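build_model returns an uncompiled model, so a plausible (assumed) training setup would be:

model = build_model()
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
# x_train/y_train are assumptions: images of shape (299, 299, 3) and one-hot
# labels with 3 columns to match the final Dense(3, activation='softmax')
model.fit(x_train, y_train, epochs=5, batch_size=32, validation_split=0.1)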
Code example #9
File: test_gradient.py Project: dbvis-ukon/explainer
def main():

    inet_model = inception_v3.InceptionV3()

    url = 'https://pbs.twimg.com/profile_images/1046968391389589507/_0r5bQLl_400x400.jpg'
    response = requests.get(url)
    img_org = Image.open(BytesIO(response.content))

    x = image.img_to_array(img_org)
    x = np.expand_dims(x, axis=0)
    x = inception_v3.preprocess_input(x)

    gradient_methods_results = []

    gradient_methods = [
        'grad*input', 'intgrad', 'gradient', 'smoothgrad', 'input_t_gradient',
        'integrated_gradients'
    ]

    for gradient in gradient_methods:
        explain = GradientExplainer(gradient)
        img = explain.explain(inet_model, x, decode_predictions)
        gradient_methods_results.append(img)

    gradient_methods_len = len(gradient_methods_results)
    fig, axes = plt.subplots(1, gradient_methods_len + 1)

    for i, img in enumerate(gradient_methods_results):
        if img is None:
            img = np.zeros(x.shape)[0]
        axes[i].imshow(img)
    axes[gradient_methods_len].imshow(img_org)

    fig.show()
    plt.show()
Code example #10
def create_Inception(freeze=False, dropout_rate=0.75, verbose=1):

    # set the image input size (include_top=True with ImageNet weights
    # requires InceptionV3's native 299x299 input)
    image_size = 299

    # load the pre-trained model
    inception = inception_v3.InceptionV3(include_top=True,
                                         weights='imagenet',
                                         input_shape=(image_size, image_size,
                                                      3))

    # remove last layer
    inception.layers.pop()

    if freeze:
        # freeze all the layers
        for layer in inception.layers:
            layer.trainable = False

    # get the output of the model
    x = inception.layers[-1].output

    # add last FC layer
    x = Dropout(dropout_rate)(x)
    x = Dense(10, kernel_initializer='he_uniform', activation='softmax')(x)

    # create the new model
    inception = Model(inception.input, x)

    # show summary
    if verbose > 1:
        inception.summary()

    return inception
Code example #11
def main():

    inet_model = inception_v3.InceptionV3()

    url = 'https://pbs.twimg.com/profile_images/1046968391389589507/_0r5bQLl_400x400.jpg'
    response = requests.get(url)
    img_org = Image.open(BytesIO(response.content))
    img_org = img_org.resize((299, 299), Image.ANTIALIAS)

    x = image.img_to_array(img_org)
    x = np.expand_dims(x, axis=0)
    x = inception_v3.preprocess_input(x)

    lrp_methods_results = []

    lrp_methods = ['lrp.z', 'lrp.epsilon',
                   'lrp.sequential_preset_a_flat',
                   'lrp.sequential_preset_b_flat',
                   'elrp']

    for lrp in lrp_methods:
        explain = LRPExplainer(lrp)
        img = explain.explain(inet_model, x, decode_predictions)
        lrp_methods_results.append(img)

    lrp_methods_len = len(lrp_methods_results)
    fig, axes = plt.subplots(1, lrp_methods_len + 1)

    for i, img in enumerate(lrp_methods_results):
        axes[i].imshow(img)
    axes[lrp_methods_len].imshow(img_org)

    # fig.show()
    cur_time = datetime.datetime.today().strftime("%d-%m-%Y-%H-%M-%S")
    plt.savefig('tests/test_img/'+cur_time+'-lrp.jpg')
Code example #12
def build_network():
    base_model = inception_v3.InceptionV3(weights='imagenet',
                                          include_top=False,
                                          input_shape=(224, 224, 3))
    base_model2 = models.Model(inputs=base_model.input,
                               outputs=base_model.get_layer('mixed10').output)
    flat = layers.GlobalAveragePooling2D()(base_model2.output)
    x = layers.Dense(128, activation='relu')(flat)
    output_amdState = layers.Dense(2,
                                   activation='sigmoid',
                                   name='output_amdState')(x)
    geno = layers.Input(shape=(52, ), name="geno_input")  #52 SNPs
    concatenatedFeatures = layers.Concatenate(axis=1)([output_amdState, geno])
    x = layers.Dense(4, activation='relu')(concatenatedFeatures)
    output_time = layers.Dense(1, activation='sigmoid', name='output_time')(x)
    model = models.Model(inputs=[base_model2.input, geno],
                         outputs=[output_amdState, output_time])
    opt = optimizers.Adam(lr=0.001,
                          beta_1=0.9,
                          beta_2=0.999,
                          epsilon=1e-08,
                          decay=0.0)
    base_model2.trainable = False
    model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['acc'])
    return model
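A hedged inference sketch for the two-input, two-output network above (shapes only; the zero arrays are placeholders):

import numpy as np

model = build_network()
img = np.zeros((1, 224, 224, 3), dtype='float32')  # image batch
snps = np.zeros((1, 52), dtype='float32')          # 52-SNP genotype vector
amd_state, time_pred = model.predict([img, snps])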
Code example #13
 def __init__(self):
     self.__model = inception_v3.InceptionV3(input_shape=(INPUT_IMG_SIZE,
                                                          INPUT_IMG_SIZE,
                                                          3),
                                             weights='imagenet',
                                             include_top=False,
                                             pooling='avg')
Code example #14
def Exp2(image,label):
    tf.keras.backend.set_learning_phase(0)

    model=inception_v3.InceptionV3(include_top=True,weights='imagenet',input_tensor=None,input_shape=None,pooling=None,classes=1000)

    image_arr=[]
    print('preprocess image')
    for img in tqdm(image):
        image_arr.append(cv2.resize(img,(299,299)))
    image_arr=np.array(image_arr)
    image_arr=imagenet_utils.preprocess_input(image_arr,mode='tf')
    #image_arr=image_arr[:4000]
    print('model predict')
    pred=model.predict(image_arr)
    pred_top5=imagenet_utils.decode_predictions(pred,top=5)
    pred_top5=np.array(pred_top5)

    acc=[]
    entropy=[]
    random_entropy=[]

    print('random select')

    for i in tqdm(range(1000)):
        #image.shape[0]
        index_choice=np.random.choice(range(image.shape[0]),size=2500,replace=False)
        temp1=top5_score(label[index_choice],pred_top5[index_choice])
        temp2=graph_distance(pred[index_choice])
        temp3=graph_distance(pred[index_choice],span=False)
        acc.append(temp1)
        entropy.append(temp2)
        random_entropy.append(temp3)

    return acc,entropy,random_entropy
Code example #15
File: Vgg_model.py Project: RamjiB/Comet
def pretrained_model(model):
    if model == 'densenet':
        base_model = densenet.DenseNet121(include_top=False,
                                          weights='imagenet',
                                          input_shape=(IMG_SIZE, IMG_SIZE, 3))
    elif model == 'inception':
        base_model = inception_v3.InceptionV3(include_top=False,
                                              weights='imagenet',
                                              input_shape=(IMG_SIZE, IMG_SIZE,
                                                           3))
    elif model == 'mobilenet':
        base_model = mobilenet.MobileNet(include_top=False,
                                         weights='imagenet',
                                         input_shape=(IMG_SIZE, IMG_SIZE, 3))
    elif model == 'vgg':
        base_model = vgg19.VGG19(include_top=False,
                                 weights='imagenet',
                                 input_shape=(IMG_SIZE, IMG_SIZE, 3))
    elif model == 'resnet':
        base_model = resnet50.ResNet50(include_top=False,
                                       weights='imagenet',
                                       input_shape=(IMG_SIZE, IMG_SIZE, 3))
    elif model == 'xception':
        base_model = xception.Xception(include_top=False,
                                       weights='imagenet',
                                       input_shape=(IMG_SIZE, IMG_SIZE, 3))
    else:
        raise ValueError('unknown model: {}'.format(model))
    for layer in base_model.layers:
        layer.trainable = True
    x = base_model.output
    x = Flatten()(x)
    x = Dense(150, activation='relu')(x)
    x = Dropout(0.2)(x)
    predictions = Dense(1, activation='sigmoid')(x)

    return models.Model(base_model.input, predictions)
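pretrained_model returns an uncompiled binary classifier, so a plausible usage (IMG_SIZE is assumed to be defined by the module) would be:

model = pretrained_model('inception')
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])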
Code example #16
File: server.py Project: abecadel/aksel-hackaton
def activate_job():
    global graph
    global inception_model
    global merc_cat
    graph = tf.get_default_graph()
    inception_model = inception_v3.InceptionV3(weights='imagenet')
    merc_cat = MercCategories('sales_to_list_ratio.csv')
Code example #17
def e2e_pretrained_network():
    vgg16_model = vgg16.VGG16(weights="imagenet", include_top=True)

    # vgg19_model = vgg19.VGG19(weights="imagenet", include_top=True)
    inception_model = inception_v3.InceptionV3(weights="imagenet", include_top=True)

    input_shape = (224, 224, 3)
    image_left = layers.Input(shape=input_shape)
    image_right = layers.Input(shape=input_shape)

    base_model = models.Model(inputs=vgg16_model.input, outputs=vgg16_model.get_layer('fc2').output)
    left_vec = base_model(image_left)
    right_vec = base_model(image_right)
    dist = layers.Lambda(cosine_distance, output_shape=cosine_distance_output_shape)([left_vec, right_vec])
    fc1 = layers.Dense(128, kernel_initializer="glorot_uniform")(dist)
    fc1 = layers.Dropout(0.2)(fc1)
    fc1 = layers.Activation("relu")(fc1)

    pred = layers.Dense(2, kernel_initializer="glorot_uniform")(fc1)
    pred = layers.Activation("softmax")(pred)

    model = models.Model(inputs=[image_left, image_right], outputs=pred)

    model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
    return model
Code example #18
def ft_inception_v3():
    """
    Use pretrained VGG16 as bottom

    :return:
    """
    base_model = inception_v3.InceptionV3(include_top=False,
                                          weights='imagenet',
                                          input_shape=(299, 299, 3))
    base_out = base_model.output
    x = layers.Flatten()(base_out)
    # 'Concatenate' object has no attribute 'outbound_nodes'
    x = layers.Dense(4096, activation='relu')(x)  # Can't connect
    x = layers.Dense(1024, activation='relu')(x)
    x = layers.Dense(256, activation='relu')(x)
    x = layers.Dense(64, activation='relu')(x)
    x = layers.Dense(16, activation='relu')(x)
    predictions = layers.Dense(7, activation='softmax')(x)

    model = Model(inputs=base_model.input, outputs=predictions)
    for layer in base_model.layers:
        layer.trainable = False

    model.compile(optimizer=tf.train.AdadeltaOptimizer(0.001),
                  loss='categorical_crossentropy',
                  metrics=[metrics.categorical_accuracy])

    return model
Code example #19
def my_detection(filename):
    # Load pre-trained image recognition model
    model = inception_v3.InceptionV3()

    # Load the image file and convert it to a numpy array
    img = image.load_img(filename, target_size=(299, 299))
    input_image = image.img_to_array(img)
    print(input_image.shape)

    # Scale the image so all pixel intensities are between [-1, 1] as the model expects
    input_image /= 255.
    input_image -= 0.5
    input_image *= 2.

    # Add a 4th dimension for batch size (as Keras expects)
    input_image = np.expand_dims(input_image, axis=0)

    # Run the image through the neural network
    print(input_image.shape)
    predictions = model.predict(input_image)

    # Convert the predictions into text and print them
    predicted_classes = inception_v3.decode_predictions(predictions, top=1)
    imagenet_id, name, confidence = predicted_classes[0][0]
    print("This is a {} with {:.4}% confidence!".format(
        name, confidence * 100))
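The manual scaling in my_detection reproduces inception_v3.preprocess_input (the 'tf' mode, which maps pixel values to [-1, 1]); a quick check of the equivalence:

import numpy as np
from keras.applications import inception_v3

x = np.random.uniform(0, 255, (1, 299, 299, 3)).astype('float32')
manual = (x / 255. - 0.5) * 2.
assert np.allclose(manual, inception_v3.preprocess_input(x.copy()), atol=1e-5)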
Code example #20
File: bottleneck.py Project: ren-hoek/daex-meta
def insert_xception(d):
    """Insert xception bottleneck features.
    
    Add instructions
    
    """

    POOLING = 'avg'

    client = py.MongoClient('mongo')
    db = client['docs']
    col = db['aug_meta']

    doc_id = d['_id']
    doc = col.find_one({"_id": doc_id})
    grid_id = doc['raw_file']

    image = get_from_gridfs(db, grid_id)

    img = read_img(image, (299, 299))
    x = inception_v3.preprocess_input(np.expand_dims(img.copy(), axis=0))

    inception_bottleneck = inception_v3.InceptionV3(weights='imagenet',
                                                    include_top=False,
                                                    pooling=POOLING)
    train_i_bf = inception_bottleneck.predict(x, batch_size=1, verbose=0)

    if 'ml-features' not in doc:
        doc['ml-features'] = dict()
    doc['ml-features']['xception'] = train_i_bf.tolist()
    success = update_doc(col, doc_id, doc)

    return success
Code example #21
    def __init__(self, input_size=224):

        input_shape = (input_size, input_size, 3)

        xception_model = xception.Xception(weights='imagenet',
                                           include_top=False,
                                           pooling='max',
                                           input_shape=input_shape)
        inceptionv3_model = inception_v3.InceptionV3(weights='imagenet',
                                                     include_top=False,
                                                     pooling='max',
                                                     input_shape=input_shape)
        resnet50_model = resnet50.ResNet50(weights='imagenet',
                                           include_top=False,
                                           pooling='max',
                                           input_shape=input_shape)
        vgg19_model = vgg19.VGG19(weights='imagenet',
                                  include_top=False,
                                  pooling='max',
                                  input_shape=input_shape)

        self.input_size = input_size
        self.models = [
            xception_model, inceptionv3_model, resnet50_model, vgg19_model
        ]
        self.graph = tf.get_default_graph()
Code example #22
File: grad_cam.py Project: jameszaupa/final-thesis
def grad_cam_heatmap(original):
    model = inception_v3.InceptionV3(weights='imagenet', input_shape=None)
    processed_image, original = prep(original)
    predictions = model.predict(processed_image)

    label = decode_predictions(predictions)
    class_idx = np.argmax(predictions[0])  #topmost class index
    class_output = model.output[:, class_idx]
    last_conv_layer = model.get_layer(
        'conv2d_94')  #output for the last conv layer (from model.summary())

    # We compute the gradient of the class output value with respect to the feature map.
    # Then, we pool the gradients over all the axes leaving out the channel dimension.
    # Finally, we weigh the output feature map with the computed gradient values.
    grads = K.gradients(class_output, last_conv_layer.output)[0]
    pooled_grads = K.mean(grads, axis=(0, 1, 2))
    iterate = K.function([model.input],
                         [pooled_grads, last_conv_layer.output[0]])
    pooled_grads_value, conv_layer_output_value = iterate([processed_image])
    for i in range(conv_layer_output_value.shape[2]):
        conv_layer_output_value[:, :, i] *= pooled_grads_value[i]

    heatmap = np.mean(conv_layer_output_value, axis=-1)
    heatmap = np.maximum(heatmap, 0)  #relu
    heatmap /= np.max(heatmap)
    #show the original img and the cam
    #img = cv2.resize (img , (800, 600))
    heatmap = cv2.resize(heatmap, (original.shape[1], original.shape[0]))
    heatmap = np.uint8(255 * heatmap)
    heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
    superimposed_img = cv2.addWeighted(original, 0.4, heatmap, 0.6, 0)
    return superimposed_img, heatmap
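Hypothetical usage of grad_cam_heatmap (prep() is assumed to return the preprocessed batch plus the resized original image, as the function expects):

import cv2

overlay, cam = grad_cam_heatmap(cv2.imread('elephant.jpg'))
cv2.imwrite('elephant_gradcam.jpg', overlay)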
Code example #23
def Inceptionv3_DB1_NinaPro(input_shape, classes, dropout_rate=0., dense1=0, dense2=0,
                            dense3=0, dense4=0, batch_norm=False):

    #Load pretrained InceptionV3 model
    inc_model= inception_v3.InceptionV3(weights='imagenet', include_top=False)
    #Keep 9/311 layers of original InceptionV3
    inc_model = Model(inc_model.input, inc_model.layers[9].output)

    # Define input
    x_input = Input(input_shape)
    output_inc= inc_model(x_input)

    #Classifier    
    if batch_norm is True:
        x = BatchNormalization()(output_inc)
    else:
        x = output_inc
    x = Flatten()(x)
    x = Dense(dense1, activation='relu')(x) 
    x = Dropout(dropout_rate)(x)
    x = Dense(dense2, activation='relu')(x)
    if batch_norm is True:    
        x = BatchNormalization()(x)
    x = Dense(dense3, activation='relu')(x)
    x = Dense(dense4, activation='relu')(x)
    x = Dense(classes, activation='softmax')(x) # Number of classes
    model = Model(x_input, x ,name='Inceptionv3_DB1_NinaPro')
    
    for layer in model.layers[:1]:
        layer.trainable = False
    model.summary()
    print('Model is fine tuning')    
    return model
Code example #24
 def __init__(self,
              weights='imagenet',
              target_size=MODEL_INPUT_SIZE_DEFAULT):
     """ Constructor. """
     self.inception_model = inception_v3.InceptionV3(weights=weights)
     self.target_size = target_size
     self.graph = tf.get_default_graph()
Code example #25
def create_model():

    # #Load the VGG model
    # # vgg_model = vgg16.VGG16(weights='imagenet')

    # #Load the ResNet50 model
    # resnet_model = resnet50.ResNet50(weights='imagenet')

    # #Load the MobileNet model
    # mobilenet_model = mobilenet.MobileNet(weights='imagenet')

    # #Load the Inception_V3 model
    # inception_model = inception_v3.InceptionV3(weights='imagenet',
    #                   include_top=False,
    #                   input_shape=(224, 224, 3))

    # create the base pre-trained model
    base_model = inception_v3.InceptionV3(weights='imagenet',
                                          include_top=False)

    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # let's add a fully-connected layer
    x = Dense(1024, activation='relu')(x)
    # and a logistic layer
    predictions = Dense(1, activation='sigmoid')(x)

    # this is the model we will train
    model = Model(inputs=base_model.input, outputs=predictions)

    save_model(model, 'base_model.h5')
    return model
Code example #26
File: visualize.py Project: olest/dsde-deep-learning
def deep_dream(args, model):
	"""Process:
	- Load the original image.
	- Define a number of processing scales (i.e. image shapes),
		from smallest to largest.
	- Resize the original image to the smallest scale.
	- For every scale, starting with the smallest (i.e. current one):
		- Run gradient ascent
		- Upscale image to the next scale
		- Reinject the detail that was lost at upscaling time
	- Stop when we are back to the original size.
	To obtain the detail lost during upscaling, we simply
	take the original image, shrink it down, upscale it,
	and compare the result to the (resized) original image.
	"""

	num_octave = 3  # Number of scales at which to run gradient ascent
	octave_scale = 1.2  # Size ratio between scales
	max_loss = 15.
	target_layers = {'mixed2': 0.2, 'mixed3': 0.3, 'mixed4': 0.2, 'mixed5': 0.4}
	
	model = inception_v3.InceptionV3(weights='imagenet', include_top=False)
	model.summary()
	img = preprocess_image(args.image_path)
	k_fxn = dream_fxn(model, target_layers)

	#img = cv2_image_load(args, args.image_path)

	if K.image_data_format() == 'channels_first':
		original_shape = img.shape[2:]
	else:
		original_shape = img.shape[1:3]
	successive_shapes = [original_shape]
	for i in range(1, num_octave):
		shape = tuple([int(dim / (octave_scale ** i)) for dim in original_shape])
		successive_shapes.append(shape)
	successive_shapes = successive_shapes[::-1]
	original_img = np.copy(img)
	shrunk_original_img = resize_img(img, successive_shapes[0])

	for shape in successive_shapes:
		print('Processing image shape', shape)
		img = resize_img(img, shape)
		img = gradient_ascent(img, k_fxn,
							  iterations=args.iterations,
							  step=args.learning_rate,
							  max_loss=max_loss)
		upscaled_shrunk_original_img = resize_img(shrunk_original_img, shape)
		same_size_original = resize_img(original_img, shape)
		lost_detail = same_size_original - upscaled_shrunk_original_img

		img += lost_detail
		shrunk_original_img = resize_img(original_img, shape)

	out_file = args.save_path + '%s/%s/deep_dream/dreamy.png' % (plain_name(args.weights), plain_name(args.image_path))
	if not os.path.exists(os.path.dirname(out_file)):
		os.makedirs(os.path.dirname(out_file))
	im = deprocess_image(args, img, convert_bgr2rgb=False)
	imsave(out_file, im)
Code example #27
 def compute_bottleneck_values(X):
     """ Compute bottleneck values for the pretrained model on our data """
     model = inception_v3.InceptionV3(weights='imagenet',
                                      include_top=False,
                                      input_shape=(image_size[0],
                                                   image_size[1], 3))
     bottleneck_values = model.predict(X)
     return bottleneck_values
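Usage sketch (assumptions: image_size is a module-level (height, width) tuple and x_batch is a batch already run through inception_v3.preprocess_input):

features = compute_bottleneck_values(x_batch)  # 4D feature maps, 2048 channels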
Code example #28
def initialize(the_app):
    global app
    app = the_app
    app.config['INCEPTION_MODEL'] = inception_v3.InceptionV3(
        weights='imagenet', include_top=False)
    app.config['INCEPTION_CLASSIFIER'] = get_inception_model()
    initialize_face_detector(app)
    initialize_dog_detector(app)
Code example #29
def cargarModeloInceptionV3():
    # Load the InceptionV3 model
    print("Loading InceptionV3 model ...")
    inceptionV3_model = inception_v3.InceptionV3(weights='imagenet')
    inceptionV3_model.summary()
    print("InceptionV3 model loaded!")
    inceptionV3_graph = tf.get_default_graph()
    return inceptionV3_model, inceptionV3_graph
Code example #30
        def Inception_V3(self):
            """
            Args: None

            Returns: A pre-trained (on imagenet) Inception_V3 model which is ready to predict
            """
            inception = inception_v3.InceptionV3(weights='imagenet')
            return inception