Code example #1
def load_model(config):
    alexnet = convnet('alexnet', weights_path=config['pre_weight_path'])

    input = alexnet.input
    img_representation = alexnet.get_layer("flatten").output
    #print img_representation
    #dense_3 = Dropout(0.5)(dense_2)
    #dense_3 = Dense(1000,name='dense_3')(dense_3)
    #prediction = Activation("softmax",name="softmax")(dense_3)
    #classifier = Flatten(name="flatten")(img_representation)
    classifier = Dense(4096, activation="relu",
                       name='dense_1')(img_representation)
    classifier = Dropout(0.5)(classifier)
    classifier = Dense(4096, activation="relu", name='dense_2')(classifier)
    classifier = Dropout(0.5)(classifier)
    classifier = Dense(5, name='dense_3')(classifier)
    classifier = Activation("softmax", name="softmax")(classifier)
    model = Model(input=input, output=classifier)

    # Freeze the first 29 layers (their weights will not be updated); the remaining layers stay trainable
    print("Number of Layers: {}".format(len(model.layers)))
    for idx, layer in enumerate(model.layers):
        if idx < 29:
            layer.trainable = False
        else:
            print("Layer {} is trainable".format(idx))
            print("input shape {}".format(layer.input_shape))
            print("output shape {}".format(layer.output_shape))

    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=["accuracy"])

    return model
Code example #2
def init_xception(lr, hidden, drop):
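    # lr is passed in as an exponent; convert it to the actual learning rate 10**(-lr)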
    lr = 1 / np.power(10, lr)

    """base_xception = tf.keras.applications.Xception(input_shape=(img_height, img_width, channels),
                                                   include_top=False, weights="imagenet")

    x = base_xception.output
    x = tf.keras.layers.Flatten()(x)
    x = tf.keras.layers.Dropout(drop)(x)

    x = tf.keras.layers.Dense(hidden, activation="elu")(x)

    output_layer = tf.keras.layers.Dense(1)(x)
    xception_model = tf.keras.Model(inputs=base_xception.input, outputs=output_layer)

    xception_model.compile(tf.keras.optimizers.Adam(lr=lr), tf.keras.losses.MeanSquaredError(), ["mae", "accuracy"])"""


    base_alexnet = convnet("alexnet", weights_path="alexnet_weights.h5", heatmap=False)
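    # heatmap=False loads the standard AlexNet classifier rather than the fully-convolutional heatmap variant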

    x = base_alexnet.output
    x = tf.keras.layers.Flatten()(x)
    x = tf.keras.layers.Dropout(drop)(x)

    x = tf.keras.layers.Dense(hidden, activation="elu")(x)

    output_layer = tf.keras.layers.Dense(1)(x)
    alexnet = tf.keras.Model(inputs=base_alexnet.input, outputs=output_layer)

    alexnet.compile(tf.keras.optimizers.Adam(lr=lr), tf.keras.losses.MeanSquaredError(), ["mae", "accuracy"])


    return alexnet
Code example #3
    def __init__(self, weights_path=None):
        self.img_width, self.img_height = 64, 64  # dimensions of our images.
        self.out_width, self.out_height = 150, 350  # dimensions of our images.
        sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
        self.model = convnet('alexnet',weights_path=weights_path, heatmap=True)
        #model = convnet('vgg_16', weights_path="post_vgg16_weights.h5", heatmap=True)

        self.model.compile(optimizer=sgd, loss='mse')
Code example #4
 def __init__(self):
     K.set_image_dim_ordering('th')
     sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
     #self.model = convnet('alexnet', weights_path="alexnet_weights_imagenet.h5", heatmap=False)
     self.model = convnet(
         'alexnet',
         weights_path="./networks/alexnet/alexnet_weights_imagenet.h5",
         heatmap=False)
     self.model.compile(optimizer=sgd, loss='mse')
Code example #5
 def __init__(self, weights_path=None):
     self.img_width, self.img_height = 64, 64  # dimensions of our images.
     self.out_width, self.out_height = 150, 350  # dimensions of our images.
     sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
     print("loading alexnet weights from: {}".format(weights_path))
     self.model = convnet('alexnet',
                          weights_path=weights_path,
                          heatmap=True)
     #model = convnet('vgg_16', weights_path="post_vgg16_weights.h5", heatmap=True)
     print("compiling model..")
     self.model.compile(optimizer=sgd, loss='categorical_crossentropy')
Code example #6
def alex_model(config):
    alexnet = convnet('alexnet', weights_path=config['pre_weight_path'])

    input = alexnet.input
    img_representation = alexnet.get_layer("dense_2").output

    classifier = Dense(4,name='dense_3')(img_representation)
    classifier = Activation("softmax", name="softmax")(classifier)
    model = Model(input=input,output=classifier)

    # Set the first 14 layers to non-trainable (their weights will not be updated)
    for layer in model.layers[:14]:
        layer.trainable = False

    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy',metrics=["accuracy"])

    return model
Code example #7
def extract_features(files, path, labels, num_batches=1, net='alexnet'):
    batches = split_list(files, wanted_parts=num_batches)
    size = len(batches)
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    if net == 'alexnet':
        wp = "../weights/alexnet_weights.h5"
        sc = [227, 227, 3]
    elif net == 'vgg_16':
        wp = "../weights/vgg16_weights.h5"
        sc = [224, 224, 3]
    elif net == 'vgg_19':
        wp = "../weights/vgg19_weights.h5"
        sc = [224, 224, 3]
    else:
        print('Wrong network!')
        return 0
    model = convnet(net, weights_path=wp, heatmap=False)
    model.compile(optimizer=sgd, loss='mse')
    feature_map = Model(input=model.input,
                        output=model.get_layer('dense_2').output)
    y = []

    n = 0
    print('Starting, done: ' + str(n) + '%')
    times = [time.time()]
    for names in batches:
        n += 1
        for case in names:
            y.append(labels[case[:-4] + '.aiff'])
        actual_batch = get_images_ready_alexnet_batch(names, path, size=sc)
        features = feature_map.predict(actual_batch)
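        # Stack the feature batches into a single array as we go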
        if n == 1:
            data = features
        else:
            data = numpy.concatenate((data, features))
        if n * 100 / size % 5 == 0:
            times.append(time.time())
            print('Done for: ' + str(n * 100 / size) + '%')
            print('Time for the last 5%: ' + str(times[-1] - times[-2]))
            print('Expected time to finish: ' +
                  str((20 - n * 20 / size) *
                      (times[-1] - times[-2]) / 60) + ' minutes.')
    print('Done.')
    return data, y
Code example #8
def load_model(model_name):
    print("Loading {} model...".format(model_name))
    if model_name == "resnet50":
        model = ResNet50(weights='imagenet')
    elif model_name == "inceptionv3":
        model = InceptionV3(weights='imagenet')
    elif model_name == "vgg19":
        model = VGG19(weights='imagenet')
    elif model_name == "vgg16":
        model = VGG16(weights='imagenet')
    elif model_name == "alexnet":
        sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
        model = convnet('alexnet',
                        weights_path="../alexnet/alexnet_weights.h5",
                        heatmap=False)
        model.compile(optimizer=sgd, loss='mse')
    else:
        raise ValueError("Wrong model name")
    print("Loaded!")
    return model, model_name
Code example #9
from keras.optimizers import SGD
from convnetskeras.convnets import preprocess_image_batch, convnet

im = preprocess_image_batch(['examples/index.jpg'],
                            img_size=(256, 256),
                            crop_size=(227, 227),
                            color_mode="rgb")

sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model = convnet('alexnet',
                weights_path="weights/alexnet_weights(1).h5",
                heatmap=False)
model.compile(optimizer=sgd, loss='mse')

out = model.predict(im)
Code example #10
    model = Model(input=inputs, output=dense_1)
    if weights_path:
        model.load_weights(weights_path)
    return model


#Here is code from a 3D CNN example on the following blog:
#   http://learnandshare645.blogspot.in/2016/06/3d-cnn-in-keras-action-recognition.html
#
#Good initial CNN tutorial:
#   http://machinelearningmastery.com/handwritten-digit-recognition-using-convolutional-neural-networks-python-keras/

#ourAlexModel = convnet('alexnet',weights_path='alexnet_weights.h5')
ourAlexModel = myAlexNet()
originalAlexModel = convnet('alexnet',
                            weights_path='alexnet_weights.h5',
                            heatmap=False)
#alexmodel = convnet('alexnet', heatmap=False)
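# Copy the pretrained AlexNet weights into our model layer by layer, stopping at the flatten layer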
for layer, mylayer in zip(originalAlexModel.layers, ourAlexModel.layers):
    print(layer.name)
    #if mylayer.name == 'mil_1':
    if mylayer.name == 'flatten':
        break
    else:
        weightsval = layer.get_weights()
        print(len(weightsval))
        mylayer.set_weights(weightsval)

ourAlexModel.compile(loss='categorical_crossentropy',
                     optimizer='sgd',
                     metrics=['accuracy'])
Code example #11
print(np.sum(y1 == 1))
print(np.sum(y1 == 2))
print(np.sum(y1 == 3))
print(np.sum(y1 == 4))

#    print(GL)
print(GL.shape[0])
print(TOT_0_r.shape[0])

#########################################################
#### CNN feature extraction
#########################################################

#CNN_model = convnet('alexnet',weights_path="alexnet_weights.h5", heatmap=False)
CNN_model = convnet('alexnet24',
                    weights_path="./tmp/weights_alex_7_tot.hdf5",
                    heatmap=False)
#        CNN_model = convnet('vgg_16',weights_path="vgg16_weights.h5", heatmap=False)

from keras import backend as K
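# Backend function mapping an input batch (plus learning phase) to the activations of layer 31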
get_dense_features = K.function(
    [CNN_model.layers[0].input, K.learning_phase()],
    [CNN_model.layers[31].output])  #33

print("Feature extraction...")
X_0 = np.zeros((97, INPUT_LEN, INPUT_DIM))
X_90 = np.zeros((97, INPUT_LEN, INPUT_DIM))
X_180 = np.zeros((97, INPUT_LEN, INPUT_DIM))
X_270 = np.zeros((97, INPUT_LEN, INPUT_DIM))
y = np.zeros((97, INPUT_LEN, OUTPUT_LEN))
Code example #12
File: a.py  Project: MansoorHanif/FYP-web-app
# 		print 'This Breed is not served, or could not be classified'

import numpy as np
from keras.optimizers import SGD
from keras.preprocessing import image
from convnetskeras.convnets import preprocess_image_batch, convnet
import sys

############################################# 		MODEL
# Optimizer
sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)

# Model
model = convnet(
    'alexnet',
    weights_path=
    "/home/mansoor/pywork/catsanddogs/Alexnet/SGD_CC/allbest/weights--11-0.94.hdf5",
    heatmap=False,
    trainable=False)

# Compile model
model.compile(optimizer=sgd, loss='categorical_crossentropy')
print("Model successfully built.")


def process_image(img_path):

    ###########################################		 Predictions
    print(img_path)

    img = image.load_img(img_path, target_size=(227, 227))
    x = image.img_to_array(img)
Code example #13
    "teapot", "table lamp", "castle", "pillow", "volcano", "coffee mug",
    "envelope"
]
categoryPaths = [[
    'data/' + category + '/0000000' + str(i) + '.jpg' for i in range(500)
] for category in categories]

outputs = []
for category, paths in enumerate(categoryPaths):
    print('start processing category ' + categories[category])
    # And resize it to fit
    imgs = preprocess_image_batch(paths,
                                  img_size=(256, 256),
                                  crop_size=(224, 224),
                                  color_mode="bgr")
    # Specify Model Parameters, load pretrained weights and compile Model
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model = convnet('vgg_16', weights_path="vgg16_weights.h5", heatmap=False)
    model.compile(optimizer=sgd, loss='mse')

    # Predict image
    out = model.predict(imgs)
    print('saving')
    json.dump({categories[category]: out.tolist()},
              open(categories[category] + '.json', 'w'))
    outputs.append({categories[category]: out.tolist()})

print('save final result')
json.dump(outputs, open('classifications.json', 'w'))
print('finished')
Code example #14
    im = im[:,:,0:3]
    #print(im.shape)
    draw_img[0:im.shape[0], 0:im.shape[1]] = im

    return draw_img

from keras.optimizers import SGD
from convnetskeras.convnets import preprocess_image_batch, convnet
from convnetskeras.imagenet_tool import synset_to_dfs_ids


sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model = convnet('alexnet',weights_path="weights/alexnet_weights.h5", heatmap=True)
model.compile(optimizer=sgd, loss='mse')

def full_pipe(img):
    img = lanefind.lanePipeline(img)
    img = carDetectionPipelineNN(img)
    return img

from moviepy.editor import VideoFileClip
def processVideo(input_video,output):
    initializePipeline()
    print('input:',input_video)
    clip1 = VideoFileClip(input_video)
    print('got clip1')
    #out_clip = clip1.fl_image(carDetectionPipelineNN)
    out_clip = clip1.fl_image(full_pipe)
Code example #15
File: model1.py  Project: leonardblier/neural-models
from keras.optimizers import SGD
from keras.regularizers import l2
from keras.models import Model
from keras.layers import Dense, Dropout, Input, Activation

from convnetskeras.convnets import convnet

trainable_layers = [None]

vgg16 = convnet('vgg_16',
                weights_path='/srv/data/convnetsweights/vgg16_weights.h5',
                trainable=["None"])

for l in vgg16.layers:
    if l.name in trainable_layers:
        pass
    else:
        l.trainable = False
        

input = vgg16.input
img_representation = vgg16.get_layer("dense_2").output

classifier = Dense(2,name='classifier',
                   W_regularizer=l2(0.001))(img_representation)
classifier = Activation("softmax", name="softmax")(classifier)

model = Model(input=input,output=classifier)
Code example #16
    X_test_extend = np.zeros((X_test.shape[0], 3, 227, 227))
    for i in xrange(X_test.shape[0]):
        rex = np.resize(X_test[i, :, :, :], (227, 227))
        X_test_extend[i, 0, :, :] = rex
        X_test_extend[i, 1, :, :] = rex
        X_test_extend[i, 2, :, :] = rex
    X_test = X_test_extend
    X_test_test_extend = np.zeros((X_test_test.shape[0], 3, 227, 227))
    for i in xrange(X_test_test.shape[0]):
        rex = np.resize(X_test_test[i, :, :, :], (227, 227))
        X_test_test_extend[i, 0, :, :] = rex
        X_test_test_extend[i, 1, :, :] = rex
        X_test_test_extend[i, 2, :, :] = rex
    X_test_test = X_test_test_extend
    alexmodel = convnet('alexnet',
                        weights_path='alexnet_weights.h5',
                        heatmap=False)
    model = convnet('alexnet', outdim=2)
    if pretrain:
        for layer, mylayer in zip(alexmodel.layers, model.layers):
            print(layer.name)
            if layer.name == 'dense_3':
                break
            else:
                weightsval = layer.get_weights()
                print(len(weightsval))
                mylayer.set_weights(weightsval)

X_test_test = X_test_test.astype('float32')
for f in os.listdir('./'):
    metrics = ['loss', 'auc', 'f1', 'acc', 'reca', 'prec']
Code example #17
File: model8.py  Project: afcarl/neural-models
from keras.optimizers import SGD
from keras.regularizers import l2
from keras.models import Model
from keras.layers import Dense, Dropout, Input, Activation

from convnetskeras.convnets import convnet

trainable_layers = []

vgg16 = convnet('vgg_16',
                weights_path='/srv/data/convnetsweights/vgg16_weights.h5',
                trainable=["None"])

for l in vgg16.layers:
    if l.name in trainable_layers:
        pass
    else:
        l.trainable = False

input = vgg16.input
img_representation = vgg16.get_layer("flatten").output

classifier = Dense(2, name='classifier',
                   W_regularizer=l2(0.001))(img_representation)
classifier = Activation("softmax", name="softmax")(classifier)

model = Model(input=input, output=classifier)

sgd = SGD(lr=.1, decay=1.e-6, momentum=0.9, nesterov=False)

model.compile(optimizer=sgd,
Code example #18
#    just_file_names.append(each.split('/')[8])
#targets = np.concatenate((np.ones((len(image_paths_live),3,227,227)),np.zeros((len(image_paths_spoof),3,227,227))))
#targets = np.array([list(np.concatenate((np.ones(len(image_paths_live)),np.zeros(len(image_paths_spoof))))).reshape(len(image_paths_live)+len(image_paths_spoof),1), list(np.concatenate((np.zeros(len(image_paths_live)),np.ones(len(image_paths_spoof))))).reshape(len(image_paths_live)+len(image_paths_spoof),1)])

im = preprocess_image_batch(im_full_paths,
                            img_size=(256, 256),
                            crop_size=(227, 227),
                            color_mode="rgb")
im_validation = preprocess_image_batch(im_full_paths_validation,
                                       img_size=(256, 256),
                                       crop_size=(227, 227),
                                       color_mode="rgb")

base_model = convnet(
    'alexnet',
    weights_path=
    "/Users/km4n6/Box Sync/kiran/NN_project/final_project/weights/alexnet_weights.h5",
    heatmap=False)
x = base_model.output
#x=Dropout(0.5)(x)
x = Dense(550,
          input_dim=500,
          name='Relu_dense',
          init='normal',
          activation='relu')(x)
x = Dense(250,
          input_dim=250,
          name='Relu_dense2',
          init='normal',
          activation='relu')(x)
predictions = Dense(2,
Code example #19
dgdx_val = test_datagen.flow_from_directory(
        '/home/wangnxr/dataset/vid_offset_0/test/',
        read_formats={'png'},
        num_frames=4,
        target_size=(int(340), int(256)),
        batch_size=32,
        class_mode='binary')
#pdb.set_trace()
train_datagen.fit_generator(dgdx, nb_iter=96)
test_datagen.fit_generator(dgdx_val, nb_iter=96)

train_generator=dgdx
validation_generator=dgdx_val
#pdb.set_trace()
alexnet_model = convnet('alexnet', weights_path="/home/wangnxr/Documents/ecogdeep/convnets-keras/examples/alexnet_weights.h5")
#base_model = VGG16(input_tensor=(Input(shape=(3,224, 224))), include_top=False, weights='imagenet')
base_model = Model(alexnet_model.input, alexnet_model.get_layer("dense_2").output)

frame_a = Input(shape=(3,227,227))
frame_b = Input(shape=(3,227,227))
frame_c = Input(shape=(3,227,227))
frame_d = Input(shape=(3,227,227))


tower1 = base_model(frame_a)
tower2 = base_model(frame_b)
tower3 = base_model(frame_c)
tower4 = base_model(frame_d)
x = merge([tower1, tower2, tower3, tower4], mode='concat')
Code example #20
from keras.optimizers import SGD
from convnetskeras.convnets import preprocess_image_batch, convnet

im = preprocess_image_batch(['examples/dog.jpg'],img_size=(256,256), crop_size=(227,227), color_mode="rgb")

sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model = convnet('alexnet',weights_path="weights/alexnet_weights.h5", heatmap=False)
model.compile(optimizer=sgd, loss='mse')

out = model.predict(im)
print(out)
Code example #21
     rex = np.resize(X_test[i, :, :, :], (227, 227))
     X_test_extend[i, 0, :, :] = rex
     X_test_extend[i, 1, :, :] = rex
     X_test_extend[i, 2, :, :] = rex
 X_test = X_test_extend
 X_test_test_extend = np.zeros((X_test_test.shape[0], 3, 227, 227))
 for i in xrange(X_test_test.shape[0]):
     rex = np.resize(X_test_test[i, :, :, :], (227, 227))
     X_test_test_extend[i, 0, :, :] = rex
     X_test_test_extend[i, 1, :, :] = rex
     X_test_test_extend[i, 2, :, :] = rex
 X_test_test = X_test_test_extend
 if pretrain:  # 227*227
     alexmodel = convnet('alexnet',
                         weights_path='alexnet_weights.h5',
                         heatmap=False,
                         l1=l1factor,
                         l2=l2factor)
     model = convnet('alexnet',
                     outdim=2,
                     l1=l1factor,
                     l2=l2factor,
                     usemymil=mymil,
                     k=mymilk)
     for layer, mylayer in zip(alexmodel.layers, model.layers):
         print(layer.name)
         if mylayer.name == 'mil_1':
             break
         else:
             weightsval = layer.get_weights()
             print(len(weightsval))
Code example #22
    X_train[i, :, :, :] = x

#X_train = preprocess_image_batch(list_train_images, img_size=(227,227), color_mode='bgr')

#X_train = X_train/255.0
#mean_train = np.mean(X_train)
#std_train = np.std(X_train)
#X_train -= mean_train
#X_train /= std_train

#with open('X_train_stats_alexnet.txt', 'w') as out_file:
#    out_file.write(str(mean_train) + "\n")
#    out_file.write(str(std_train) + "\n")

base_model = convnet('alexnet',
                     weights_path="../models/alexnet_weights.h5",
                     heatmap=False)

# this is the model we will train
model = Sequential()
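# Strip the last two layers from the pretrained AlexNet so it serves as a feature extractor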
base_model.layers.pop()
base_model.layers.pop()
model.add(base_model)
#model.add(Flatten(input_shape=base_model.output_shape[1:]))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
print(base_model.summary())
print(model.summary())

# first: train only the top layers (which were randomly initialized)
Code example #23
import os
os.environ['KERAS_BACKEND'] = 'theano'
os.environ['THEANO_FLAGS'] = 'floatX=float32,device=gpu,lib.cnmem=0.8,dnn.conv.algo_bwd_filter=deterministic,dnn.conv.algo_bwd_data=deterministic,blas.ldflags=-LC:/toolkits/openblas-0.2.14-int32/bin -lopenblas'

from keras.optimizers import SGD
from convnetskeras.convnets import preprocess_image_batch, convnet
import numpy as np
DATAPATH = "../USNS/RawData/train/"
imgNames = [DATAPATH+fname for fname in os.listdir(DATAPATH) if "mask" not in fname]

x_train = preprocess_image_batch(imgNames,img_size=(256,256), crop_size=(227,227), color_mode="rgb")
print("Pre-processing done")
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model = convnet('alexnet',weights_path="../USNS/AlexNet/weights/alexnet_weights.h5", heatmap=False)
model.compile(optimizer=sgd, loss='mse')
print("Model Compiled")
train = model.predict(x_train)
print("Predictions made")
np.savez("../USNS/AlexNet/train/Data.npz",X_train=train)
Code example #24
    (TOT_0_r[ind_train_0, :, :], TOT_90_r[ind_train_90, :, :],
     TOT_180_r[ind_train_180, :, :], TOT_270_r[ind_train_270, :, :]),
    axis=0)
newYtest = np.concatenate(
    (y[ind_test_0], y[ind_test_90], y[ind_test_180], y[ind_test_270]), axis=0)
newYval = np.concatenate(
    (y[ind_val_0], y[ind_val_90], y[ind_val_180], y[ind_val_270]), axis=0)
newYtrain = np.concatenate(
    (y[ind_train_0], y[ind_train_90], y[ind_train_180], y[ind_train_270]),
    axis=0)

sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
###model = convnet('alexnet',weights_path="alexnet_weights.h5", heatmap=False)
###model2 = convnet('alexnet24', weights_path=None, heatmap=False)
model2 = convnet('alexnet24',
                 weights_path="./tmp/weights_alex_7_5.hdf5",
                 heatmap=False)  ###

model2.compile(loss='categorical_crossentropy',
               optimizer=sgd,
               metrics=['accuracy'])

ny = np.zeros(newYtest.shape[0])
for nf in range(0, newYtest.shape[0]):
    for df in range(24):
        if newYtest[nf, df] == 1:
            ny[nf] = df

score = model2.evaluate(X_test1, newYtest, batch_size=32)
loss_and_metrics = model2.evaluate(X_test1, newYtest)
predict = model2.predict(X_test1, batch_size=32)
Code example #25
from keras.optimizers import SGD
from convnetskeras.convnets import preprocess_image_batch, convnet
from convnetskeras.imagenet_tool import synset_to_dfs_ids

#im = preprocess_image_batch(['examples/bern.jpg'], color_mode="bgr")
im = preprocess_image_batch(['examples/test.png'], color_mode="bgr")

sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model = convnet(
    'alexnet',
    weights_path="../../../../models/fully_conv/alexnet_weights.h5",
    heatmap=True)
#model = convnet('vgg_16',weights_path="../../../../models/fully_conv/vgg16_weights.h5", heatmap=True)

model.compile(optimizer=sgd, loss='mse')

out = model.predict(im)

s = "n02084071"
ids = synset_to_dfs_ids(s)
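# Sum the per-class heatmaps for every ImageNet class under synset n02084071 (dog)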
heatmap = out[0, ids].sum(axis=0)

# Then, we can get the image
import matplotlib.pyplot as plt
plt.imsave("examples/heatmap.png", heatmap)
Code example #26
    X_test = []
    X_test_id = []
    for fl in files:
        flbase = os.path.basename(fl)
        im = preprocess_image_batch([fl],
                                    img_size=(256, 256),
                                    crop_size=(227, 227),
                                    color_mode="rgb")
        out = model.predict(im)
        X_test.append(out.flatten())
        X_test_id.append(flbase)
    return np.array(X_test), X_test_id


sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model = convnet('alexnet', weights_path="weights/alexnet_weights.h5")
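# Pop the final classification layers so predict() returns bottleneck features instead of class scores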
model.layers.pop()
model.layers.pop()
model.outputs = [model.layers[-1].output]
model.layers[-1].outbound_nodes = []
model.compile(optimizer=sgd, loss='mse')

p, y, Xid = load_train()
print(p)
train = pd.DataFrame(p, index=Xid)

pt, Xtid = load_test()
test = pd.DataFrame(pt, index=Xtid)

train.to_csv("train.bottleneck.vgg16.csv")
test.to_csv("test.bottleneck.vgg16.csv")
Code example #27
     rex = np.resize(X_test[i, :, :, :], (227, 227))
     X_test_extend[i, 0, :, :] = rex
     X_test_extend[i, 1, :, :] = rex
     X_test_extend[i, 2, :, :] = rex
 X_test = X_test_extend
 X_test_test_extend = np.zeros((X_test_test.shape[0], 3, 227, 227))
 for i in xrange(X_test_test.shape[0]):
     rex = np.resize(X_test_test[i, :, :, :], (227, 227))
     X_test_test_extend[i, 0, :, :] = rex
     X_test_test_extend[i, 1, :, :] = rex
     X_test_test_extend[i, 2, :, :] = rex
 X_test_test = X_test_test_extend
 if pretrain:  # 227*227
     alexmodel = convnet('alexnet',
                         weights_path='alexnet_weights.h5',
                         heatmap=False,
                         l1=l1factor,
                         l2=l2factor)
     model = convnet('alexnet',
                     outdim=2,
                     l1=l1factor,
                     l2=l2factor,
                     sparsemil=sparsemil,
                     sparsemill1=sparsemill1,
                     sparsemill2=sparsemill2)
     for layer, mylayer in zip(alexmodel.layers, model.layers):
         print(layer.name)
         if mylayer.name == 'mil_1':
             break
         else:
             weightsval = layer.get_weights()
Code example #28
    X_test = []
    X_test_id = []
    for fl in files:
        flbase = os.path.basename(fl)
        im = preprocess_image_batch([fl],
                                    img_size=(256,256),
                                    crop_size=(224,224),
                                    color_mode="bgr")
        out = model.predict(im)
        X_test.append(out.flatten())
        X_test_id.append(flbase)
    return np.array(X_test), X_test_id

sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model = convnet('vgg_16',weights_path="weights/vgg16_weights.h5")
model.layers.pop()
model.layers.pop()
model.outputs = [model.layers[-1].output]
model.layers[-1].outbound_nodes = []
model.compile(optimizer=sgd, loss='mse')

p, y, Xid = load_train()
print(p)
train = pd.DataFrame(p, index=Xid)

pt, Xtid = load_test()
test = pd.DataFrame(pt, index=Xtid)

train.to_csv("train.bottleneck.vgg16.new.csv")
test.to_csv("test.bottleneck.vgg16.new.csv")