Example #1
# imports assumed by this snippet (the listing omits them); standalone Keras paths are an assumption
import pickle

import numpy as np
from keras.models import Model
from keras.preprocessing import image


def Run(self, img_path, model_name):

    # config variables
    weights = 'imagenet'
    include_top = 0
    train_path = 'jpg'
    classifier_file = 'output/flowers_17/' + model_name + '/classifier.cpickle'

    # create the pretrained models
    # check for pretrained weight usage or not
    # check for top layers to be included or not
    if model_name == "vgg16":
        from vgg16 import VGG16, preprocess_input
        base_model = VGG16(weights=weights)
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('fc1').output)
        image_size = (224, 224)
    elif model_name == "vgg19":
        from vgg19 import VGG19, preprocess_input
        base_model = VGG19(weights=weights)
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('fc1').output)
        image_size = (224, 224)
    elif model_name == "resnet50":
        from resnet50 import ResNet50, preprocess_input
        base_model = ResNet50(weights=weights)
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('avg_pool').output)
        image_size = (224, 224)
    elif model_name == "inceptionv3":
        from inception_v3 import InceptionV3, preprocess_input
        base_model = InceptionV3(weights=weights)
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('mixed9').output)
        image_size = (299, 299)
    elif model_name == "xception":
        from xception import Xception, preprocess_input
        base_model = Xception(weights=weights)
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('avg_pool').output)
        image_size = (299, 299)
    else:
        base_model = None
        raise ValueError("unsupported model_name: " + model_name)

    img = image.load_img(img_path, target_size=image_size)
    img_array = image.img_to_array(img)
    img_array = np.expand_dims(img_array, axis=0)
    img_array = preprocess_input(img_array)
    feature = model.predict(img_array)
    feature = feature.flatten()
    with open(classifier_file, 'rb') as f:
        model2 = pickle.load(f)

    pred = model2.predict(np.atleast_2d(feature))  # sklearn estimators expect a 2-D array
    prob = model2.predict_proba(np.atleast_2d(feature))[0]

    return pred, prob[0]
Example #2
le = LabelEncoder()
le_labels = le.fit_transform(labels)

# In[11]:

# loop over all the labels in the folder
for label in train_labels:
    cur_path = train_path + "/" + label
    for image_path in glob.glob(cur_path + "/*.jpg"):  # glob needs a file pattern, not just the folder (assuming .jpg images)
        img = image.load_img(image_path, target_size=image_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        print(label + " complete")
        sys.stdout.flush()
        feature = model.predict(x)
        flat = feature.flatten()
        features.append(flat)

for img_file in enchinacea_imgfiles:
    img = image.load_img(img_file, target_size=image_size)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    print(img_file + " complete")
    feature = model.predict(x)
    flat = feature.flatten()
    features.append(flat)

for img_file in frangipani_imgfiles:
    img = image.load_img(img_file, target_size=image_size)
Example #3
Important tips:

Consider the following two tips carefully and patiently.

By comparing model.summary() with pp model.trainable_weights, we can see how the Conv1D weights (filters) are used to screen the embedding_1 tensor.

In fact, the weights of every layer, whether Conv1D, Dense, or Conv2D, can be thought of as filters that screen the previous layer's tensor.

How do the embedding weights transform input_1 (?, 1000) into embedding_1 (?, 1000, 100)? This deserves further research (a small numpy sketch follows right after this docstring).
"""

model_path = "/Users/Natsume/Downloads/data_for_all/word_embeddings/pretrainedWordEmbedding_2.h5"
if os.path.isfile(model_path):
    model = load_model(model_path)

model.fit(x_train, y_train, batch_size=128, epochs=1, validation_split=0.2)
#   validation_data=(x_val, y_val))

model.save(
    "/Users/Natsume/Downloads/data_for_all/word_embeddings/pretrainedWordEmbedding_3.h5"
)

loss, accuracy = model.evaluate(x_test,
                                y_test,
                                batch_size=len(x_test),
                                verbose=1)
preds = model.predict(x_test)
preds_integer = np.argmax(preds, axis=1)
Example #4
	  '            mapping output names to Numpy arrays.\n',
	  '        batch_size: integer. Number of samples per gradient update.\n',
	  '        verbose: verbosity mode, 0 or 1.\n',
	  '        sample_weight: Array of weights to weight the contribution\n',
	  '            of different samples to the loss and metrics.\n',
	  '\n',
	  '    Returns:\n',
	  '        Scalar test loss (if the model has a single output and no '
	  'metrics)\n',
	  '        or list of scalars (if the model has multiple outputs\n',
	  '        and/or metrics). The attribute `model.metrics_names` will give '
	  'you\n',
	  '        the display labels for the scalar outputs.\n',
	"""

	preds = model.predict(x, verbose=1)
	"""
	(['  def predict(self, x, batch_size=32, verbose=0):\n',
	  '    Generates output predictions for the input samples.\n',
	  '\n',
	  '    Computation is done in batches.\n',
	  '\n',
	  '    Arguments:\n',
	  '        x: the input data, as a Numpy array\n',
	  '            (or list of Numpy arrays if the model has multiple outputs).\n',
	  '        batch_size: integer.\n',
	  '        verbose: verbosity mode, 0 or 1.\n',
	  '\n',
	  '    Returns:\n',
	  '        Numpy array(s) of predictions.\n',
	  '\n',
Example #5
def main():
    training_images, training_labels, test_images, test_labels = load_dataset()

    # plt.imshow(training_images[:,:,0], cmap='gray')
    # plt.show()

    perm_train = np.random.permutation(training_labels.size)
    training_labels = training_labels[perm_train]
    training_images = (training_images[perm_train, :, :] - 127.5) / 127.5
    training_images = np.expand_dims(training_images, -1)
    print(training_images.shape)
    test_images = test_images / 255.0
    test_images = np.expand_dims(test_images, -1)

    # pdb.set_trace()

    #    training_labels = to_categorical(training_labels, NUM_CLASSES)
    #    test_labels = to_categorical(test_labels, NUM_CLASSES)

    BATCH_SIZE = 32 * 8
    WIDTH, HEIGHT = 28, 28
    adam_lr = 0.0002
    adam_beta_1 = 0.5

    #####################################
    ### Defining the Discriminator:
    #####################################
    input_D = Input(shape=(HEIGHT, WIDTH, 1), name='input_D')
    x = Conv2D(filters=32,
               kernel_size=3,
               strides=(2, 2),
               padding='same',
               name='conv1_D')(input_D)
    #x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(filters=32,
               kernel_size=3,
               strides=(2, 2),
               padding='same',
               name='conv2_D')(x)
    #x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Flatten()(x)
    x = Dense(128, activation='relu', name='dense1_D')(x)
    output_D = Dense(1, activation='sigmoid', name='output_D')(x)
    model_D = Model(inputs=input_D, outputs=output_D)
    model_D.compile(loss='binary_crossentropy',
                    optimizer=tf.train.AdamOptimizer(learning_rate=adam_lr,
                                                     beta1=adam_beta_1),
                    metrics=['accuracy'])

    #####################################
    ### Defining the Generator:
    #####################################
    LATENT_SIZE = 100
    input_G = Input(shape=(LATENT_SIZE, ), name='input_gen')
    x = Dense(7 * 7 * 32, activation='linear', name='Dense1_G')(input_G)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Reshape((7, 7, 32))(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(filters=32,
               kernel_size=3,
               strides=(1, 1),
               padding='same',
               name='conv1_gen')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(filters=32,
               kernel_size=3,
               strides=(1, 1),
               padding='same',
               name='conv2_gen')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(filters=1,
               kernel_size=1,
               strides=(1, 1),
               padding='same',
               name='conv3_gen')(x)
    img_G = Activation('tanh')(x)
    model_G = Model(inputs=input_G, outputs=img_G)
    model_G.compile(loss='binary_crossentropy',
                    optimizer=tf.train.AdamOptimizer(learning_rate=adam_lr,
                                                     beta1=adam_beta_1))

    #####################################
    ### Defining the Combined GAN:
    #####################################
    model_D.trainable = False  # Since model_D is already compiled, the standalone discriminator remains trainable,
    # but here in the combined model it becomes non-trainable
    input_main = Input(
        shape=(LATENT_SIZE, ), name='input_main'
    )  # Note that this input should be different from the input to Generator
    combined = Model(inputs=input_main, outputs=model_D(model_G(input_main)))
    combined.compile(loss='binary_crossentropy',
                     optimizer=tf.train.AdamOptimizer(learning_rate=adam_lr,
                                                      beta1=adam_beta_1),
                     metrics=['accuracy'])

    print(combined.summary())

    #####################################
    ### Training:
    #####################################
    bar = InitBar()
    N = training_images.shape[0]
    for iter in range(100):
        fake_input = np.random.randn(1, LATENT_SIZE)
        fake_image = model_G.predict(fake_input)
        loss_G, acc_G, loss_D, acc_D = 0, 0, 0, 0
        steps = int(np.ceil(float(N) / float(BATCH_SIZE)))
        for batch_iter in range(steps):
            bar(100.0 * batch_iter / float(steps))
            real_image, _ = get_batch(batch_iter, BATCH_SIZE // 2,
                                      training_images, training_labels)
            ####################
            ## Discriminator Training
            ####################
            #  Note that if using BN layer in Discriminator, minibatch should contain only real images or fake images.
            fake_input = np.random.randn(BATCH_SIZE // 2, LATENT_SIZE)
            fake_image = model_G.predict(fake_input)
            #real_image = get_real_mbatch(batch_sz=BATCH_SIZE/2, data=training_images)
            agg_input = np.concatenate((fake_image, real_image), axis=0)
            agg_output = np.zeros((BATCH_SIZE, ))
            agg_output[BATCH_SIZE // 2:] = 1
            perm = np.random.permutation(BATCH_SIZE)
            agg_input = agg_input[perm]
            agg_output = agg_output[perm]
            #pdb.set_trace()
            tr = model_D.train_on_batch(x=agg_input, y=agg_output)
            loss_D += tr[0]
            acc_D += tr[1]
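            # Aside (an assumption, not the author's code): per the note above, if the
            # BatchNormalization layers were enabled in the discriminator, each minibatch
            # should be homogeneous, e.g. train on the real and fake halves separately:
            #   model_D.train_on_batch(real_image, np.ones(BATCH_SIZE // 2))
            #   model_D.train_on_batch(fake_image, np.zeros(BATCH_SIZE // 2))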
            #####################
            ## Generator Training
            #####################
            fake_input = np.random.randn(BATCH_SIZE, LATENT_SIZE)
            fake_label = np.ones(BATCH_SIZE, )
            tr = combined.train_on_batch(x=fake_input, y=fake_label)
            loss_G += tr[0]
            acc_G += tr[1]
        print('\nG_loss = {}, G_acc = {}\nD_loss = {}, D_acc = {}'.format(
            loss_G / float(steps), acc_G / float(steps), loss_D / float(steps),
            acc_D / float(steps)))

    for iter in range(10):
        fake_input = np.random.randn(1, LATENT_SIZE)
        fake_image = model_G.predict(fake_input)
        plt.imshow(fake_image[0, :, :, 0])
        plt.show()
Example #6
else:
	base_model = None


# In[10]:


# loop over all the labels in the folder
for label in train_labels:
    cur_path = train_path + "/" + label
    for image_path in glob.glob(cur_path + "/*.jpg"):  # glob needs a file pattern, not just the folder (assuming .jpg images)
        img = image.load_img(image_path, target_size=image_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        feature = model.predict(x)
        flat = feature.flatten()
        features.append(flat)


# In[11]:


# encode the labels using LabelEncoder
targetNames = np.unique(labels)
le = LabelEncoder()
le_labels = le.fit_transform(labels)


# In[12]:
Example #7
# hist = fit() will record a loss for each epoch
#######################################################

hist1 = model.fit(X, y, batch_size=1, validation_split=0.25,
                  epochs=10)  # accuracy 0.75
hist2 = model.fit(X, y, batch_size=1, epochs=1000)  # accuracy 0.75
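
# Aside (assumes the standard Keras History API): each fit() call above returns a History
# object whose .history dict records one value per epoch for each tracked metric.
print(hist1.history['loss'])      # 10 training losses, one per epoch
print(hist1.history['val_loss'])  # validation losses from validation_split=0.25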

# See how the weights change after each epoch, bringing the function closer to XOR
epochs = 5
for epoch in range(epochs):
    print("epoch:", epoch)
    model.fit(X, y, batch_size=1, epochs=1)
    print("Layer1 weights shape:")
    print(model.layers[0].weights)
    print("Layer1 kernel:")
    print(model.layers[0].get_weights()[0])  # each round of training moves the network a step closer to the XOR function
    print("Layer1 bias:")
    print(model.layers[0].get_weights()[1])

print(model.predict(X))
print(model1.predict(X))
error = model.evaluate([X], [y])
print("error", error)
"""
[[ 0.0033028 ]
 [ 0.99581173]
 [ 0.99530098]
 [ 0.00564186]]
"""
Example #8
    for i in range(generated_images.shape[0]):
        plt.subplot(dim[0], dim[1], i + 1)
        img = generated_images[i, :, :, 0]
        plt.imshow(img)
        plt.axis('off')
    plt.tight_layout()
    plt.show()


ntrain = 10000
trainidx = random.sample(range(0, X_train.shape[0]), ntrain)
XT = X_train[trainidx, :, :, :]

# Pre-train the discriminator network ...
noise_gen = np.random.uniform(0, 1, size=[XT.shape[0], 100])
generated_images = generator.predict(noise_gen)
X = np.concatenate((XT, generated_images))
n = XT.shape[0]
y = np.zeros([2 * n, 2])
y[:n, 1] = 1
y[n:, 0] = 1

make_trainable(discriminator, True)
discriminator.fit(X, y, epochs=1, batch_size=128)
y_hat = discriminator.predict(X)

# Measure accuracy of pre-trained discriminator network
y_hat_idx = np.argmax(y_hat, axis=1)
y_idx = np.argmax(y, axis=1)
diff = y_idx - y_hat_idx
n_tot = y.shape[0]
Example #9

from tensorflow.contrib.keras.python.keras.models import Model, Sequential, load_model
# Input, BatchNormalization and Dropout are used below but not imported in the listing;
# the matching contrib-keras layers module is an assumption:
from tensorflow.contrib.keras.python.keras.layers import Input, BatchNormalization, Dropout
import numpy as np
from tensorflow.contrib.keras.python.keras import backend as K

input_array_small = np.random.random((500, 10)) * 2
target_small = np.random.random((500, 1))

input_tensor = Input(shape=(10, ))
bn_tensor = BatchNormalization()(input_tensor)
dp_tensor = Dropout(0.7)(input_tensor)

#### Access the BatchNormalization layer's output as arrays in both test and train mode

# test mode from Model method
model_bn = Model(input_tensor, bn_tensor)
bn_array = model_bn.predict(input_array_small)

# test and train mode from K.function method
k_bn = K.function([input_tensor, K.learning_phase()], [bn_tensor])
bn_array_test = k_bn([input_array_small, 0])[0]
bn_array_train = k_bn([input_array_small, 1])[0]

# are the two test-mode arrays identical? and does the test-mode array differ from the train-mode array?
(bn_array == bn_array_test).sum()
bn_array.shape  # compare the count above with the total number of elements to check equality
(bn_array == bn_array_train).sum()  # totally different

#### Access Dropout layer's output as array in both test and train mode

# test mode from Model method
model_dp = Model(input_tensor, dp_tensor)
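
# The listing cuts off here. By analogy with the BatchNormalization block above, a
# plausible continuation (an assumption, not the original code) would be:
#   dp_array = model_dp.predict(input_array_small)       # test mode: dropout is a pass-through
#   k_dp = K.function([input_tensor, K.learning_phase()], [dp_tensor])
#   dp_array_train = k_dp([input_array_small, 1])[0]     # train mode: ~70% of units zeroed, rest rescaled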
Example #10
from tensorflow.contrib.keras.python.keras import backend as K
# Input, Dense and Model are used below but not imported in the listing;
# the matching contrib-keras modules are an assumption:
from tensorflow.contrib.keras.python.keras.layers import Input, Dense
from tensorflow.contrib.keras.python.keras.models import Model
import numpy as np

input_tensor = Input(shape=(100, ), name="input_tensor")
inter_tensor = Dense(30, name="my_layer")(input_tensor)
final_tensor = Dense(30, name="final_layer")(inter_tensor)
model = Model(input_tensor, final_tensor)  # create the original model

layer_name = 'my_layer'
intermediate_layer_model = Model(inputs=model.input,
                                 outputs=model.get_layer(layer_name).output)

# must we compile before predict()? No, but we must compile before training
input_array1 = np.random.random((1000, 100)) * 9
input_tensor1 = K.constant(value=input_array1)
intermediate_output = intermediate_layer_model.predict(
    input_array1)  # returns an array
intermediate_output1 = intermediate_layer_model(
    input_tensor1
)  # returns a tensor, not an array; the tensor still has to be evaluated before it yields an array
"""
Alternatively, you can build a Keras function that will return the output of a certain layer given a certain input, for example:
"""

from tensorflow.contrib.keras.python.keras import backend as K

# a backend function that returns the output of my_layer (model.layers[1]) for a given input
get_layer_output = K.function([model.layers[0].input],
                              [model.layers[1].output])
layer_output = get_layer_output([input_array1])[0]  # [0] because K.function returns a list
"""
Example #11
    filepath = os.path.join(img_path, img_name)

    img = cv2.imread(filepath)

    X = format_img(img, C)

    img_scaled = np.transpose(X.copy()[0, (2, 1, 0), :, :], (1, 2, 0)).copy()
    img_scaled[:, :, 0] += 123.68
    img_scaled[:, :, 1] += 116.779
    img_scaled[:, :, 2] += 103.939

    img_scaled = img_scaled.astype(np.uint8)
    X = np.transpose(X, (0, 2, 3, 1))

    # get the feature maps and output from the RPN
    [Y1, Y2, F] = model_rpn.predict(X)

    R = roi_helpers.rpn_to_roi(Y1, Y2, C, overlap_thresh=0.7)

    # convert from (x1,y1,x2,y2) to (x,y,w,h)
    R[:, 2] -= R[:, 0]
    R[:, 3] -= R[:, 1]

    # apply the spatial pyramid pooling to the proposed regions
    bboxes = {}
    probs = {}

    for jk in range(R.shape[0] // C.num_rois + 1):
        ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :],
                              axis=0)
        if ROIs.shape[1] == 0:
Example #12
input_shape = list(train_model.layers[0].batch_input_shape[1:])
del train_model
input_shape[0] = nt
inputs = Input(shape=tuple(input_shape))
predictions = test_prednet(inputs)
test_model = Model(inputs=inputs, outputs=predictions)

test_generator = SequenceGenerator(nt,
                                   interval=INTERVAL,
                                   dimension=dim,
                                   data_size=10,
                                   batch_size=batch_size,
                                   N_seq=10)
X_test = test_generator.create_all()
X_hat = test_model.predict(X_test, batch_size)

# Plot some predictions

aspect_ratio = float(X_hat.shape[2] / X_hat.shape[3])
plt.figure(figsize=(nt, 2 * aspect_ratio))
gs = gridspec.GridSpec(2, nt)
gs.update(wspace=0.1, hspace=0.1)
plot_save_dir = os.path.join(
    RESULTS_SAVE_DIR, 'prediction_plots_interval' + str(INTERVAL) + '_EXTRAP' +
    str(EXTRAP) + 'USE_POI_' + str(USE_ROI_LOSS) + '/')
if not os.path.exists(plot_save_dir):
    os.mkdir(plot_save_dir)
for i in range(X_hat.shape[0]):
    for t in range(nt):
        plt.subplot(gs[t])