if K.image_dim_ordering() == 'th':
    # Transpose image dimensions (Theano uses the channels as the 1st dimension)
    im = im.transpose((2, 0, 1))

    # Use pre-trained weights for Theano backend
    weights_path = '../imagenet_models/densenet169_weights_th.h5'
else:
    # Use pre-trained weights for Tensorflow backend
    weights_path = '../imagenet_models/densenet169_weights_tf.h5'

# Insert a new dimension for the batch_size
im = np.expand_dims(im, axis=0)

# Test pretrained model
model = DenseNet(reduction=0.5, classes=1000, weights_path=weights_path)

sgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

out = model.predict(im)

# Load ImageNet classes file
classes = []
with open('resources/classes.txt', 'r') as list_:
    for line in list_:
        classes.append(line.rstrip('\n'))

print('Prediction: ' + str(classes[np.argmax(out)]))
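
# A minimal sketch of also printing the top-5 predictions, assuming `out`
# keeps the (1, 1000) softmax shape produced by model.predict above:
top5 = np.argsort(out[0])[::-1][:5]
for rank, idx in enumerate(top5, start=1):
    print('{}. {} ({:.4f})'.format(rank, classes[idx], out[0][idx]))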
Example #2
del Y_train
del X_valid
del Y_valid

NumNonTrainable = [0,10,75,200,525]

X_testNP = np.zeros([len(X_test),224,224,3])

for idx in range(len(X_testNP)):
    im = cv2.resize(X_test[idx], (224, 224))
    X_testNP[idx] = im

for ib in range(0,len(NumNonTrainable)):

    # Test pretrained model
    model = DenseNet(reduction=0.5, classes=classes, weights_path=weights_path, NumNonTrainable=NumNonTrainable[ib])

    # SGD optimizer: learning rate 1e-2, decay 1e-6, Nesterov momentum 0.9
    sgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)

    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

    start = time.time()
    model.fit_generator(generator=my_training_batch_generator,
                        epochs=5,
                        verbose=1,
                        shuffle=True,
                        validation_data=my_validation_batch_generator)

    model.save_weights('/home/shayan/Codes/DenseNet-Keras-master/adversarial_weights_tf_' + str(ib) + '.h5')
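
# my_training_batch_generator / my_validation_batch_generator are not defined
# in this snippet; a minimal sketch of a keras.utils.Sequence they might
# correspond to (the class name, batch size, and (paths, labels) inputs below
# are assumptions, not from the original):
import cv2
import numpy as np
from keras.utils import Sequence


class ImageBatchGenerator(Sequence):
    """Yields (images, labels) batches preprocessed like the test images above."""

    def __init__(self, image_paths, labels, batch_size=32):
        self.image_paths = image_paths
        self.labels = labels
        self.batch_size = batch_size

    def __len__(self):
        return int(np.ceil(len(self.image_paths) / float(self.batch_size)))

    def __getitem__(self, idx):
        start = idx * self.batch_size
        end = start + self.batch_size
        batch_x = []
        for path in self.image_paths[start:end]:
            im = cv2.resize(cv2.imread(path), (224, 224)).astype(np.float32)
            # Same per-channel mean subtraction and scaling as in the other examples
            im[:, :, 0] = (im[:, :, 0] - 103.94) * 0.017
            im[:, :, 1] = (im[:, :, 1] - 116.78) * 0.017
            im[:, :, 2] = (im[:, :, 2] - 123.68) * 0.017
            batch_x.append(im)
        return np.array(batch_x), np.array(self.labels[start:end])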
Example #3
        im = cv2.imread(filename)
        im = cv2.resize(im, (224, 224)).astype(np.float32)
        # Subtract mean pixel and multiply by scaling constant
        # Reference: https://github.com/shicai/DenseNet-Caffe
        im[:, :, 0] = (im[:, :, 0] - 103.94) * 0.017
        im[:, :, 1] = (im[:, :, 1] - 116.78) * 0.017
        im[:, :, 2] = (im[:, :, 2] - 123.68) * 0.017
        X_test.append(im)
        Y_test.append([1])

X_test = np.array(X_test)
Y_test = np.array(Y_test)
Y_test = to_categorical(Y_test)

# Test pretrained model
model = DenseNet(reduction=0.5, classes=classes)

# SGD optimizer: learning rate 1e-2, decay 1e-6, Nesterov momentum 0.9
sgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.fit_generator(generator=my_training_batch_generator,
                    epochs=3,
                    verbose=1,
                    shuffle=True,
                    validation_data=my_validation_batch_generator)
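
# X_test / Y_test are built above but not used within this excerpt; a minimal
# sketch of evaluating the fine-tuned model on them (batch size is an assumption):
test_loss, test_acc = model.evaluate(X_test, Y_test, batch_size=32, verbose=1)
print('Test loss: {:.4f}, test accuracy: {:.4f}'.format(test_loss, test_acc))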

f = open(
    "/home/shayan/PycharmProjects/DenseNet-Keras-master/results/Stat_Results_NonTrainable_NonWeight"
Example #4
    for filename in glob.glob(
            '/home/shayan/PycharmProjects/Dataset/Botnet/Test/' + str(i4) +
            '/*.png'):
        im = cv2.imread(filename)
        im = cv2.resize(im, (224, 224)).astype(np.float32)
        # Subtract mean pixel and multiply by scaling constant
        # Reference: https://github.com/shicai/DenseNet-Caffe
        im[:, :, 0] = (im[:, :, 0] - 103.94) * 0.017
        im[:, :, 1] = (im[:, :, 1] - 116.78) * 0.017
        im[:, :, 2] = (im[:, :, 2] - 123.68) * 0.017
        X_test.append(im)
        Y_test.append([1])

# Test pretrained model
model, logits = DenseNet(reduction=0.5,
                         classes=classes,
                         weights_path=weights_path)

# SGD optimizer: learning rate 1e-2, decay 1e-6, Nesterov momentum 0.9
sgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)

model.compile(optimizer=sgd,
              loss='categorical_crossentropy',
              metrics=['accuracy'])


class Dummy:
    pass


env = Dummy()
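
# Dummy is just an empty namespace; in adversarial-example scripts an `env`
# object like this typically collects the pieces that later steps need.
# The attribute names below are assumptions, not from the original
# (to_categorical as in the other examples):
env.model = model
env.logits = logits
env.x_test = np.array(X_test)
env.y_test = to_categorical(np.array(Y_test))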
Example #5
if K.image_dim_ordering() == 'th':
  # Transpose image dimensions (Theano uses the channels as the 1st dimension)
  im = im.transpose((2,0,1))

  # Use pre-trained weights for Theano backend
  weights_path = 'imagenet_models/densenet169_weights_th.h5'
else:
  # Use pre-trained weights for Tensorflow backend
  weights_path = 'imagenet_models/densenet169_weights_tf.h5'

# Insert a new dimension for the batch_size
im = np.expand_dims(im, axis=0)

# Test pretrained model
model = DenseNet(reduction=0.5, classes=10, weights_path=weights_path)

# SGD optimizer: learning rate 1e-2, decay 1e-6, Nesterov momentum 0.9
sgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

out = model.predict(im)

# Load ImageNet classes file
classes = []
with open('resources/classes.txt', 'r') as list_:
    for line in list_:
        classes.append(line.rstrip('\n'))

print('Prediction: ' + str(classes[np.argmax(out)]))
Example #6
        X_test.append(im)
        Y_test.append([1])

X_test = np.array(X_test)
Y_test = np.array(Y_test)
Y_test = to_categorical(Y_test)

NumNonTrainable = [75]

print(X_test.shape)
print(Y_test.shape)

for ib in range(0,len(NumNonTrainable)):

    # Test pretrained model
    model = DenseNet(reduction=0.5, classes=classes, weights_path=weights_path, NumNonTrainable=NumNonTrainable[ib])

    # SGD optimizer: learning rate 1e-2, decay 1e-6, Nesterov momentum 0.9
    sgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)

    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

    start = time.time()
    model.fit_generator(generator=my_training_batch_generator,
                        epochs=1,
                        verbose=1,
                        shuffle=True,
                        validation_data=my_validation_batch_generator)
    end = time.time()

    train_time = end - start
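
    # The excerpt stops after timing the fit; a minimal sketch of recording the
    # training time and test accuracy for this NumNonTrainable setting
    # (the file name and format are assumptions):
    test_loss, test_acc = model.evaluate(X_test, Y_test, verbose=0)
    with open('results_nontrainable_{}.txt'.format(NumNonTrainable[ib]), 'w') as f:
        f.write('train_time={:.1f}s  test_acc={:.4f}\n'.format(train_time, test_acc))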
Example #7
if K.image_dim_ordering() == 'th':
    # Transpose image dimensions (Theano uses the channels as the 1st dimension)
    im = im.transpose((2, 0, 1))

    # Use pre-trained weights for Theano backend
    weights_path = 'imagenet_models/densenet169_weights_th.h5'
else:
    # Use pre-trained weights for Tensorflow backend
    weights_path = 'imagenet_models/densenet169_weights_tf.h5'

# Insert a new dimension for the batch_size
im = np.expand_dims(im, axis=0)

# Test pretrained model
model = DenseNet(reduction=0.5,
                 classes=classes,
                 weights_path=weights_path,
                 NumNonTrainable=10)

# SGD optimizer: learning rate 1e-2, decay 1e-6, Nesterov momentum 0.9
sgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
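
# NumNonTrainable appears to control how many layers this custom DenseNet
# builder freezes; a generic Keras sketch of freezing the first N layers of an
# already-built model is shown below (an illustration, not the helper's actual code):
num_frozen = 10  # mirrors NumNonTrainable above
for layer in model.layers[:num_frozen]:
    layer.trainable = False
# Recompile so the updated trainable flags take effect
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])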
out = model.predict(im)

# Load ImageNet classes file
classes = []
with open('resources/classes.txt', 'r') as list_:
    for line in list_:
        classes.append(line.rstrip('\n'))
Example #8
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
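
# The fragment above is the tail of a confusion-matrix plotting helper whose
# header is cut off in this excerpt; after training and prediction it would
# typically be driven roughly like this (the helper's name and signature, and
# the test arrays, are assumptions):
from sklearn.metrics import confusion_matrix

y_true = np.argmax(Y_test, axis=1)                  # one-hot -> class indices
y_pred = np.argmax(model.predict(X_test), axis=1)
cm = confusion_matrix(y_true, y_pred)
plot_confusion_matrix(cm, classes=['Botnet', 'Normal'])
plt.show()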


classes = 2

# Use pre-trained weights for Tensorflow backend
weights_path = 'imagenet_models/densenet169_weights_tf.h5'

# Test pretrained model
model = DenseNet(reduction=0.5,
                 classes=classes,
                 weights_path=weights_path,
                 NumNonTrainable=10)

# SGD optimizer: learning rate 1e-2, decay 1e-6, Nesterov momentum 0.9
sgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

print('Start Reading Data')

# "0" = Botnet Traffic Data and "1" = Normal Traffic Data
X_train = []
Y_train = []

for i1 in range(0, 12):
Example #9
    'cloudy': 6,
    'conventional_mine': 7,
    'cultivation': 8,
    'habitation': 9,
    'haze': 10,
    'partly_cloudy': 11,
    'primary': 12,
    'road': 13,
    'selective_logging': 14,
    'slash_burn': 15,
    'water': 16
}
inv_label_map = {i: l for l, i in label_map.items()}
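
# label_map maps Planet tag names to indices; a minimal sketch (assumed, not
# from the original; numpy imported as np as in the other examples) of turning
# a space-separated tag string into a binary label vector of length n_classes:
def encode_tags(tag_string, n_classes=17):
    y = np.zeros(n_classes, dtype=np.float32)
    for tag in tag_string.split():
        y[label_map[tag]] = 1.0
    return y

print(encode_tags('primary water road'))  # sets indices 12, 16 and 13 to 1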

base_model = DenseNet(
    weights_path=
    "/mnt/home/dunan/Learn/Kaggle/planet_amazon/pretrained_weights/densenet169_weights_tf.h5"
)
x = base_model.get_layer('relu').output

x_newfc = GlobalAveragePooling2D(name='final_pool')(x)
x_newfc = Dense(n_classes, name='fc6')(x_newfc)
x_newfc = Activation('softmax', name='prob')(x_newfc)
model = Model(inputs=base_model.input, outputs=x_newfc)

sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

X_train = []
y_train = []