def create_optimized_vgg16_model():
    # build the VGG16 network
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))

    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # load the weights of the VGG16 networks
    # (trained on ImageNet for the ILSVRC 2014 competition)
    # note: when there is a complete match between your model definition
    # and your weight savefile, you can simply call model.load_weights(filename)
    assert os.path.exists(
        weights_path
    ), 'Model weights not found (see "weights_path" variable in script).'
    f = h5py.File(weights_path, 'r')
    for k in range(f.attrs['nb_layers']):
        if k >= len(model.layers):
            # we don't look at the last (fully-connected) layers in the savefile
            break
        g = f['layer_{}'.format(k)]
        weights = [
            g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])
        ]
        model.layers[k].set_weights(weights)
    f.close()
    print('Model loaded.')

    # build a classifier model to put on top of the convolutional model
    top_model = Sequential()
    top_model.add(Flatten(input_shape=model.output_shape[1:]))
    top_model.add(Dense(256, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(1, activation='sigmoid'))

    # note that it is necessary to start with a fully-trained
    # classifier, including the top classifier,
    # in order to successfully do fine-tuning
    top_model.load_weights(top_model_weights_path)

    # add the model on top of the convolutional base
    model.add(top_model)

    # set the first 25 layers (up to the last conv block)
    # to non-trainable (weights will not be updated)
    for layer in model.layers[:25]:
        layer.trainable = False

    # compile the model with a SGD/momentum optimizer
    # and a very slow learning rate.
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
                  metrics=['accuracy'])

    return model
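# Hedged usage sketch (not in the original): how the function above would be
# driven with the Keras 1 generator API it is written against. The directory
# paths and sample counts are placeholders, and the (3, w, h) input shape
# implies Theano dim ordering.
from keras.preprocessing.image import ImageDataGenerator

model = create_optimized_vgg16_model()

train_datagen = ImageDataGenerator(rescale=1. / 255, shear_range=0.2,
                                   zoom_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    'data/train',  # placeholder path
    target_size=(img_height, img_width),
    batch_size=16,
    class_mode='binary')  # matches the sigmoid head and binary_crossentropy
validation_generator = test_datagen.flow_from_directory(
    'data/validation',  # placeholder path
    target_size=(img_height, img_width),
    batch_size=16,
    class_mode='binary')

model.fit_generator(train_generator,
                    samples_per_epoch=2000,  # placeholder counts, Keras 1 API
                    nb_epoch=25,
                    validation_data=validation_generator,
                    nb_val_samples=800)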
Example #2
    return x


model2 = load_model(
    '/home/lab5017/NIR_Filatov/Keras transfer learning kit light/MN_smcrob_4classes_10eps.h5'
)

# create config object
cfg = load_dict(CONFIG)

squeeze = SqueezeDet(cfg)
# dummy optimizer for compilation
sgd = optimizers.SGD(lr=cfg.LEARNING_RATE,
                     decay=0,
                     momentum=cfg.MOMENTUM,
                     nesterov=False,
                     clipnorm=cfg.MAX_GRAD_NORM)
squeeze.model.compile(optimizer=sgd,
                      loss=[squeeze.loss],
                      metrics=[
                          squeeze.bbox_loss, squeeze.class_loss,
                          squeeze.conf_loss,
                          squeeze.loss_without_regularization
                      ])
model = squeeze.model
i = 0

squeeze.model.load_weights(weights)

with open(img_file) as imgs:
testLabels = np.zeros([1300, numbers])
for i in range(6500):
    if i < 5200:
        trainingImages[i] = images[i]
        trainingLabels[i] = encodedLabels[i]
    else:
        testImages[i - 5200] = images[i]
        testLabels[i - 5200] = encodedLabels[i]

model = Sequential()
model.add(Dense(50, input_dim=pixels, activation='relu'))
for i in range(9):
    model.add(Dense(50, activation='relu'))
model.add(Dense(10, activation='softmax'))

sgd = optimizers.SGD(lr=0.001)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
history = model.fit(trainingImages,
                    trainingLabels,
                    validation_split=0.16,
                    batch_size=512,
                    epochs=500,
                    verbose=2)

prediction = model.predict(testImages)
confMatrix = confusion_matrix(testLabels.argmax(axis=1),
                              prediction.argmax(axis=1))
print(confMatrix)
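# Hedged follow-up (not in the original): per-class recall from the confusion
# matrix above; row i counts the true-class-i samples, so the diagonal over
# the row sums gives the per-digit accuracy.
perClassAcc = confMatrix.diagonal() / confMatrix.sum(axis=1).astype(float)
print(perClassAcc)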
Example #4
                        -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                        -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
                    ]))
        else:
            dataset.append(
                numpy.array(data["frames"][0]["persons"][0]["hand_pose"]
                            ["left"]["joints"] + data["frames"][0]["persons"]
                            [0]["hand_pose"]["right"]["joints"]))

model = Sequential()
model.add(Dense(128, input_dim=84, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(26, activation='softmax'))

sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,  # pass the configured SGD; the string 'rmsprop' silently ignored it
              metrics=['accuracy'])

model.fit(numpy.array(dataset), numpy.array(alphas), epochs=500, batch_size=25)

print(model.evaluate(numpy.array(dataset), numpy.array(alphas)))

# Prediction Example
# for prediction in model.predict(numpy.array(dataset)):
#     print(numpy.argmax(prediction))
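# Hedged decoding sketch, assuming the 26 softmax outputs map to the letters
# A-Z (the original does not state the label order):
# import string
# for prediction in model.predict(numpy.array(dataset)):
#     print(string.ascii_uppercase[numpy.argmax(prediction)])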

model_json = model.to_json()

with open("model.json", "w") as json_file2:
Example #5
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint, LearningRateScheduler
import tensorflow as tf
from models.simple_tunnel import DeepModel2


config = tf.ConfigProto(intra_op_parallelism_threads=7,
                        inter_op_parallelism_threads=7,
                        allow_soft_placement=True,
                        device_count={'CPU': 7, 'GPU': 1})
session = tf.Session(config=config)
K.set_session(session)

deepModel = DeepModel2(64, 8).model

sgd = optimizers.SGD(lr=0.007,
                     decay=0.0,
                     momentum=0.05,
                     nesterov=True,
                     clipnorm=1.0)
deepModel.compile(loss='mean_absolute_error',
                  optimizer=sgd,
                  metrics=['mean_squared_error'])

MODEL_WEIGHTS_FILE = 'flow_simple_tunnel.h5'


def schedule(epoch, lr=0.01):
    # cyclic decay: restart near lr_max every 10 epochs, shrinking by a
    # factor of 0.6 per epoch toward lr_min (the incoming lr is ignored)
    lr_max = 0.01
    lr_min = 0.00001
    return lr_min + (0.6**(epoch % 10)) * lr_max
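# Hedged sketch (not in the original): wiring the schedule above into the
# callbacks imported at the top; the fit arguments (x_train, y_train, batch
# size, epoch count) are placeholders.
lr_scheduler = LearningRateScheduler(schedule)
checkpoint = ModelCheckpoint(MODEL_WEIGHTS_FILE, save_best_only=True)
early_stop = EarlyStopping(patience=10)
# deepModel.fit(x_train, y_train, batch_size=64, epochs=100,
#               validation_split=0.1,
#               callbacks=[lr_scheduler, checkpoint, early_stop])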

                                             input_tensor=model_input)
# suffix every layer name so they do not collide with a second model's layers
for layer in model.layers:
    layer.name = layer.name + "_1"

#modification of Xception
x = model.output
x = GlobalAveragePooling2D()(x)
x = Dense(2048, activation='relu')(x)
x = Dropout(0.7)(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.7)(x)
predictions = Dense(num_classes, activation='softmax')(x)
xception1 = Model(inputs=model.input, outputs=predictions)

#definition of the optimizer
sgd2 = optimizers.SGD(lr=0.001, momentum=0.9, decay=0.00001, nesterov=True)

#model compilation
xception1.compile(optimizer=sgd2,
                  loss='categorical_crossentropy',
                  metrics=['accuracy', top3])

#model training
History = xception1.fit_generator(train_images,
                                  epochs=50,
                                  validation_data=val_images)

#writing of the training history
df1 = pd.DataFrame.from_records(History.history)
writer = ExcelWriter('feature_1.xlsx')
df1.to_excel(writer)  # the ExcelWriter above implies to_excel; the truncated to_csv( would have bypassed it
writer.save()
Example #7
def experiment(X_train, Y_train, file):
	model = Sequential()
	model.add(Conv2D(20, (3, 3), padding='same',
                 input_shape=X_train.shape[1:]))
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))

	model.add(Conv2D(40, (3, 3), padding='same'))
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))

	model.add(Flatten())
	model.add(Dense(640))
	model.add(Activation('relu'))
	model.add(Dropout(0.5))
	model.add(Dense(1000))
	model.add(Activation('relu'))
	model.add(Dropout(0.5))
	model.add(Dense(10))
	model.add(Activation('softmax'))

	sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.95, nesterov=True)
	adadelta = optimizers.Adadelta()  # unused alternative (was misleadingly bound to the name 'adam')
	model.compile(loss='categorical_crossentropy',
	              optimizer=sgd,
	              metrics=['accuracy'])

	print("Start training")

	model.fit(X_train, Y_train, batch_size=128, epochs=50, verbose=1)

	print("Start evaluation...")
	score = model.evaluate(X_test, Y_test, verbose=0)
	
	print(score)

	preds = model.predict( X_test, batch_size=None, verbose=0)

	preds = np.argmax(preds,1)

	print(preds.shape)
	wrong = 0
	mistakes = []

	# y_test comes from the enclosing scope, like X_test above
	for i in range(len(preds)):
		if preds[i] != y_test[i]:
			wrong += 1
			mistakes = np.append(mistakes, i)
	err = []
	print("Number of errors:")
	print(wrong)
	print(mistakes)
	err = np.append(err, wrong)

	print(err)


	model.save(file)
Example #8
model.add(Dropout(0.25))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(128, (2, 2), activation='relu'))
model.add(Convolution2D(128, (2, 2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(48, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

# Compile the model
# note: these are the default SGD parameters to begin with
sgd = optimizers.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False)
model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['accuracy'])


# need to load in the data
# x_train is built from adding all 3 spectra into a single "RGB"
def generate_training_data(batch_size):
    x_train = np.zeros((batch_size, 120, 120, 3), dtype=float)
    y_train = np.zeros((batch_size, 9), dtype=int)
    while True:
        for k in range(0, batch_size):  # was xrange, which is Python 2-only
            with h5py.File('CdSeSim' + str(random.randint(1, N)) + '.hdf5',
                           'r') as f:
                grp = 'Sim' + str(random.randint(
                    1, 28))  # reserve Sim1 and Sim30 for validation
                tS1 = np.absolute(f[grp]['S1'][:])
	for e in range(50,100,20):
		lr = 0.001
		while lr<=0.003:
			m = 0.7
			while m<=0.9:
				# initialize network and layers
				network = keras.Sequential()
				network.add(Dense(hiddenLayersConf[0],activation='sigmoid',input_shape=(17,),use_bias=True))
				if(len(hiddenLayersConf)==2):
					network.add(Dense(hiddenLayersConf[1],activation='sigmoid',use_bias=True))
				if(len(hiddenLayersConf)==3):
					network.add(Dense(hiddenLayersConf[2],activation='sigmoid',use_bias=True))

				network.add(Dense(2, activation='sigmoid'))

				sgd = optimizers.SGD(lr=lr, momentum=m)
				network.compile(loss='mean_squared_error', optimizer=sgd, metrics=['accuracy','mean_squared_error'])  # pass the object; the string 'sgd' discarded lr and momentum

				# train the model
				hist = network.fit(trainInput,trainTarget,verbose=1,batch_size=128,epochs=e,validation_data=(validationInput, validationTarget))

				# evaluate model on test set
				scores = network.evaluate(testInput, testTarget, batch_size=128)
				print('\nConfiguration : Layers=',hiddenLayersConf,' Epochs=',e,' Learning rate=',lr,' Momentum=',m)
				print('Accuracy on train data : ',hist.history.get('acc')[-1])
				print('Accuracy on test data : ',scores[1])

				learn.insertModel(bestModels,(network,hist,scores))

				for i in range(len(bestModels)):
					model_json = bestModels[i][0].to_json()
X, Y = np.array(X) , np.array(Y)
X, Y = shuffle(X,Y)

index_train = np.random.choice(X.shape[0],int(X.shape[0]*train_rate),replace=False)
index_test  = list(set(range(X.shape[0])) - set(index_train))
                            
X, Y = shuffle(X,Y)
X_train, y_train = X[index_train],Y[index_train]
X_test, y_test = X[index_test],Y[index_test]

# Load fully convolution networks
model = FCN(nClasses = n_classes, input_width  = input_width, input_height = input_height)


# Optimize
sgd = optimizers.SGD(lr=1e-2, decay=5e-4, momentum=0.9, nesterov=True)  # 5**(-4) (= 0.0016) was presumably meant as 5e-4
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
print(model.summary())


#Fit the model
model.fit(X_train,y_train, validation_data=(X_test,y_test), batch_size=32, epochs=200, verbose=2)



##### Validation
print ("=================================================")
print ("Validation before CRFs")
print ("-------------------------")
Example #11
x = model.output
x = GlobalAveragePooling2D()(x)
x = Dense(2048, activation='relu')(x)
x = Dense(1024, activation='relu')(x)
x = Dense(512, activation='relu')(x)
predictions = Dense(257, activation='softmax')(x)

# creating the final model
model_final = Model(inputs=model.input, outputs=predictions)

if optimizer == 'adam':
    opt = optimizers.Adam(lr=lrate)
elif optimizer == 'sgd':
    opt = optimizers.SGD(lr=lrate, momentum=0.9)
else:
    raise ValueError('unknown optimizer: {}'.format(optimizer))  # opt was left undefined otherwise

# compile the model
model_final.compile(loss="categorical_crossentropy",
                    optimizer=opt,
                    metrics=["accuracy"])

# Initiate the train and test generators with data Augumentation
train_datagen = ImageDataGenerator(horizontal_flip=True,
                                   fill_mode="nearest",
                                   zoom_range=0.3,
                                   width_shift_range=0.3,
                                   height_shift_range=0.3,
                                   rotation_range=30)

val_datagen = ImageDataGenerator(horizontal_flip=True,
def train_multilayer_nn(model, xtrain, ytrain_1hot):
    sgd = optimizers.SGD(lr=0.01)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    model.fit(xtrain, ytrain_1hot, epochs=30, batch_size=32)
def train_binary_classifier(model, xtrain, ytrain_binary):
    sgd = optimizers.SGD(lr=0.01)
    model.compile(loss='binary_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    model.fit(xtrain, ytrain_binary, epochs=20, batch_size=32)
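# Hedged usage sketch (not in the original): a minimal network handed to the
# trainer above; the 3072-dim input and the xtrain/ytrain_1hot arrays are
# placeholders.
# model = Sequential()
# model.add(Dense(128, input_dim=3072, activation='relu'))
# model.add(Dense(10, activation='softmax'))
# train_multilayer_nn(model, xtrain, ytrain_1hot)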
Example #14
    def _gen_model(self):
        """
        Generates models. Uses binary crossentropy with LOW LR as described in the lectures.
        :return:
        """
        def gen_generator(input_shape):
            model = KM.Sequential()
            model.add(
                KL.Dense(1024, input_shape=input_shape, activation='tanh'))

            model.add(KL.Dense(128 * 7 * 7))
            model.add(KL.BatchNormalization())
            model.add(KL.Activation('tanh'))

            model.add(KL.Reshape((7, 7, 128)))
            model.add(KL.UpSampling2D(size=(2, 2)))

            model.add(KL.Conv2D(64, (5, 5), padding='same', activation='tanh'))
            model.add(KL.UpSampling2D(size=(2, 2)))

            model.add(
                KL.Conv2D(self.channels, (5, 5),
                          padding='same',
                          activation='tanh'))
            return model

        self.generator = gen_generator(input_shape=(self.encoding_dim, ))

        def gen_discriminator(input_shape):
            model = KM.Sequential()
            model.add(
                KL.Conv2D(filters=64,
                          kernel_size=(5, 5),
                          padding='same',
                          input_shape=input_shape,
                          activation='tanh'))
            model.add(KL.MaxPooling2D(pool_size=(2, 2)))

            model.add(KL.Conv2D(128, (5, 5), activation='tanh'))
            model.add(KL.MaxPooling2D(pool_size=(2, 2)))

            model.add(KL.Flatten())
            model.add(KL.Dense(1024, activation='tanh'))
            model.add(KL.Dense(
                1, activation='sigmoid'))  # As suggested in the lectures.

            return model

        self.discriminator = gen_discriminator(input_shape=(28, 28,
                                                            self.channels))

        def gen_GAN(generator: KM.Sequential, discriminator: KM.Sequential):
            model = KM.Sequential()
            model.add(generator)
            discriminator.trainable = False
            model.add(discriminator)
            return model

        self.gan = gen_GAN(self.generator, self.discriminator)

        self.generator.compile(loss='binary_crossentropy',
                               optimizer=KO.SGD(lr=0.0004))
        self.gan.compile(loss='binary_crossentropy',
                         optimizer=KO.SGD(lr=0.0005))
        self.discriminator.trainable = True
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=KO.SGD(lr=0.0008))
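    # Hedged sketch (not part of the original class): one adversarial update
    # using the models compiled above; real_images is a placeholder batch of
    # shape (batch_size, 28, 28, channels) scaled to the tanh range [-1, 1].
    def _train_step(self, real_images, batch_size=32):
        import numpy as np
        noise = np.random.uniform(-1, 1, size=(batch_size, self.encoding_dim))
        fake_images = self.generator.predict(noise)
        # train the discriminator: real -> 1, fake -> 0
        x = np.concatenate([real_images, fake_images])
        y = np.array([1] * batch_size + [0] * batch_size).reshape(-1, 1)
        d_loss = self.discriminator.train_on_batch(x, y)
        # the GAN was compiled while the discriminator was frozen, so this
        # update only moves the generator, pushing fakes toward the label 1
        g_loss = self.gan.train_on_batch(noise, np.ones((batch_size, 1)))
        return d_loss, g_loss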
Example #15
def main():

    dataset = load_dataset()

    train_data = np.asarray(dataset['train']['data'])
    train_labels = dataset['train']['label']
    num_classes = len(np.unique(train_labels))

    test_data = np.asarray(dataset['test']['data'])
    test_labels = dataset['test']['label']

    train_labels = to_categorical(train_labels, num_classes=num_classes)
    test_labels = to_categorical(test_labels, num_classes=num_classes)

    generator = dataset['generator']
    generator_kwargs = {'batch_size': batch_size}

    print('reps : ', reps)
    name = 'mnist_' + fs_network + '_' + classifier_network + '_r_' + str(
        regularization)
    print(name)
    model_kwargs = {'nclasses': num_classes, 'regularization': regularization}

    total_features = int(np.prod(train_data.shape[1:]))

    model_filename = directory + fs_network + '_trained_model.h5'
    classifier_filename = directory + classifier_network + '_trained_model.h5'
    if not os.path.exists(model_filename) and warming_up:

        model = getattr(network_models,
                        fs_network)(input_shape=train_data.shape[1:],
                                    **model_kwargs)
        print('training_model')
        model.fit_generator(
            generator.flow(train_data, train_labels, **generator_kwargs),
            steps_per_epoch=train_data.shape[0] // batch_size,
            epochs=110,
            callbacks=[callbacks.LearningRateScheduler(scheduler())],
            validation_data=(test_data, test_labels),
            validation_steps=test_data.shape[0] // batch_size,
            verbose=verbose)
        if not os.path.isdir(directory):
            os.makedirs(directory)
        model.save(model_filename)
        del model
        K.clear_session()

    nfeats = []
    accuracies = []
    times = []

    for factor in [.05, .1, .25, .5]:
        n_features = int(total_features * factor)
        n_accuracies = []
        n_times = []

        for r in range(reps):
            print('factor : ', factor, ' , rep : ', r)
            l2x_model = get_l2x_model(train_data.shape[1:], n_features)
            classifier = load_model(model_filename) if warming_up else getattr(
                network_models, fs_network)(input_shape=train_data.shape[1:],
                                            **model_kwargs)
            classifier_input = layers.Multiply()(
                [l2x_model.input, l2x_model.output])
            output = classifier(classifier_input)
            model = models.Model(l2x_model.input, output)

            optimizer = optimizers.Adam(lr=1e-3)
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizer,
                          metrics=['acc'])
            model.classifier = classifier
            model.summary()
            start_time = time.time()
            model.fit_generator(
                generator.flow(train_data, train_labels, **generator_kwargs),
                steps_per_epoch=train_data.shape[0] // batch_size,
                epochs=80,
                callbacks=[],
                validation_data=(test_data, test_labels),
                validation_steps=test_data.shape[0] // batch_size,
                verbose=verbose)
            scores = l2x_model.predict(
                train_data, verbose=0, batch_size=batch_size).reshape(
                    (-1, np.prod(train_data.shape[1:]))).sum(axis=0)
            pos = np.argsort(scores)[::-1][:n_features]
            n_times.append(time.time() - start_time)
            mask = np.zeros_like(scores)
            mask[pos] = 1.
            mask = mask.reshape(train_data.shape[1:])
            del l2x_model, classifier, model
            K.clear_session()
            classifier = load_model(
                classifier_filename) if warming_up else getattr(
                    network_models, classifier_network)(
                        input_shape=train_data.shape[1:], **model_kwargs)
            optimizer = optimizers.SGD(lr=1e-1)
            classifier.compile(loss='categorical_crossentropy',
                               optimizer=optimizer,
                               metrics=['acc'])
            classifier.fit_generator(
                generator.flow(mask * train_data, train_labels,
                               **generator_kwargs),
                steps_per_epoch=train_data.shape[0] // batch_size,
                epochs=80,
                callbacks=[
                    callbacks.LearningRateScheduler(scheduler(extra=0)),
                ],
                validation_data=(mask * test_data, test_labels),
                validation_steps=test_data.shape[0] // batch_size,
                verbose=verbose)

            n_accuracies.append(
                classifier.evaluate(mask * test_data, test_labels,
                                    verbose=0)[-1])
            del classifier
            K.clear_session()
        print('n_features : ', n_features, ', acc : ', n_accuracies,
              ', time : ', n_times)
        accuracies.append(n_accuracies)
        nfeats.append(n_features)
        times.append(n_times)

    output_filename = directory + fs_network + '_' + classifier_network + '_l2x_results_warming_' + str(
        warming_up) + '.json'

    try:
        with open(output_filename) as outfile:
            info_data = json.load(outfile)
    except (IOError, ValueError):  # no previous (or malformed) results file
        info_data = {}

    if name not in info_data:
        info_data[name] = []

    info_data[name].append({
        'regularization': regularization,
        'reps': reps,
        'classification': {
            'n_features': nfeats,
            'accuracy': accuracies,
            'times': times
        }
    })

    with open(output_filename, 'w') as outfile:
        json.dump(info_data, outfile)
def test_sgd_normalized_l1_l2():
    sgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
    sgd = NormalizedOptimizer(sgd, normalization='l1_l2')
    _test_optimizer(sgd, target=0.45)
    _test_no_grad(sgd)
def test_embedding_with_clipnorm():
    model = Sequential()
    model.add(layers.Embedding(input_dim=1, output_dim=1))
    model.compile(optimizer=optimizers.SGD(clipnorm=0.1), loss='mse')
    model.fit(np.array([[0]]), np.array([[[0.5]]]), epochs=1)
def test_sgd_normalized_std():
    sgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
    sgd = NormalizedOptimizer(sgd, normalization='std')
    _test_optimizer(sgd)
    _test_no_grad(sgd)
def main():
    file_lines = []
    dataset = sys.argv[1]
    # path to the model weights files.

    ###weights_path = 'weights/vgg16_first_training_raspberry_weights.h5'
    weights_path = 'weights/vgg16_weights.h5'

    # dimensions of our images.

    predict_mcc, validation_data_dir = dataset_to_parameters(dataset)

    # load the weights of the VGG16 networks
    # (trained on ImageNet, won the ILSVRC competition in 2014)
    # note: when there is a complete match between your model definition
    # and your weight savefile, you can simply call model.load_weights(filename)
    #model = vgg16(weights_path)

    ###model, top_model = vgg16(weights_path)
    model, top_model = vgg16_original(weights_path)

    assert os.path.exists(weights_path), 'Model weights not found (see "weights_path" variable in script).'
    #model.load_weights(weights_path)
    '''
    f = h5py.File(weights_path)
    for k in range(len(f.attrs['layer_names'])):
       g = f[f.attrs['layer_names'][k]]
       weights = [g[g.attrs['weight_names'][p]] for p in range(len(g.attrs['weight_names']))]
       if k >= len(model.layers):
           top_model.layers[k-len(model.layers)].set_weights(weights)
       else:
           model.layers[k].set_weights(weights)
    f.close()
    '''
    with h5py.File(weights_path, 'r') as weights_file:
        for k in range(weights_file.attrs['nb_layers']):
            if k >= len(model.layers):
                # we don't look at the last (fully-connected) layers in the savefile
                break
            g = weights_file['layer_{}'.format(k)]
            weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
            model.layers[k].set_weights(weights)


    print('Model loaded.')

    # build a classifier model to put on top of the convolutional model

    # set the first 25 layers (up to the last conv block)
    # to non-trainable (weights will not be updated)
    for layer in model.layers[:25]:
        layer.trainable = False

    # compile the model with a SGD/momentum optimizer
    # and a very slow learning rate.
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
                  metrics=['accuracy'])


    validation_images, validation_labels = create_validationImg_validationLabel_list(predict_mcc, validation_data_dir)
    validation = np.array(load_im2(validation_images))

    np.savetxt("tsne/validation_labels/vgg16_validation_labels_{}.txt".format(dataset), validation_labels)

    #predicted_labels = model.predict(validation)
    predicted_features = model.predict(validation)
    np.savetxt("tsne/predicted_features/vgg16_predicted_features_{}.txt".format(dataset), predicted_features)
def test_sgd_normalized_average_l1_l2():
    sgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
    sgd = NormalizedOptimizer(sgd, normalization='avg_l1_l2')
    _test_optimizer(sgd)
    _test_no_grad(sgd)
    for i in range(3):
        x_train[:, :, :, i] = (x_train[:, :, :, i] - mean[i]) / std[i]
        x_val[:, :, :, i] = (x_val[:, :, :, i] - mean[i]) / std[i]
        x_test[:, :, :, i] = (x_test[:, :, :, i] - mean[i]) / std[i]

    # build network
    img_input = Input(shape=(IMG_ROWS, IMG_COLS, IMG_CHANNELS))
    output = resnext(img_input, CLASS_NUM)
    resnet = Model(img_input, output)

    print(resnet.summary())

    # set optimizer

    parallel_model = multi_gpu_model(resnet, gpus=2)
    sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True)
    parallel_model.compile(loss='categorical_crossentropy',
                           optimizer=sgd,
                           metrics=['accuracy'])

    # set callback
    tb_cb = TensorBoard(log_dir='./resnext/', histogram_freq=0)
    change_lr = LearningRateScheduler(scheduler)
    ckpt = ModelCheckpoint('./ckpt.{epoch:02d}-{val_acc:.4f}.h5',
                           monitor='val_acc',
                           save_best_only=True,
                           mode='max',
                           period=25)
    cbks = [change_lr, tb_cb, ckpt]

    # set data augmentation
def test_clipvalue_normalized():
    sgd = optimizers.SGD(lr=0.01, momentum=0.9, clipvalue=0.5)
    sgd = NormalizedOptimizer(sgd, normalization='l2')
    _test_optimizer(sgd)
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(1, activation='sigmoid')(x)  # single sigmoid unit: detect only two classes

model = Model(inputs=base_model.input, outputs=predictions)

########################################################################

## Freeze the base model #################

for layer in base_model.layers:
    layer.trainable = False  # no base layer will be updated

#  Optimizer (SGD)
optimizer = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9,
                           nesterov=True)  # alternative: optimizers.RMSprop()

# Model compilation
# Changing loss function: categorical_crossentropy to binary_crossentropy
model.compile(optimizer=optimizer,
              loss='binary_crossentropy',
              metrics=['accuracy'])

print("Model compiled")

print("Start the last layers trainig...")
history_1 = model.fit_generator(train_generator,
                                train_generator.n // batch_size,
                                epochs=number_of_epochs_pretraining,
                                workers=4,
                                validation_data=validation_generator,
Example #24
# add batchNormalization here
branch = BatchNormalization()(branch)

# using activation function relu
branch_vgg16 = Activation("relu")(branch)
nn = Dropout(0.5)(branch_vgg16)
nn = Dense(512, use_bias=False, kernel_initializer='uniform')(nn)
nn = BatchNormalization()(nn)
nn = Activation("relu")(nn)
nn = Dropout(0.5)(nn)
nn = Dense(37, kernel_initializer='uniform', activation="softmax")(nn)
model = Model(inputs=input_vgg16, outputs=[nn])
model.summary()

# compile the model
model.compile(loss='categorical_crossentropy', optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
              metrics=['accuracy', 'top_k_categorical_accuracy'])

# save each model after each epoch into the specific file path
checkpointer = ModelCheckpoint(filepath='saved_models/VGG16_ma.hdf5', verbose=1, save_best_only=True)

# feed the model with the train data and validation data
history = model.fit(train_vgg16, train_targets,
                    validation_data=(valid_vgg16, valid_targets),
                    epochs=30, batch_size=4, callbacks=[checkpointer], verbose=1)

# plot the history for top-1 accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('VGG16 model top-1 accuracy')
plt.ylabel('top-1 accuracy')
def CNNBiGRU(traindata, trainlabel, testdata, testlabel, TotalSequenceLength):

    # Model
    model = Sequential()

    # Convolution layer
    model.add(
        Convolution2D(batch_input_shape=(None, TotalSequenceLength, 4, 1),
                      filters=32,
                      kernel_size=4,
                      strides=1,
                      padding='same',
                      data_format='channels_last'))

    # Batch Normalization layer (the original constructed this layer but never
    # added it to the model, so it had no effect; add it, with the channel
    # axis matching the channels_last data format)
    model.add(
        normalization.BatchNormalization(axis=-1,
                                         momentum=0.99,
                                         epsilon=0.001,
                                         center=True,
                                         scale=True,
                                         beta_initializer='zeros',
                                         gamma_initializer='ones',
                                         moving_mean_initializer='zeros',
                                         moving_variance_initializer='ones',
                                         beta_regularizer=None,
                                         gamma_regularizer=None,
                                         beta_constraint=None,
                                         gamma_constraint=None))

    # Activation function
    model.add(Activation('relu'))

    # MaxPooling layer
    model.add(
        MaxPooling2D(pool_size=4,
                     strides=4,
                     padding='same',
                     data_format='channels_last'))

    # Convolution layer
    model.add(
        Convolution2D(64,
                      4,
                      strides=1,
                      padding='same',
                      data_format='channels_last'))  # was 'channels_first', inconsistent with the rest of the model

    # Batch Normalization layer (same fix as above: add the layer to the model
    # and normalize over the channels_last channel axis)
    model.add(
        normalization.BatchNormalization(axis=-1,
                                         momentum=0.99,
                                         epsilon=0.001,
                                         center=True,
                                         scale=True,
                                         beta_initializer='zeros',
                                         gamma_initializer='ones',
                                         moving_mean_initializer='zeros',
                                         moving_variance_initializer='ones',
                                         beta_regularizer=None,
                                         gamma_regularizer=None,
                                         beta_constraint=None,
                                         gamma_constraint=None))

    # Activation function
    model.add(Activation('relu'))

    # MaxPooling layer
    model.add(MaxPooling2D(4, 4, 'same', data_format='channels_last'))

    # Flatten layer
    model.add(TimeDistributed(Flatten()))

    # BiGRU
    model.add(
        Bidirectional(
            GRU(units=64,
                activation='tanh',
                recurrent_activation='hard_sigmoid',
                use_bias=True,
                kernel_initializer='glorot_uniform',
                recurrent_initializer='orthogonal',
                bias_initializer='zeros',
                kernel_regularizer=None,
                recurrent_regularizer=None,
                bias_regularizer=None,
                activity_regularizer=None,
                kernel_constraint=None,
                recurrent_constraint=None,
                bias_constraint=None,
                dropout=0,
                recurrent_dropout=0,
                implementation=1,
                return_sequences=False,
                return_state=False,
                go_backwards=False,
                stateful=False,
                unroll=False,
                reset_after=False)))

    # Dropout layer
    model.add(Dropout(0.5))

    # fully-connected layer
    model.add(Dense(2))
    model.add(Activation('softmax'))

    # optimizer
    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # training
    print('Training --------------')
    model.fit(traindata, trainlabel, epochs=10, batch_size=64, verbose=1)

    # test
    print('\nTesting---------------')
    loss, accuracy = model.evaluate(testdata, testlabel)

    # get the confidence probability
    resultslabel = model.predict(testdata)

    return resultslabel
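# Hedged usage sketch (not in the original); the arrays and the sequence
# length are placeholders.
# resultslabel = CNNBiGRU(traindata, trainlabel, testdata, testlabel, 600)
# predicted = resultslabel.argmax(axis=1)
# print('Predicted classes:', predicted)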
Example #26
for n in [120, 84]:
    if use_ste_layers:
        x = STE(n,
                ensemble_size=ensemble_size,
                activation='tanh',
                dropconnect=ste_dropconnect)(x)
    else:
        x = Dense(n, activation='tanh')(x)
    x = Dropout(0.5)(x)
x = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=input_layer, outputs=x)

# Train model
batch_size = 128
n_epochs = 256
opt = optimizers.SGD(lr=1e-2, decay=3e-4, momentum=0.9, nesterov=True)
model.compile(loss=keras.losses.sparse_categorical_crossentropy,
              optimizer=opt,
              metrics=['accuracy'])
keras.utils.print_summary(model)
es = EarlyStopping(patience=8, restore_best_weights=True)
model.fit(x_train,
          y_train,
          validation_split=0.1,
          batch_size=batch_size,
          epochs=n_epochs,
          verbose=1,
          callbacks=[es])
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
scaled = scaled.drop(columns=["point_timestamp", "future"])
scaled_train = scaled[0:len(bg2_train)]
scaled_test = scaled[(len(bg2_train)):len(scaled)]

scaled_test1 = scaled_test.copy()
scaled_test1["day_night"] = scaled_test1["day_night"].astype("int")
list_pred = []
seed(500)
#model begins here
model = Sequential()
model.add(Dense(1024, input_dim=6, activation='linear'))
model.add(Dense(512, activation='linear'))
model.add(Dense(64, activation='linear'))
model.add(Dense(1, activation='linear'))
sgd = optimizers.SGD(lr=0.1)
model.compile(loss='mean_squared_error',
              optimizer=sgd,
              metrics=['accuracy', 'mse'])

seed(300)
model.fit(scaled_train.loc[:, [
    'point_value.mg.dL', 'point_value', 'point_value.kilometers', 'maverage',
    'speed', 'day_night'
]],
          scaled_train.loc[:, "Y"],
          epochs=1000,
          batch_size=1000)

for i in range(1, 13):
    pr_nn = model.predict(scaled_test1.drop(columns=["Y"]))
Example #28
model = Sequential()
model.add(
    Conv2D(32, (3, 3),
           padding='same',
           activation='relu',
           input_shape=X.shape[1:]))
model.add(BatchNormalization())
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(10, activation='relu'))
model.add(Dense(3, activation='softmax'))
model.summary()

optimizer = optimizers.SGD(lr=learning_rate, momentum=momentum, decay=decay)
model.compile(loss='categorical_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])

history = model.fit(X,
                    Y,
                    batch_size=32,
                    epochs=10,
                    verbose=2,
                    validation_split=0.3)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
top_model = Sequential()
top_model.add(Flatten(input_shape=model_vgg.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))

model = Model(inputs=model_vgg.input, outputs=top_model(model_vgg.output))
model.summary()

model.load_weights("model/multiClassifier-weights-improvement-01-0.304.h5")

for layer in model_vgg.layers[:15]:
    layer.trainable = False

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
              metrics=['accuracy'])

print("Created model and loaded weights from file")

# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(train_data_dir,
    y_test = keras.utils.to_categorical(y_test, CLASS_NUM)

    # color preprocessing
    x_train, x_test, x_val = color_preprocessing(x_train, x_test, x_val)

    # build network
    img_input = Input(shape=(IMG_ROWS, IMG_COLS, IMG_CHANNELS))
    output = wide_residual_network(img_input, CLASS_NUM, DEPTH, WIDE)
    resnet = Model(img_input, output)
    print(resnet.summary())

    # set optimizer

    parallel_model = multi_gpu_model(resnet, gpus=2)
    parallel_model.compile(optimizer=optimizers.SGD(lr=.1,
                                                    momentum=0.9,
                                                    nesterov=True),
                           loss=[focal_loss(classes_num)],
                           metrics=['accuracy'])
    # set callback
    tb_cb = TensorBoard(log_dir=LOG_FILE_PATH, histogram_freq=0)
    change_lr = LearningRateScheduler(scheduler)
    # lr_reducer = ReduceLROnPlateau(monitor='val_acc',factor=0.2,patience=5,
    #                            mode='max',min_lr=1e-3)
    cbks = [change_lr, tb_cb]

    # set data augmentation
    print('Using real-time data augmentation.')
    datagen = ImageDataGenerator(horizontal_flip=True,
                                 width_shift_range=0.125,
                                 height_shift_range=0.125,