    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)
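
    # datagen is assumed to be a keras.preprocessing.image.ImageDataGenerator
    # built with the matching feature-wise flags; an illustrative (not
    # original) setup:
    #   datagen = ImageDataGenerator(featurewise_center=True,
    #                                featurewise_std_normalization=True,
    #                                zca_whitening=False)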

    total_samples_train = getNumSamples(variants[num_variant][0][0:4] + '.h5')
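
    # generate_arrays is not shown here; a minimal sketch of its assumed
    # behaviour (an endless HDF5 batch generator -- the 'data'/'labels' key
    # names follow the HDF5Matrix calls below, and the resizing implied by
    # new_size is an assumption):
    #   def generate_arrays(path, batch_size, max_sample, new_size):
    #       with h5py.File(path, 'r') as f:
    #           while True:
    #               for i in range(0, max_sample, batch_size):
    #                   yield (f['data'][i:i + batch_size],
    #                          f['labels'][i:i + batch_size])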
    model.fit_generator(generate_arrays(TRAIN_SET,
                                        batch_size=BATCH_SIZE,
                                        max_sample=total_samples_train,
                                        new_size=INPUT_FRAME_SIZE),
                        steps_per_epoch=BATCH_SIZE,
                        epochs=EPOCHS,
                        verbose=2,
                        callbacks=[best_model],
                        validation_data=(x_test, y_test))
    print("Finished fitting model")
    score = model.evaluate(x_test, y_test, verbose=1)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    print('All metrics', score)

    x_train = HDF5Matrix(TRAIN_SET, 'data')
    y_train = HDF5Matrix(TRAIN_SET, 'labels')

    # y_t and y_tr are assumed to hold the integer class labels of the test
    # and training sets, respectively (their definitions are not shown here).
    res = model.predict(x_test)
    res_label = np.argmax(res, 1)
    print('\ntest:', sum(res_label == y_t) / float(len(y_t)) * 100)

    res = model.predict(x_train)
    res_label = np.argmax(res, 1)
    print('train:', sum(res_label == y_tr) / float(len(y_tr)) * 100)

def step_decay(epoch):
    # The def line, constants, and guard condition were truncated from the
    # original snippet; the values and condition below are placeholders that
    # preserve the visible if/else structure of the step-decay schedule.
    initial_lrate = 0.005
    drop = 0.5
    epochs_drop = 10.0
    if epoch % epochs_drop == 0:
        lrate = initial_lrate * (drop ** np.floor(epoch / epochs_drop))
    else:
        lrate = initial_lrate * (drop ** np.floor((epoch - 1) / epochs_drop))

    print(lrate)
    return lrate

lrate = LearningRateScheduler(step_decay)

# Assign the configured optimizer and pass it to compile; the original code
# built this Adam instance but then compiled with the string 'Adam',
# silently discarding these settings.
optimizer = Adam(lr=0.005, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)

model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

csv_logger = CSVLogger('./training_Xception.log', append=True)
checkpointer = ModelCheckpoint(filepath='./weights_Xception.hdf5', verbose=1,
                               save_best_only=True, monitor='val_acc')
callbacks_list = [checkpointer, csv_logger, lrate]
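# Callback roles: ModelCheckpoint keeps only the epoch with the best val_acc,
# CSVLogger appends per-epoch metrics to the log file, and
# LearningRateScheduler applies step_decay at the start of each epoch.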

# 'traning_set' and 'validation_set' are assumed to be generators defined
# elsewhere; steps arguments must be integers, hence the floor division.
model.fit_generator(traning_set,
                    epochs=100,
                    steps_per_epoch=len(x_train) // 50,
                    validation_data=validation_set,
                    validation_steps=len(x_valid) // 50,
                    verbose=1,
                    callbacks=callbacks_list)

score = model.evaluate(x_valid, y_valid, verbose=0)

print(score)

Example #3

import os

import numpy as np
import matplotlib.pyplot as plt
from keras.applications.xception import Xception
from keras.losses import categorical_crossentropy
from keras.optimizers import Adadelta

# Assumption: the .npy files live next to this script.
current_directory = os.path.dirname(os.path.abspath(__file__))


def main(args):

    # hyper parameters
    batch_size = 16
    num_classes = 102
    epochs = 100

    # Instantiate model
    model = Xception(include_top=True, weights=None, classes=num_classes)
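    # Note: with include_top=True and no input_shape given, Keras' Xception
    # expects 299x299 RGB inputs, so the arrays loaded below are assumed to
    # match that shape.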

    # prepare data
    x_train = np.load(os.path.join(current_directory, 'x_train.npy'))
    y_train = np.load(os.path.join(current_directory, 'y_train.npy'))
    x_test = np.load(os.path.join(current_directory, 'x_test.npy'))
    y_test = np.load(os.path.join(current_directory, 'y_test.npy'))

    # summary of the model
    model.summary()

    # compile model
    model.compile(
        loss=categorical_crossentropy,
        optimizer=Adadelta(),
        metrics=['accuracy']
    )

    # learning section
    hist = model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        validation_data=(x_test, y_test)
    )

    # evaluation section
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])

    # save graphs
    acc = hist.history['acc']
    val_acc = hist.history['val_acc']
    loss = hist.history['loss']
    val_loss = hist.history['val_loss']

    # Accuracy curve (the original plotted the loss arrays here under
    # accuracy labels; data, labels, and filenames are now consistent.)
    plt.plot(range(epochs), acc, marker='.', label='acc')
    plt.plot(range(epochs), val_acc, marker='.', label='val_acc')
    plt.legend(loc='best')
    plt.grid()
    plt.xlabel('epoch')
    plt.ylabel('acc')
    plt.savefig(os.path.join(current_directory, 'acc_xception.png'))
    plt.clf()

    # Loss curve
    plt.plot(range(epochs), loss, marker='.', label='loss')
    plt.plot(range(epochs), val_loss, marker='.', label='val_loss')
    plt.legend(loc='best')
    plt.grid()
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.savefig(os.path.join(current_directory, 'loss_xception.png'))
    plt.clf()
Example #4

lb = LabelBinarizer()
lb.fit(np.asarray(data['primary_microconstituent']))
y = lb.transform(labels)
print('\nLabels Binarized, converting array')
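# For example, with 7 classes LabelBinarizer maps each label to a one-hot
# row such as [0, 0, 1, 0, 0, 0, 0] (illustrative values).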


# Renamed from 'input' to avoid shadowing the built-in input().
X = np.asarray(processed_imgs)

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.1, random_state=42)


model = Xception(weights=None, classes=7)

model.summary()
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])

model.fit(X_train, y_train, epochs=5, batch_size=32, validation_data=(X_test, y_test))
name = 'results/UHCS_Xception_Weights'
score = model.evaluate(X_test, y_test)
print('Test score:', score[0])
print('Test accuracy:', score[1])
model.save_weights(name+'.h5')
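# The saved weights can be restored later into the same architecture:
#   model = Xception(weights=None, classes=7)
#   model.load_weights(name + '.h5')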

# write() takes a single string; the original passed two arguments, which
# raises a TypeError.
with open('Xception.txt', 'w') as f:
    f.write('Test score: {}\n'.format(score[0]))
    f.write('Test accuracy: {}\n'.format(score[1]))
Example #5
    # As in the earlier snippet, the configured Adam instance must be the one
    # passed to compile; the original compiled with the string 'Adam' and
    # discarded these settings.
    optimizer = Adam(lr=0.005,
                     beta_1=0.9,
                     beta_2=0.999,
                     epsilon=None,
                     decay=0.0,
                     amsgrad=False)

    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    csv_logger = CSVLogger('./training_Xception_ten_res{}.log'.format(i),
                           append=True)
    checkpointer = ModelCheckpoint(
        filepath='./weights_Xception_ten_res{}.hdf5'.format(i),
        verbose=1,
        save_best_only=True,
        monitor='val_acc')
    callbacks_list = [checkpointer, csv_logger, lrate]
    model.fit_generator(traning_set,
                        epochs=50,
                        steps_per_epoch=len(x_train) // 50,
                        verbose=1,
                        validation_data=validation_set,
                        validation_steps=len(x_valid) // 50,
                        callbacks=callbacks_list)

    score.append(model.evaluate(x_valid, y_valid, verbose=0))
    print(score[i])
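
    # After the loop over i completes, the per-run scores can be summarized,
    # e.g.: print('mean [loss, acc]:', np.mean(score, axis=0))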