예제 #1
0
        # Build an SVM with the requested kernel; the gaussian kernel uses a
        # hard-coded bandwidth of 50.
        if kernel_type == "gaussian":
            clf = my_SVM.my_SVM_train("gaussian",
                                      my_SVM.my_Kernel.gaussian(50))
        else:
            clf = my_SVM.my_SVM_train("linear", my_SVM.my_Kernel.linear())

        # Train on this split and accumulate the score on the test split.
        _clf = clf.train(np.asarray(trainingX), np.asarray(trainingY))
        average += _clf.score(_clf.predict(np.asarray(testX)),
                              np.asarray(testY))

    # Mean score; the divisor 100 implies the enclosing loop (not visible in
    # this region) iterates 100 times — TODO confirm against the loop header.
    return average / 100


# Load the raw image dataset once; the edge and HOG extractors below reload
# it independently through load_images.load().
dataset = load_images.load()

# Extract three feature representations of the image set.
# NOTE(review): edge_dataset and hog_dataset are built here but not used in
# the visible portion of this file — presumably evaluated further below.
corner_dataset = corner_feature.corner_feature(dataset)
edge_dataset = edge_feature.edge_feature(load_images.load())
hog_dataset = hog_feature.hog_feature(load_images.load())

# Evaluate both SVM kernels on the corner features.

# Linear Kernel -> Accuracy  64.26% (author-reported)
print(my_classifier("linear", corner_dataset))

# Gaussian Kernel -> Accuracy  52.41% (author-reported)
print(my_classifier("gaussian", corner_dataset))
예제 #2
0
from model_architecture import Model
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam

# Training hyperparameters.
epochs = 25
learning_rate = 0.001
bs = 32  # batch size
data = []    # overwritten by ld.imgload() below
labels = []  # overwritten by ld.imgload() below
# 64x64 RGB input, 3 output classes (left / right / center directories below).
architecture = Model(64, 64, 3, 3)
model = architecture.model3()
model.summary()
# NOTE(review): `load`, `random` and `np` are not imported in this visible
# region — presumably imported elsewhere in the original file; confirm.
ld = load(64, 64, 3,
          [['D:/blind project 2/left'], ['D:/blind project 2/right'],
           ['D:/blind project 2/center']])

data, labels = ld.imgload()
# Shuffle images and labels together so pairs stay aligned.
c = list(zip(data, labels))
random.shuffle(c)
data[:], labels[:] = zip(*c)
# Scale pixel values to [0, 1].
data = np.array(data, dtype="float") / 255.0
labels = np.array(labels)
# Hold out 10% of the data for testing; fixed seed for reproducibility.
trainX, testX, trainY, testY = train_test_split(data,
                                                labels,
                                                test_size=0.1,
                                                random_state=42)
# One-hot encode the 3 class labels.
trainY = to_categorical(trainY, num_classes=3)
testY = to_categorical(testY, num_classes=3)
from load_images import load
from matplotlib import pyplot
import numpy as np

# Compute and display the pixel-wise mean of all Kaggle face images.
X, y = load()

# Vectorized replacement for the original per-image accumulation loop:
# reshape each flattened sample to 96x96 and average over the sample axis.
# dtype=np.float64 matches the float64 accumulator the loop used.
avg_kaggle_image = X.reshape(-1, 96, 96).mean(axis=0, dtype=np.float64)

fig = pyplot.figure(figsize=(1, 1))
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[])
ax.imshow(avg_kaggle_image, cmap="gray")
pyplot.show()
예제 #4
0
    # NOTE(review): this assignment belongs to a conditional whose header is
    # outside the visible region — presumably it disables discriminator
    # weight clipping for this mode; confirm against the lines above.
    clip_disc_weights = None

# Fixed latent noise (128 samples x 128 dims) so sample grids are always
# generated from the same latent points, making training progress comparable.
fixed_noise = tf.constant(np.random.normal(size=(128, 128)).astype('float32'))
fixed_noise_samples = Generator(128, noise=fixed_noise)


def generate_image(frame, true_dist):
    """Save a grid of generator samples for the given training frame.

    Runs the fixed-noise sample op on the (module-level) session and writes
    the resulting 128 images, reshaped to 28x28, into the output directory.
    `true_dist` is accepted but not used here.
    """
    generated = session.run(fixed_noise_samples)
    grid = generated.reshape((128, 28, 28))
    filename = out_dir + 'samples_{}.png'.format(frame)
    lib.save_images.save_images(grid, filename)


# Dataset iterator: batch generators over the custom image array file.
# The commented-out call below appears to be the MNIST loader this replaced.
#train_gen, dev_gen, test_gen = lib.mnist.load(BATCH_SIZE, BATCH_SIZE)
train_gen, dev_gen = loader.load(BATCH_SIZE, BATCH_SIZE, imarr_fn)


def inf_train_gen():
    """Yield training batches forever, restarting train_gen when exhausted."""
    while True:
        yield from train_gen()


print("Training")
# Train loop (the body continues beyond the visible region).
with tf.Session() as session:

    # NOTE(review): tf.initialize_all_variables() is the deprecated TF<1.0
    # spelling of tf.global_variables_initializer(); this only runs on old TF.
    session.run(tf.initialize_all_variables())

    # Infinite batch generator defined above.
    gen = inf_train_gen()
예제 #5
0
        # Tail of a helper whose def is outside the visible region: stack the
        # positive (px/py) and negative (nx/ny) examples into one feature
        # matrix and one label vector.
        x = np.append(px, nx, axis=0)
        y = np.append(py, ny)
        return (x, y)



def train_svm(feature, label):
    """Fit and return an AdaBoost ensemble of linear, probability-enabled SVCs.

    feature: 2-D array of sample feature vectors.
    label:   1-D array of class labels aligned with `feature`.
    """
    base = svm.SVC(probability=True, kernel='linear')
    classifier = AdaBoostClassifier(base, n_estimators=20)
    classifier.fit(feature, label)
    return classifier

if __name__ == "__main__":

        # Minimum sliding-window size — not used in the visible region;
        # presumably consumed further below. TODO confirm.
        minWinSize = [64, 64]

        # Load train/test image sets for both classes.
        p_images = li.load("positive_train_images")
        n_images = li.load("negative_train_images")
        p_test_images = li.load("positive_test_images")
        n_test_images = li.load("negative_test_images")

        # Combined test set: positives first, then negatives.
        test_images = np.append(p_test_images, n_test_images, axis=0)

        # HOG feature matrices for the positive/negative training images.
        px = np.array(extract_hog(p_images))
        nx = np.array(extract_hog(n_images))

        # Labels: 1 = positive, 0 = negative.
        py = np.ones(len(p_images))
        ny = np.zeros(len(n_images))

        test_py = np.ones(len(p_test_images))
        test_ny = np.zeros(len(n_test_images))
예제 #6
0
def model_main():
    """Train, evaluate and export the Keras model on a Kubernetes TF cluster.

    Builds a distributed tf.estimator from the Keras model, trains it with
    CollectiveAllReduceStrategy across the workers discovered through
    kubernetes_resolver, exports a SavedModel under ./dist, and finally
    starts TensorBoard on the chief worker.
    """
    start = time.time()
    tf.logging.set_verbosity(tf.logging.DEBUG)
    _logger = logging.getLogger("tensorflow")

    _logger.info("--------------------- Load Kubernetes Config ---------------------")
    # Publish the cluster spec via TF_CONFIG so tf.estimator sees the cluster.
    tf_config = kubernetes_resolver.build_config()
    os.environ['TF_CONFIG'] = str(tf_config)
    worker_index = kubernetes_resolver.fetch_task_index()
    num_workers = len(kubernetes_resolver.build_worker_list())

    # Local setup
    #
    # worker_index = None
    # num_workers = 3

    _logger.info("--------------------- Load Data ---------------------")
    (x_train, y_train), (x_test, y_test) = load_images.load()

    _logger.info("--------------------- Set RunConfiguration ---------------------")
    # CPU-only collective all-reduce across workers (num_gpus_per_worker=0).
    distribution = tf.contrib.distribute.CollectiveAllReduceStrategy(num_gpus_per_worker=0)
    config = tf.estimator.RunConfig(train_distribute=distribution,
                                    eval_distribute=distribution)

    # Local setup
    #
    # config = None

    # Create estimator
    _logger.info("--------------------- Create Estimator ---------------------")
    keras_estimator = tf.keras.estimator.model_to_estimator(
        keras_model=create_model(), config=config, model_dir='./model')

    # Split the fixed 1000-step budget evenly across the workers.
    train_spec = tf.estimator.TrainSpec(
        input_fn=lambda: input_fn(img=x_train, label=y_train,
                                  num_workers=num_workers,
                                  worker_index=worker_index,
                                  shuffle=True), max_steps=math.floor(1000 / num_workers))
    eval_spec = tf.estimator.EvalSpec(
        input_fn=lambda: input_fn(img=x_test, label=y_test,
                                  num_workers=num_workers,
                                  worker_index=worker_index,
                                  shuffle=False), steps=100)

    # BUG FIX: removed a stray `tf.LogMessage()` call that was here — it only
    # constructed and discarded a proto object (and raises AttributeError on
    # TF builds that do not expose it); it had no effect on training.
    _logger.info("--------------------- Start Training ---------------------")
    tf.estimator.train_and_evaluate(keras_estimator, train_spec, eval_spec)
    _logger.info("--------------------- Finish training ---------------------")
    end = time.time()
    time_diff = end - start
    _logger.info('--------------------- Estimate time ---------------------')
    _logger.info('Tensorflow Time start: {}'.format(start))
    _logger.info('Tensorflow Time end: {}'.format(end))
    # BUG FIX: corrected the "elapased" typo in this log message.
    _logger.info('Tensorflow Time elapsed: {}'.format(time_diff))
    _logger.info("--------------------- Start Export ---------------------")
    # Export a SavedModel for serving.
    export_dir = keras_estimator.export_savedmodel(
        export_dir_base="./dist",
        serving_input_receiver_fn=serving_input_fn)

    _logger.info("--------------------- Finish Export on Path %s ---------------------"
                 % export_dir)

    _logger.info("--------------------- Start Tensorboard ---------------------")
    # Only the chief worker serves TensorBoard.
    if "TF_CONFIG" in os.environ:
        config = os.environ['TF_CONFIG']
        if "\"type\": \"chief\"" in config:
            os.system('tensorboard --logdir=/notebooks/app/model --port=6006')
예제 #7
0
from model_architecture import Model
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam

# Training hyperparameters.
epochs = 25
learning_rate = 0.001
bs = 32  # batch size
data = []    # overwritten by ld.imgload() below
labels = []  # overwritten by ld.imgload() below
# 64x64 RGB input, 3 output classes (left / right / centre directories below).
architecture = Model(64, 64, 3, 3)
model = architecture.model3()
model.summary()
# BUG FIX: the Windows paths were plain string literals; '\U' in 'C:\Users'
# is an invalid unicode escape and a SyntaxError in Python 3, so this file
# could not even be parsed. Raw strings preserve the intended paths exactly.
ld = load(64, 64, 3,
          [[r'C:\Users\ROUSHAN K\Desktop\MDD_PROJECT\SMART_SPECS\LEFT'],
           [r'C:\Users\ROUSHAN K\Desktop\MDD_PROJECT\SMART_SPECS\RIGHT'],
           [r'C:\Users\ROUSHAN K\Desktop\MDD_PROJECT\SMART_SPECS\CENTRE']])

data, labels = ld.imgload()
# Shuffle images and labels together so pairs stay aligned.
c = list(zip(data, labels))
random.shuffle(c)
data[:], labels[:] = zip(*c)
# Scale pixel values to [0, 1].
data = np.array(data, dtype="float") / 255.0
labels = np.array(labels)
# Hold out 10% of the data for testing; fixed seed for reproducibility.
trainX, testX, trainY, testY = train_test_split(data,
                                                labels,
                                                test_size=0.1,
                                                random_state=42)
# One-hot encode the 3 class labels.
trainY = to_categorical(trainY, num_classes=3)
testY = to_categorical(testY, num_classes=3)
예제 #8
0
# Command-line flags (`parser` is constructed above, outside the visible region).
parser.add_argument('--gpu', '-g', default=-1, type=int,
                    help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
if args.gpu >= 0:
    cuda.check_cuda_available()
# Array-module alias: CuPy on GPU, NumPy on CPU.
xp = cuda.cupy if args.gpu >= 0 else np

batchsize = 100
n_epoch = 5

resize = 50       # images are resized to resize x resize
predict_no = 80   # index of the test image displayed below

# Load the training data.
print('load Sunshine dataset')
# NOTE(review): the positional arguments 11 and 10 are undocumented here —
# check load_images.load for their meaning before changing them.
x_train, x_test, y_train, y_test = load_images.load("./image",11,10,resize,resize)

# Show the image that will be predicted; blocks until a key is pressed.
cv2.imshow("predict",x_test[predict_no])
cv2.waitKey(0)


# Normalize pixel values to [0, 1] and reshape to (N, 3, H, W).
x_train = np.array(x_train).astype(np.float32).reshape((len(x_train),3, resize, resize)) / 255
y_train = np.array(y_train).astype(np.int32)
x_test = np.array(x_test).astype(np.float32).reshape((len(x_test),3, resize, resize)) / 255
y_test = np.array(y_test).astype(np.int32)

N = len(y_train)
print(N)
print(len(x_test))
N_test = y_test.size
예제 #9
0
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
from tensorflow.keras import layers
import time
import load_images
from IPython import display

# This code trains and saves GANs on the author's images; mostly adapted from
# https://www.tensorflow.org/tutorials/generative/dcgan, with the generator /
# discriminator architectures and the training-snapshot code changed because
# the images are RGB and larger than the tutorial's data.

# NOTE(review): `tf` is used below but `import tensorflow as tf` is not among
# the visible imports — confirm it is imported elsewhere in the file.
train_images = load_images.load(num=-1)

BUFFER_SIZE = len(train_images)
BATCH_SIZE = 64

# Batch and shuffle the data.
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(
    BUFFER_SIZE).batch(BATCH_SIZE)


# Generator architecture (modified significantly from the DCGAN tutorial).
# The definition continues beyond the visible region.
def make_generator_model():
    """Build the generator, starting from a 100-dim latent input vector."""
    model = tf.keras.Sequential()
    # Dense projection to 8*5*256 units — presumably reshaped into a feature
    # map by layers that follow outside the visible region; confirm.
    model.add(layers.Dense(8 * 5 * 256, use_bias=False, input_shape=(100, )))
    model.add(layers.BatchNormalization())
예제 #10
0
import numpy as np
from model_architecture import Model
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam

# Training hyperparameters.
epochs = 25
learning_rate = 0.001
bs = 32  # batch size
data = []    # overwritten by ld.imgload() below
labels = []  # overwritten by ld.imgload() below
# 64x64 RGB input, 3 output classes (left / right / center directories below).
architecture = Model(64, 64, 3, 3)
model = architecture.model3()
model.summary()
# NOTE(review): `load` and `random` are not imported in the visible import
# block — confirm they are imported elsewhere in the file.
ld = load(64, 64, 3, [['D:/blind project 2/left'], ['D:/blind project 2/right'], ['D:/blind project 2/center']])

data, labels = ld.imgload()
# Shuffle images and labels together so pairs stay aligned.
c = list(zip(data, labels))
random.shuffle(c)
data[:], labels[:] = zip(*c)
# Scale pixel values to [0, 1].
data = np.array(data, dtype="float")/255.0
labels = np.array(labels)
# Hold out 10% of the data for testing; fixed seed for reproducibility.
trainX, testX, trainY, testY = train_test_split(data, labels, test_size=0.1, random_state=42)
# One-hot encode the 3 class labels.
trainY = to_categorical(trainY, num_classes=3)
testY = to_categorical(testY, num_classes=3)
# Data augmentation applied during training.
aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1, height_shift_range=0.1, shear_range=0.2,
                         zoom_range=0.2, fill_mode="nearest")
# NOTE(review): `lr=` is the legacy Keras argument name (newer releases use
# `learning_rate=`); correct for the Keras version this was written against.
opt = Adam(lr=learning_rate, decay=learning_rate/epochs)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])