Example #1

import hasy_tools
import numpy as np


def load_data():
    data = hasy_tools.load_data()

    # One-Hot encoding
    data['y_train'] = np.eye(hasy_tools.n_classes)[data['y_train'].squeeze()]
    data['y_test'] = np.eye(hasy_tools.n_classes)[data['y_test'].squeeze()]

    # Preprocessing
    data['x_train'] = hasy_tools.preprocess(data['x_train'])
    data['x_test'] = hasy_tools.preprocess(data['x_test'])
    return data
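
A one-line usage sketch of the helper above; the keys follow the same hasy_tools conventions used inside the function:

data = load_data()  # preprocessed x_train/x_test, one-hot y_train/y_test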
Example #2

import hasy_tools
import numpy as np


# Method of a classifier wrapper class; it relies on self.model (a trained
# Keras model) and self.labels (an index-to-symbol mapping).
def get_prediction(self, input_img):
    input_img = np.array(input_img)
    input_img = input_img.reshape(1, 32, 32, 1)  # one 32x32 grayscale image
    prediction = self.model.predict(hasy_tools.preprocess(input_img))
    highest_prob_index = prediction.argmax()
    proba = prediction[0][highest_prob_index]
    label = self.labels[highest_prob_index]
    print('{}: {:0.2f}%'.format(label, proba * 100))
    return label
Example #3

import hasy_tools
import numpy as np
from keras.layers import Dense, Dropout, Flatten
from keras.models import Sequential

data = hasy_tools.load_data()

x_train = data["x_train"]
y_train = data["y_train"]
x_validate = data["x_train"]  # no separate validation split; training data reused
y_validate = data["y_train"]
x_test = data["x_test"]
y_test = data["y_test"]

# One-Hot encoding
y_train = np.eye(hasy_tools.n_classes)[y_train.squeeze()]
y_validate = np.eye(hasy_tools.n_classes)[y_validate.squeeze()]
y_test = np.eye(hasy_tools.n_classes)[y_test.squeeze()]

# Preprocessing
x_train = hasy_tools.preprocess(x_train)
x_validate = hasy_tools.preprocess(x_validate)
x_test = hasy_tools.preprocess(x_test)

# Define the model
model = Sequential()
model.add(Flatten())
model.add(Dense(256, activation="tanh"))
model.add(Dropout(0.25))  # Drop 25% of the units
model.add(Dense(256, activation="tanh"))
model.add(Dense(hasy_tools.n_classes, activation="softmax"))

# Compile model
model.compile(loss="categorical_crossentropy",
              optimizer="adam",
              metrics=["accuracy"])
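
The example stops after compilation; a minimal fit-and-evaluate continuation, with epoch count and batch size chosen here purely for illustration:

# Fit and evaluate (hyperparameters below are illustrative assumptions)
model.fit(x_train, y_train,
          validation_data=(x_validate, y_validate),
          epochs=10, batch_size=128)
loss, accuracy = model.evaluate(x_test, y_test)
print('Test accuracy: {:0.4f}'.format(accuracy))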
Example #4

import hasy_tools
import keras
import numpy as np

data = hasy_tools.load_data()

x_train = data['x_train']
y_train = data['y_train']
x_validate = data['x_train']  # no separate validation split; training data reused
y_validate = data['y_train']
x_test = data['x_test']
y_test = data['y_test']

# One-Hot encoding
y_train = np.eye(hasy_tools.n_classes)[y_train.squeeze()]
y_validate = np.eye(hasy_tools.n_classes)[y_validate.squeeze()]
y_test = np.eye(hasy_tools.n_classes)[y_test.squeeze()]

# Preprocessing
x_train = hasy_tools.preprocess(x_train)
x_validate = hasy_tools.preprocess(x_validate)
x_test = hasy_tools.preprocess(x_test)

# Load the model
model = keras.models.load_model('checkpoint.h5')

# Compile model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Visualize the model
print(model.summary())

# Evaluate the model
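# (The source snippet ends at the comment above; this continuation is a sketch.)
loss, accuracy = model.evaluate(x_test, y_test)
print('Test accuracy: {:0.4f}'.format(accuracy))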
Example #5

import hasy_tools
import numpy as np
from keras.layers import Dense, Flatten
from keras.models import Sequential

# Load the data
data = hasy_tools.load_data()

x_train = data['x_train']
y_train = data['y_train']
x_test = data['x_test']
y_test = data['y_test']

# One-Hot encoding
y_train = np.eye(hasy_tools.n_classes)[y_train.squeeze()]
y_test = np.eye(hasy_tools.n_classes)[y_test.squeeze()]

# Preprocessing
x_train = hasy_tools.preprocess(x_train)
x_test = hasy_tools.preprocess(x_test)

# Define the model
model = Sequential()
model.add(Flatten())
model.add(Dense(256, activation='tanh'))
model.add(Dense(256, activation='tanh'))
model.add(Dense(hasy_tools.n_classes, activation='softmax'))

# Compile model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Fit the model
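# (Truncated in the source; a minimal continuation, epochs/batch size assumed.)
model.fit(x_train, y_train, epochs=10, batch_size=128)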
Example #6

import hasy_tools
import numpy as np
from keras.callbacks import CSVLogger
from keras.layers import Dense, Flatten
from keras.models import Sequential

# Load the data
data = hasy_tools.load_data()

x_train = data['x_train']
y_train = data['y_train']
x_test = data['x_test']
y_test = data['y_test']

# One-Hot encoding
y_train = np.eye(hasy_tools.n_classes)[y_train.squeeze()]
y_test = np.eye(hasy_tools.n_classes)[y_test.squeeze()]

# Preprocessing
x_train = hasy_tools.preprocess(x_train)
x_test = hasy_tools.preprocess(x_test)

# Define the model
model = Sequential()
model.add(Flatten())
model.add(Dense(256))
model.add(Dense(hasy_tools.n_classes, activation='softmax'))

# Compile model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Fit the model
csv_logger = CSVLogger('log.csv', append=True, separator=';')
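# (Truncated in the source; a plausible continuation wiring in the logger.)
model.fit(x_train, y_train, epochs=10, batch_size=128, callbacks=[csv_logger])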
Example #7

import hasy_tools as ht
import numpy as np
from keras.layers import Convolution2D, Dense, Dropout, Flatten, MaxPooling2D
from keras.layers.advanced_activations import PReLU
from keras.models import Sequential

# Load data
fold = 1
hasy_data = ht.load_data(mode='fold-{}'.format(fold), image_dim_ordering='tf')

x_train = hasy_data['x_train']
y_train = hasy_data['y_train']
x_test = hasy_data['x_test']
y_test = hasy_data['y_test']

# One-Hot encoding
y_train = np.eye(ht.n_classes)[y_train.squeeze()]
y_test = np.eye(ht.n_classes)[y_test.squeeze()]

# Preprocessing
x_train = ht.preprocess(x_train)
x_test = ht.preprocess(x_test)

# Define model
model = Sequential()
model.add(
    Convolution2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:]))
model.add(PReLU())
model.add(Convolution2D(64, (3, 3), padding='same'))
model.add(PReLU())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(1024, activation='tanh'))
model.add(Dropout(0.5))
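
# (The source snippet is cut off here; a typical completion adds the output
#  layer and compiles. This is a sketch, not the original code.)
model.add(Dense(ht.n_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])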
Example #8

import numpy as np
from keras.layers import Activation, Conv2D
from sklearn.model_selection import train_test_split

# internal modules
import hasy_tools

# Load the data
data = hasy_tools.load_data()
datasets = ['train', 'test']

# One-Hot encoding
for dataset in datasets:
    key = 'y_' + dataset
    data[key] = np.eye(hasy_tools.n_classes)[data[key].squeeze()]

# Preprocessing
for dataset in datasets:
    key = 'x_' + dataset
    data[key] = hasy_tools.preprocess(data[key])

# Generate Validation Data
split = train_test_split(data['x_train'], data['y_train'],
                         test_size=0.20,
                         random_state=0,
                         stratify=data['y_train'])
data['x_train'], data['x_val'], data['y_train'], data['y_val'] = split
datasets.append('val')


def skip_layer_conv(x, nb_layers=16):
    x1 = Conv2D(nb_layers, (3, 3), padding='same')(x)
    x1 = Activation('relu')(x1)
    x2 = Conv2D(nb_layers, (3, 3), padding='same')(x1)
    x2 = Activation('relu')(x2)
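    # (Truncated in the source. A common finish merges the two branches into a
    #  skip connection; this completion is an assumption and additionally needs
    #  `from keras.layers import concatenate`.)
    x = concatenate([x1, x2])
    return x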
Example #9

File: main.py  Project: wowothk/algorithms

import hasy_tools
import numpy as np
from keras.layers import Activation, Conv2D
from keras.regularizers import l1
from sklearn.model_selection import train_test_split

# Load the data
data = hasy_tools.load_data()
datasets = ['train', 'test']

# One-Hot encoding
for dataset in datasets:
    key = 'y_' + dataset
    data[key] = np.eye(hasy_tools.n_classes)[data[key].squeeze()]

# Preprocessing
for dataset in datasets:
    key = 'x_' + dataset
    data[key] = hasy_tools.preprocess(data[key])

# Generate Validation Data
split = train_test_split(data['x_train'],
                         data['y_train'],
                         test_size=0.20,
                         random_state=0,
                         stratify=data['y_train'])
data['x_train'], data['x_val'], data['y_train'], data['y_val'] = split
datasets.append('val')


def skip_layer_conv(x, nb_layers=16):
    x1 = Conv2D(nb_layers, (3, 3), padding='same')(x)
    x1 = Activation('relu')(x1)
    x2 = Conv2D(nb_layers, (3, 3), padding='same')(x1)
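    # (Truncated in the source; mirroring the block above, a plausible finish.
    #  The merge is an assumption and needs `from keras.layers import concatenate`.)
    x2 = Activation('relu')(x2)
    x = concatenate([x1, x2])
    return x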
Example #10

import hasy_tools
import numpy as np
from keras.layers import Activation, Dense, Flatten
from keras.models import Sequential

# data loading
data = hasy_tools.load_data()
data['x_train'] = hasy_tools.preprocess(data['x_train'])
data['y_train'] = np.eye(369)[data['y_train']]
# data['x_test'] = hasy_tools.preprocess(data['x_test'])
# data['y_test'] = np.eye(369)[data['y_test']]

# model
model = Sequential()
model.add(Flatten())
model.add(Dense(254))
model.add(Activation('relu'))
model.add(Dense(369))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# fit
model.fit(data['x_train'], data['y_train'].squeeze(),
          epochs=10)

# save
model.save('model.h5')
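
Evaluation is omitted in the source; if the commented-out test preprocessing above is enabled, a minimal check would be:

loss, accuracy = model.evaluate(data['x_test'], data['y_test'].squeeze())
print('Test accuracy: {:0.4f}'.format(accuracy))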