Example No. 1
def train_cnn(height, width, num_classes, batch_size, epochs, learning_rate,
              schedule_type, find_lr, train_gen, val_gen, num_start_filters,
              kernel_size, fcl_size):

    # Initialize the list of callbacks and the learning rate scheduler to be used
    callbacks = []
    schedule = None

    if not find_lr:
        # check to see if step-based learning rate decay should be used
        if schedule_type == "step":
            print("[INFO] using 'step-based' learning rate decay...")
            schedule = StepDecay(initAlpha=1e-1,
                                 factor=0.25,
                                 dropEvery=int(epochs / 5))

        # check to see if linear learning rate decay should be used
        elif schedule_type == "linear":
            print("[INFO] using 'linear' learning rate decay...")
            schedule = PolynomialDecay(maxEpochs=epochs,
                                       initAlpha=1e-1,
                                       power=1)

        # check to see if a polynomial learning rate decay should be used
        elif schedule_type == "poly":
            print("[INFO] using 'polynomial' learning rate decay...")
            schedule = PolynomialDecay(maxEpochs=epochs,
                                       initAlpha=1e-1,
                                       power=5)

        elif schedule_type == "one_cycle":
            print("[INFO] using 'one cycle' learning...")
            schedule = OneCycleScheduler(learning_rate)
            callbacks = [schedule]

        # if the learning rate schedule is not empty, add it to the list of
        # callbacks
        if schedule_type != "none" and schedule_type != "one_cycle":
            callbacks = [LearningRateScheduler(schedule)]

        # initialize the decay for the optimizer
        decay = 0.0

        # if we are using Keras' "standard" decay, then we need to set the
        # decay parameter
        if schedule_type == "standard":
            print("[INFO] using 'keras standard' learning rate decay...")
            decay = 1e-1 / epochs

        # otherwise, no learning rate schedule is being used
        elif schedule_type == "none":
            print("[INFO] no learning rate schedule being used")
    else:
        print("[INFO] Finding learning rate...")

    # Instantiate Sequential API
    model = Sequential([
        # First convolutional block: num_start_filters kernels of size kernel_size, followed by ReLU.
        Conv2D(num_start_filters,
               kernel_size,
               padding='same',
               activation='relu',
               input_shape=(height, width, 3)),
        # Pooling layer
        MaxPooling2D(),
        # Second convolutional block: twice the number of filters of the previous block.
        Conv2D(num_start_filters * 2,
               kernel_size,
               padding='same',
               activation='relu'),
        # Pooling layer
        MaxPooling2D(),
        # Third convolutional block: twice the number of filters of the previous block.
        Conv2D(num_start_filters * 4,
               kernel_size,
               padding='same',
               activation='relu'),
        # Pooling layer
        MaxPooling2D(),
        # Flatten for use with fully-connected layers
        Flatten(),
        # Fully connected layer with fcl_size neurons
        Dense(fcl_size, activation='relu'),
        # Output layer
        Dense(num_classes, activation='softmax')
    ])

    if schedule_type != "one_cycle" and find_lr != True:
        # initialize optimizer and model, then compile it
        opt = SGD(lr=learning_rate, momentum=0.9, decay=decay)
    else:
        opt = SGD()

    # Compile the CNN model, specifying the loss function
    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    if find_lr:
        lr_finder = LRFinder(model)
        lr_finder.find(train_gen)
        return lr_finder
    else:
        # Training and evaluating the CNN model
        history = model.fit(train_gen,
                            steps_per_epoch=train_gen.samples // batch_size,
                            validation_data=val_gen,
                            validation_steps=val_gen.samples // batch_size,
                            callbacks=callbacks,
                            epochs=epochs)
        return model, history, schedule
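
A minimal usage sketch for train_cnn; the directory paths, image size, and hyperparameters below are illustrative and not from the source, and class_mode='sparse' is chosen to match the sparse categorical loss compiled above.

from tensorflow.keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1.0 / 255)
train_gen = datagen.flow_from_directory("data/train", target_size=(64, 64),
                                        batch_size=32, class_mode="sparse")
val_gen = datagen.flow_from_directory("data/val", target_size=(64, 64),
                                      batch_size=32, class_mode="sparse")

model, history, schedule = train_cnn(height=64, width=64,
                                     num_classes=train_gen.num_classes,
                                     batch_size=32, epochs=20, learning_rate=1e-2,
                                     schedule_type="poly", find_lr=False,
                                     train_gen=train_gen, val_gen=val_gen,
                                     num_start_filters=16, kernel_size=(3, 3),
                                     fcl_size=512)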
Example No. 2
def build_model(input_shape,
                num_classes,
                conv_kernel_size=(4, 4),
                conv_strides=(2, 2),
                conv1_channels_out=16,
                conv2_channels_out=32,
                final_dense_inputsize=100,
                **kwargs):
    """
    Define the model architecture.

    Args:
        input_shape (numpy.ndarray): The shape of the data
        num_classes (int): The number of classes of the dataset

    Returns:
        tensorflow.python.keras.engine.sequential.Sequential: The model defined in Keras

    """
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True
    config.intra_op_parallelism_threads = 112
    config.inter_op_parallelism_threads = 1
    sess = tf.compat.v1.Session(config=config)
    model = Sequential()

    model.add(
        Conv2D(conv1_channels_out,
               kernel_size=conv_kernel_size,
               strides=conv_strides,
               activation='relu',
               input_shape=input_shape))

    model.add(
        Conv2D(conv2_channels_out,
               kernel_size=conv_kernel_size,
               strides=conv_strides,
               activation='relu'))

    model.add(Flatten())

    model.add(Dense(final_dense_inputsize, activation='relu'))

    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss=ke.losses.categorical_crossentropy,
                  optimizer=ke.optimizers.Adam(),
                  metrics=['accuracy'])

    # initialize the optimizer variables
    opt_vars = model.optimizer.variables()

    for v in opt_vars:
        v.initializer.run(session=sess)

    return model
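
A minimal usage sketch, assuming MNIST-shaped inputs; the data variables and epoch count are illustrative, and labels must be one-hot to match the categorical cross-entropy loss above.

model = build_model(input_shape=(28, 28, 1), num_classes=10)
model.summary()
# x_train: (N, 28, 28, 1) float32 in [0, 1], y_train: one-hot (N, 10)
# model.fit(x_train, y_train, epochs=5, batch_size=64)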
Example No. 3
def create_model(x0, fLayer, batch_input_shape, return_sequences=False, return_state=False):
    euler = EulerCell(fLayer=fLayer, x0=x0, batch_input_shape=batch_input_shape)
    PINN = RNN(cell=euler, batch_input_shape=batch_input_shape, return_sequences=return_sequences,
               return_state=return_state)
    model = Sequential()
    model.add(PINN)
    model.compile(loss='mse', optimizer=RMSprop(1e-2))
    return model


if __name__ == "__main__":
    t_train = np.asarray(pd.read_csv('./data/ttrain.csv'))[:, :, np.newaxis]
    xt_train = np.asarray(pd.read_csv('./data/xttrain.csv'))
    x0 = np.asarray(pd.read_csv('./data/x0.csv'))[0, 0] * np.ones((t_train.shape[0], 1))

    fLayer = Sequential()
    fLayer.add(Normalization(np.min(t_train), np.max(t_train), np.min(xt_train), np.max(xt_train)))
    fLayer.add(Dense(5, activation='tanh'))
    fLayer.add(Dense(1))

    t_range = np.linspace(np.min(t_train), np.max(t_train), 1000)
    xt_range = np.linspace(np.min(xt_train), np.max(xt_train), 1000)[np.random.permutation(np.arange(1000))]
    f_range = - t_range**3 * 4 * 2.7**(-t_range**3 / 3)

    fLayer.compile(loss='mse', optimizer=RMSprop(1e-2))
    inputs_train = np.transpose(np.asarray([t_range, xt_range]))
    fLayer.fit(inputs_train, f_range, epochs=200)

    mckp = ModelCheckpoint(filepath="./savedmodels/cp.ckpt", monitor='loss', verbose=1,
                          save_best_only=True, mode='min', save_weights_only=True)
Example No. 4
# split the data into training and test sets
n_train_hours = 40000
train = values[:n_train_hours, :]
test = values[n_train_hours:, :]


# split inputs and targets for supervised learning
train_x, train_y = train[:, :-1], train[:, -1]
test_x, test_y = test[:, :-1], test[:, -1]

# to feed this data to the recurrent layers it must be reshaped to 3D: [samples, timesteps, features]
train_X = train_x.reshape((train_x.shape[0], 1, train_x.shape[1]))
test_X = test_x.reshape((test_x.shape[0], 1, test_x.shape[1]))

model = Sequential()
model.add(Conv1D(filters=32, kernel_size=5,
                 strides=1, padding="causal",
                 activation="relu"))
model.add(
    GRU(
        32,
        input_shape=(
            train_X.shape[1],
            train_X.shape[2]),
        return_sequences=True))
model.add(GRU(16, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(16, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(1))
model.compile(loss=tf.keras.losses.Huber(),
Example No. 5
def build_model_and_get(input_shape) -> Sequential:
    model = Sequential()

    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])

    return model
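
A hedged smoke-test of build_model_and_get; the random arrays are purely illustrative stand-ins for a real binary-labelled image set of the same shape.

import numpy as np

model = build_model_and_get(input_shape=(28, 28, 1))
x = np.random.rand(16, 28, 28, 1).astype('float32')
y = keras.utils.to_categorical(np.random.randint(0, 2, size=16), num_classes=2)
model.fit(x, y, epochs=1, batch_size=8)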
Example No. 6
# save class labels to disk to color data points in TensorBoard accordingly
with open(join(log_dir, 'metadata.tsv'), 'w') as f:
    np.savetxt(f, y_test)

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

tensorboard = TensorBoard(batch_size=batch_size,
                          embeddings_freq=1,
                          embeddings_layer_names=['features'],
                          embeddings_metadata='metadata.tsv',
                          embeddings_data=x_test)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu', name='features'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
Example No. 7
#     lrs.append(cur_lr)
#     cur_lr = cur_lr*lr_multiplier # increase LR

# max_slope = [x - z for x, z in zip(lrs, lossess)]    
# optLr = lrs[np.where(min(max_slope)==max_slope)[0][0] ]
# print('lr:', optLr)


# Scale back the generated and ground truth results to see absolute distance for error measurements
def inv(truth, predicted):
    truth = scalery.inverse_transform(truth)
    predicted = scalery.inverse_transform(predicted)
    return truth, predicted

# Network creation
motionModel = Sequential([
    LSTM(input_shape=(step_size, 42 + 42 + dimension), units=64, return_sequences=True),
    LSTM(units=64, return_sequences=True),
    TimeDistributed(Dense(42, activation='tanh'))
])
motionModel.compile(loss='mse', optimizer=keras.optimizers.Adam(learning_rate=optLr))
trainlosses = []
vallosses = []
meanLosstrain = []
meanLossval = []
epochs = 50
penalty=0
for i in range(epochs):
    print('epoch', i)

    print('training')
    for sampleNr in range(len(seq_y_train)):     
        sampleX = seq_X_train[sampleNr]
        sampley = seq_y_train[sampleNr]
        prediction = []
Example No. 8
def CifarModel(cfg, training=True, stem_multiplier=3, name='CifarModel'):
    """Cifar Model"""
    logging.info(f"buliding {name}...")

    input_size = cfg['input_size']
    ch_init = cfg['init_channels']
    layers = cfg['layers']
    num_cls = cfg['num_classes']
    wd = cfg['weights_decay']
    genotype = eval("genotypes.%s" % cfg['arch'])

    # define model
    inputs = Input([input_size, input_size, 3], name='input_image')
    if training:
        drop_path_prob = Input([], name='drop_prob')
    else:
        drop_path_prob = None

    ch_curr = stem_multiplier * ch_init
    s0 = s1 = Sequential([
        Conv2D(filters=ch_curr,
               kernel_size=3,
               strides=1,
               padding='same',
               kernel_initializer=kernel_init(),
               kernel_regularizer=regularizer(wd),
               use_bias=False),
        BatchNormalization(affine=True)
    ], name='stem')(inputs)

    ch_curr = ch_init
    reduction_prev = False
    logits_aux = None
    for layer_index in range(layers):
        if layer_index in [layers // 3, 2 * layers // 3]:
            ch_curr *= 2
            reduction = True
        else:
            reduction = False

        cell = Cell(genotype,
                    ch_curr,
                    reduction,
                    reduction_prev,
                    wd,
                    name=f'Cell_{layer_index}')
        s0, s1 = s1, cell(s0, s1, drop_path_prob)

        reduction_prev = reduction

        if layer_index == 2 * layers // 3 and training:
            logits_aux = AuxiliaryHeadCIFAR(num_cls, wd=wd)(s1)

    fea = GlobalAveragePooling2D()(s1)

    logits = Dense(num_cls,
                   kernel_initializer=kernel_init(),
                   kernel_regularizer=regularizer(wd))(Flatten()(fea))

    if training:
        return Model((inputs, drop_path_prob), (logits, logits_aux), name=name)
    else:
        return Model(inputs, logits, name=name)
Example No. 9
    def build(self, shape, n_cls):
        model = Sequential()
        model.add(layers.Dense(512, activation='relu', input_shape=shape))
        model.add(layers.Dropout(0.2))
        model.add(layers.Dense(1024, activation='relu'))
        model.add(layers.Dropout(0.2))
        model.add(layers.Dense(512, activation='relu'))
        model.add(layers.Dropout(0.2))
        model.add(layers.Dense(256, activation='relu'))
        model.add(layers.Dropout(0.2))
        layer01 = layers.Dense(128, activation='relu')
        model.add(layer01)

        self._add_layer_ex('dense_128_relu', layer01.output)

        self.input = model.input
        self.output = model.output

        input_a = layers.Input(shape=shape)
        input_p = layers.Input(shape=shape)
        input_n = layers.Input(shape=shape)
        processed_a = model(input_a)
        processed_p = model(input_p)
        processed_n = model(input_n)
        concatenate = layers.concatenate(
            [processed_a, processed_p, processed_n])
        model = Model(inputs=[input_a, input_p, input_n], outputs=concatenate)

        self.model = model
        pass
Example No. 10
import tensorflow as tf
from tensorflow.keras import Sequential

model = Sequential()

res = tf.keras.applications.ResNet101V2(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
)

model.add(res)

model.summary()
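
A hedged inference sketch on top of this wrapper; the image path is an assumption. With include_top=True the network expects 224x224 inputs preprocessed by resnet_v2.preprocess_input.

import numpy as np
from tensorflow.keras.applications.resnet_v2 import preprocess_input, decode_predictions
from tensorflow.keras.preprocessing import image

img = image.load_img("example.jpg", target_size=(224, 224))  # illustrative path
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])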
Example No. 11
    def build(self, shape, n_cls):
        def _euclidean_distance(vects):
            x, y = vects
            return K.sqrt(K.sum(K.square(x - y), axis=1, keepdims=True))

        model = Sequential()
        model.add(
            layers.Conv2D(256, (3, 3),
                          padding='valid',
                          input_shape=shape,
                          activation='relu'))
        model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        model.add(layers.Dropout(0.2))
        model.add(layers.Conv2D(128, (3, 3), activation='relu'))
        model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        model.add(layers.Dropout(0.2))
        model.add(layers.Conv2D(128, (3, 3), activation='relu'))
        model.add(layers.Dropout(0.2))
        model.add(layers.Conv2D(64, (3, 3), activation='relu'))
        model.add(layers.Dropout(0.2))
        model.add(layers.Flatten())
        model.add(layers.Dense(512, activation='relu'))
        model.add(layers.Dropout(0.1))
        model.add(layers.Dense(256, activation='relu'))
        model.add(layers.Dropout(0.1))
        layer01 = layers.Dense(128, activation='relu')
        model.add(layer01)

        self._add_layer_ex('dense_128_relu', layer01.output)

        self.input = model.input
        self.output = model.output

        input_a = layers.Input(shape=shape)
        input_b = layers.Input(shape=shape)
        processed_a = model(input_a)
        processed_b = model(input_b)
        distance = layers.Lambda(_euclidean_distance)(
            [processed_a, processed_b])
        model = Model(inputs=[input_a, input_b], outputs=distance)

        self.model = model
        pass
Example No. 12
# reshape the data to add a single channel dimension
x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], x_train.shape[2], 1))
x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], x_test.shape[2], 1))

# normalize the pixel values
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0

# set the input image shape
input_shape = x_train.shape[1:]

# set the number of classes
n_classes = len(unique(y_train))

# define the model
model = Sequential()
model.add(Conv2D(64, (3,3), activation='relu', input_shape=input_shape))
model.add(MaxPool2D((2, 2)))
model.add(Conv2D(32, (3,3), activation='relu'))
model.add(MaxPool2D((2, 2)))
model.add(Flatten())
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax'))

# define the loss and the optimizer
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

# train the model
model.fit(x_train, y_train, epochs=10, batch_size=128, verbose=1)
Example No. 13
batchsz = 250
(x, y), (x_val, y_val) = datasets.mnist.load_data()
print('datasets:', x.shape, y.shape, x.min(), x.max())

db = tf.data.Dataset.from_tensor_slices((x, y))
db = db.map(preprocess).shuffle(batchsz*5).batch(batchsz)
ds_val = tf.data.Dataset.from_tensor_slices((x_val, y_val))
ds_val = ds_val.map(preprocess).batch(batchsz)

sample = next(iter(db))
print(sample[0].shape, sample[1].shape)

network = Sequential([layers.Dense(256, activation='relu'),
                      layers.Dense(128, activation='relu'),
                      layers.Dense(64, activation='relu'),
                      layers.Dense(32, activation='relu'),
                      layers.Dense(10)])
network.build(input_shape=(None, 28 * 28))
network.summary()

network.compile(optimizer=optimizers.Adam(lr=0.01),
                loss=tf.losses.CategoricalCrossentropy(from_logits=True),
                metrics=['accuracy']
                )

network.fit(db, epochs=3, validation_data=ds_val, validation_freq=2)

network.evaluate(ds_val)

network.save_weights('weights.ckpt')
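
This snippet relies on a preprocess function defined elsewhere; a plausible sketch, consistent with the (None, 28 * 28) build shape and the one-hot labels implied by CategoricalCrossentropy(from_logits=True), might look like the following (an assumption, not the source's definition).

def preprocess(x, y):
    x = tf.cast(x, dtype=tf.float32) / 255.0              # scale pixels to [0, 1]
    x = tf.reshape(x, [28 * 28])                           # flatten for the Dense stack
    y = tf.one_hot(tf.cast(y, dtype=tf.int32), depth=10)   # one-hot labels for the loss above
    return x, y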
Example No. 14
    def build_feature(self) -> Sequential:
        """
        Build the feature-extraction part of the model, from the Embedding layer through the CNN.
        """
        emb = Embedding(input_dim=len(self.word2idx),
                        output_dim=len(self.emb_matrix[0]),
                        weights=[self.emb_matrix], trainable=False,
                        input_length=self.query_len)

        model = Sequential()
        model.add(emb)

        for i in range(len(self.filters_nums)):
            model.add(Conv1D(filters=self.filters_nums[i], kernel_size=self.kernel_sizes[i]))
            model.add(BatchNormalization())
            model.add(Activation(activation="relu"))
            if i == len(self.filters_nums) - 1:
                model.add(GlobalMaxPool1D())
            else:
                model.add(MaxPooling1D())

        return model
Example No. 15
class LSTMChem(object):
    def __init__(self, config, session='train'):
        assert session in ['train', 'generate', 'finetune'], \
                'one of {train, generate, finetune}'

        self.config = config
        self.session = session
        self.model = None

        if self.session == 'train':
            self.build_model()
        else:
            self.model = self.load(self.config.model_arch_filename,
                                   self.config.model_weight_filename)

    def build_model(self):
        st = SmilesTokenizer()
        n_table = len(st.table)
        weight_init = RandomNormal(mean=0.0,
                                   stddev=0.05,
                                   seed=self.config.seed)

        self.model = Sequential()
        self.model.add(
            LSTM(units=self.config.units,
                 input_shape=(None, n_table),
                 return_sequences=True,
                 kernel_initializer=weight_init,
                 dropout=0.3))
        self.model.add(
            LSTM(units=self.config.units,
                 input_shape=(None, n_table),
                 return_sequences=True,
                 kernel_initializer=weight_init,
                 dropout=0.5))
        self.model.add(
            Dense(units=n_table,
                  activation='softmax',
                  kernel_initializer=weight_init))

        arch = self.model.to_json(indent=2)
        self.config.model_arch_filename = os.path.join(self.config.exp_dir,
                                                       'model_arch.json')
        with open(self.config.model_arch_filename, 'w') as f:
            f.write(arch)

        self.model.compile(optimizer=self.config.optimizer,
                           loss='categorical_crossentropy')

    def save(self, checkpoint_path):
        assert self.model, 'You have to build the model first.'

        print('Saving model ...')
        self.model.save_weights(checkpoint_path)
        print('model saved.')

    def load(self, model_arch_file, checkpoint_file):
        print(f'Loading model architecture from {model_arch_file} ...')
        with open(model_arch_file) as f:
            model = model_from_json(f.read())
        print(f'Loading model checkpoint from {checkpoint_file} ...')
        model.load_weights(checkpoint_file)
        print('Loaded the Model.')
        return model
Example No. 16
import matplotlib.pyplot as plt

from tensorflow import keras
from tensorflow.keras import Sequential
from tensorflow.keras import layers

from tensorflow.keras.layers import Layer
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D, Dense, BatchNormalization, Dropout
from tensorflow.keras.layers import Flatten
# import from tensorflow.keras consistently (mixing the standalone keras package can break layer construction)
from tensorflow.keras.regularizers import l2
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.preprocessing.image import ImageDataGenerator

model = Sequential()
model.add(
    Conv2D(32,
           kernel_size=(5, 5),
           activation='relu',
           kernel_regularizer=l2(0.001),
           input_shape=(224, 224, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(
    Conv2D(64,
           kernel_size=(5, 5),
           kernel_regularizer=l2(0.001),
           activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
Example No. 17
(x, y), (x_test, y_test) = datasets.fashion_mnist.load_data()
print(x.shape, y.shape)
batchsize = 128
db = tf.data.Dataset.from_tensor_slices((x, y))
db = db.map(preprocess).shuffle(10000).batch(batchsize)

db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
db_test = db_test.map(preprocess).batch(batchsize)
db_iter = iter(db)
sample = next(db_iter)
print('batch size :', sample[0].shape, sample[1].shape)
model = Sequential([
    layers.Dense(256, activation=tf.nn.relu),
    layers.Dense(128, activation=tf.nn.relu),
    layers.Dense(64, activation=tf.nn.relu),
    layers.Dense(32, activation=tf.nn.relu),
    layers.Dense(10)
])
model.build(input_shape=[None, 28 * 28])
model.summary()  # print the network structure
optimizer = optimizers.Adam(lr=1e-3)


def main():
    for epoch in range(30):

        for step, (x, y) in enumerate(db):
            x = tf.reshape(x, [-1, 28 * 28])

            with tf.GradientTape() as tape:
Example No. 18
from tensorflow.keras.callbacks import TensorBoard, EarlyStopping
import matplotlib.pyplot as plt

with open('x_train.npy', 'rb') as file:
    x_train = np.load(file, allow_pickle=True)
with open('y_train.npy', 'rb') as file:
    y_train = np.load(file, allow_pickle=True)

model = Sequential([
    Flatten(input_shape=[4]),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(256, activation='relu'),
    Dropout(0.5),
    Dense(512, activation='relu'),
    Dropout(0.5),
    Dense(256, activation='relu'),
    Dropout(0.5),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(2, activation='softmax')
])

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
print(model.summary())


def createCallback():
Example No. 19
    def __create_model(self):
        ''' Utility function to create and train model '''
        
        if self.__model_type == 'complete':
             
            fp_pre_chain = keras.Input(
                shape=self._nnX['fp_pre_chain'][0].shape, 
                name='fp_pre_chain')
            
            fp_amino_acid = keras.Input(
                shape=self._nnX['fp_amino_acid'][0].shape,
                name='fp_amino_acid')

            coupling_agent = keras.Input(
                shape=self._nnX['coupling_agent'][0].shape, 
                name='coupling_agent')
            
            coupling_strokes = keras.Input(
                shape=self._nnX['coupling_strokes'][0].shape,
                name='coupling_strokes')
            
            temp_coupling = keras.Input(
                shape=self._nnX['temp_coupling'][0].shape, 
                name='temp_coupling')
            
            deprotection_strokes = keras.Input(
                shape=self._nnX['deprotection_strokes'][0].shape, 
                name='deprotection_strokes')

            flow_rate = keras.Input(
                shape=self._nnX['flow_rate'][0].shape, 
                name='flow_rate')
            
            machine = keras.Input(
                shape=self._nnX['machine'][0].shape, 
                name='machine')
            
            temp_reactor_1 = keras.Input(
                shape=self._nnX['temp_reactor_1'][0].shape, 
                name='temp_reactor_1')

            x_pre_chain = Conv1D(2**self.model_params['pre_chain_conv1_filter'], 
                                 2**self.model_params['pre_chain_conv1_kernel'])(fp_pre_chain)
            x_pre_chain = Dense(2**self.model_params['pre_chain_dense1'])(x_pre_chain)
            x_pre_chain = Dropout(self.model_params['pre_chain_dropout1'])(x_pre_chain)
            x_pre_chain = Conv1D(2**self.model_params['pre_chain_conv2_filter'], 
                                 2**self.model_params['pre_chain_conv2_kernel'])(x_pre_chain)
            x_pre_chain = Dropout(self.model_params['pre_chain_dropout2'])(x_pre_chain)
            x_pre_chain = Activation(self.model_params['pre_chain_activation1'])(x_pre_chain)
            x_pre_chain = Flatten()(x_pre_chain)
            x_pre_chain = Dense(2**self.model_params['pre_chain_amino_acid_dense_final'], 
                                activation=self.model_params['pre_chain_activation2'])(x_pre_chain)

            x_amino_acid = Dense(2**self.model_params['amino_acid_dense1'])(fp_amino_acid)
            x_amino_acid = Dense(2**self.model_params['amino_acid_dense2'], 
                                 activation=self.model_params['amino_acid_activation1'])(x_amino_acid)
            x_amino_acid = Dropout(self.model_params['amino_acid_dropout1'])(x_amino_acid)
            x_amino_acid = Dense(2**self.model_params['pre_chain_amino_acid_dense_final'], 
                                 activation=self.model_params['amino_acid_activation2'])(x_amino_acid)

            x_chemistry = concatenate([x_pre_chain, x_amino_acid])
            x_chemistry = Dense(2**self.model_params['chemistry_dense1'])(x_chemistry)
            x_chemistry = Dense(2**self.model_params['chemistry_dense2'])(x_chemistry)

            x_coupling_agent = Activation('sigmoid')(coupling_agent)
            x_coupling_strokes = Activation('sigmoid')(coupling_strokes)
            x_temp_coupling = Activation('sigmoid')(temp_coupling)
            x_deprotection_strokes = Activation('sigmoid')(deprotection_strokes)
            x_deprotection_strokes = Dense(4, activation='relu')(x_deprotection_strokes)

            x_coupling = concatenate(
                [x_coupling_agent, x_coupling_strokes, x_temp_coupling, x_deprotection_strokes])
            x_coupling = Dense(self.model_params['coupling_dense1'])(x_coupling)
            x_coupling = Dense(self.model_params['coupling_dense2'])(x_coupling)

            x_flow_rate = Activation('sigmoid')(flow_rate)
            x_machine = Activation('sigmoid')(machine)
            x_machine = Dense(3, activation='relu')(x_machine)
            x_temp_reactor_1 = Activation('sigmoid')(temp_reactor_1)

            x_machine_variables = concatenate([x_flow_rate, x_machine, x_temp_reactor_1])
            x_machine_variables = Dense(self.model_params['machine_dense1'])(x_machine_variables)
            x_machine_variables = Dense(self.model_params['machine_dense2'])(x_machine_variables)

            x = concatenate([x_chemistry, x_coupling, x_machine_variables])
            x = Dense(2**self.model_params['concat_dense1'])(x)
            x = Dense(2**self.model_params['concat_dense2'], 
                      activation=self.model_params['concat_activation2'])(x)
            x = Dropout(self.model_params['concat_dropout1'])(x)
            x = Dense(2**self.model_params['concat_dense3'], 
                      activation=self.model_params['concat_activation3'])(x)

            first_area = Dense(1,  activation='linear', name='first_area')(x)
            first_height = Dense(1,  activation='linear', name='first_height')(x)
            first_width = Dense(1,  activation='linear', name='first_width')(x)

            first_diff = Dense(1,  activation='linear', name='first_diff')(x)

            model = Model(
                inputs=[fp_pre_chain, fp_amino_acid, 
                        coupling_agent, coupling_strokes, temp_coupling, deprotection_strokes, 
                        flow_rate, machine, temp_reactor_1], 
                outputs=[first_area, first_height, first_width, first_diff]
            )

        elif self.__model_type == 'minimal':
            model = Sequential()
            model.add(Conv1D(
                2**self.model_params['pre_chain_conv1_filter'], 
                2**self.model_params['pre_chain_conv1_kernel'], 
                input_shape=(self._nnX[0].shape[0], self._nnX[0].shape[1])))
            model.add(Dense(2**self.model_params['pre_chain_dense1']))
            model.add(Dropout(self.model_params['pre_chain_dropout1']))
            model.add(Conv1D(
                2**self.model_params['pre_chain_conv2_filter'], 
                2**self.model_params['pre_chain_conv2_kernel']))
            model.add(Dropout(self.model_params['pre_chain_dropout2']))
#             model.add(Activation(self.model_params['pre_chain_activation1']))
            model.add(Flatten())
            model.add(Dense(
                2**self.model_params['pre_chain_amino_acid_dense_final'],
                activation=self.model_params['pre_chain_activation2']))
            model.add(Dense(
                2**self.model_params['concat_dense1']))
            model.add(Dense(
                2**self.model_params['concat_dense2']))
            model.add(Dropout(
                self.model_params['concat_dropout1']))
            model.add(Dense(
                2**self.model_params['concat_dense3']))
            model.add(Dense(
                1, activation='linear'))
        
        model.compile(
            optimizer=RMSprop(lr=self.model_params['opt_lr']),
            loss=mse)

        callbacks_list = []

        if self.model_params['save_checkpoint']:
            checkpoint = ModelCheckpoint(
                self.model_params['checkpoint_filepath'] + 
                "predictor-epoch{epoch:02d}-loss{loss:.4f}-val_loss{val_loss:.4f}.hdf5", 
                monitor='val_loss', 
                save_best_only=True, 
                mode='min')
            callbacks_list = [checkpoint]
        
        model.fit(self._nnX, self._nnY, 
                  epochs=self.model_params['epochs'], 
                  batch_size=self.model_params['batch_size'], 
                  validation_split=self.model_params['val_split'], 
                  callbacks=callbacks_list, verbose=False
                 )
        
        self.model = model
Example No. 20
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Embedding, GlobalAveragePooling1D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import BinaryCrossentropy

# Data pipeline
(train, valid), info = tfds.load('imdb_reviews/subwords8k',
                                 split=['train', 'test'],
                                 as_supervised=True,
                                 with_info=True)

vocab_size = info.features['text'].encoder.vocab_size

train = train.shuffle(1000).padded_batch(128, padded_shapes=([None], []))
valid = valid.padded_batch(128, padded_shapes=([None], []))

# Basic embedding model
model = Sequential([
    Embedding(vocab_size, 16),
    GlobalAveragePooling1D(),
    Dense(1),
])

# Train it!
model.compile(Adam(0.01), BinaryCrossentropy(True), ['accuracy'])
model.fit(train, validation_data=valid, epochs=10)

# Save the model
model.save('models/imdb_subwords8k.h5')
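
A hedged follow-up showing how the saved model might be reloaded and applied to a new review; the sample text is illustrative, and the subword encoder comes from the same tfds info object loaded above.

import tensorflow as tf

reloaded = tf.keras.models.load_model('models/imdb_subwords8k.h5')
encoder = info.features['text'].encoder
encoded = encoder.encode("A surprisingly touching and well acted film.")
logit = reloaded.predict(tf.constant([encoded]))[0, 0]
print('positive' if logit > 0 else 'negative')   # Dense(1) outputs a raw logit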
Example No. 21
df = pd.concat([past_days, data_test], ignore_index=True)  # DataFrame.append is removed in pandas 2.x; assumes pandas imported as pd
df = df.drop(['Adj Close'], axis=1)  # Ditch useless columns of data
inputs = scaler.transform(df)  # Normalise to between 0 and 1
X_test = []
y_test = []

for i in range(SEQ_LEN, inputs.shape[0]):
    X_test.append(inputs[i - SEQ_LEN:i])
    y_test.append(inputs[i, 0])

X_test, y_test = np.array(X_test), np.array(y_test)
print(f'Processed and scaled data according to ratio {scaler.scale_[0]}.')

# Building LSTM Model
print('Initializing LSTM Model...')
model = Sequential()
# model = tf.keras.models.load_model(load_path)  # uncomment this if not training for first time
# print(f'Model loaded successfully from {load_path}.')
'''
JSON Method which does not work due to Unknown initializer: GlorotUniform error
with open(f"models/{Stock}.json", 'r') as json_file:
    json_savedModel = json_file.read()
model = tf.keras.models.model_from_json(json_savedModel)
'''  # JSON Method


def build_model():
    model.add(
        LSTM(units=UNITS[0],
             activation=ACTIVATION,
             return_sequences=True,
Example No. 22
pca2.fit(X)
pca_X = pca2.transform(X)
print(pca_X)
print(pca_X.shape)

from sklearn.model_selection import train_test_split
pca_x_train, pca_x_test = train_test_split(pca_X, test_size=1 / 7)

print('pca_x_train.shape : ', pca_x_train.shape)
print('pca_x_test.shape : ', pca_x_test.shape)
print('y_train : ', y_train)

from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, Flatten, Dense

model = Sequential()

model.add(Dense(100, input_dim=(154)))
model.add(Dense(256, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(pca_x_train, y_train, epochs=50, batch_size=256)

# evaluate and predict on the PCA-transformed test split (the raw x_test no longer matches the 154-dim input)
loss, acc = model.evaluate(pca_x_test, y_test)
decoded_imgs = model.predict(pca_x_test)

# using matplotlib
'''
Example No. 23
    def build(self, shape):
        """
        [1] Designed by the experimental result and LeNet-5[3] inspiration

        [2] https://github.com/keras-team/keras/blob/master/examples/cifar10_cnn.py

        [3] LeCun, Y., Bottou, L., Bengio, Y., & Haffner, P.
            (1998). Gradient-based learning applied to document recognition.
            Proceedings of the IEEE, 86(11), 2278-2324.
        """
        model = Sequential()
        model.add(layers.Conv2D(16, (9, 9), input_shape=shape))
        model.add(layers.BatchNormalization())
        model.add(layers.Activation('relu'))
        model.add(layers.Conv2D(16, (9, 9), activation='relu'))
        model.add(layers.MaxPooling2D())
        model.add(layers.Dropout(0.25))
        model.add(layers.Conv2D(24, (7, 7), activation='relu'))
        model.add(layers.Conv2D(24, (7, 7), activation='relu'))
        model.add(layers.MaxPooling2D())
        model.add(layers.Dropout(0.25))
        model.add(layers.Conv2D(32, (5, 5)))
        model.add(layers.BatchNormalization())
        model.add(layers.Activation('relu'))
        model.add(layers.Conv2D(32, (5, 5), activation='relu'))
        model.add(layers.MaxPooling2D())
        model.add(layers.Dropout(0.5))
        model.add(layers.Flatten())
        return model
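
Note that build() returns only the convolutional trunk (it ends at Flatten with no classification head); a hedged sketch of how a caller might finish and compile it, with an illustrative instance name, input shape, and class count:

trunk = builder.build(shape=(96, 96, 3))   # 'builder' stands for an instance of this class; shape is illustrative
trunk.add(layers.Dense(256, activation='relu'))
trunk.add(layers.Dense(10, activation='softmax'))   # illustrative class count
trunk.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])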
Example No. 24
seed=1234
np.random.seed(seed)
tf.random.set_seed(seed)

!pip install googleimagedownloader
from googleimagedownloader.googleimagedownloader import GoogleImageDownloader



IMAGE_SIZE = (224, 224)

classifier_url ="https://tfhub.dev/google/imagenet/resnet_v2_50/classification/4"

classifier = Sequential([
    hub.KerasLayer(classifier_url, input_shape=IMAGE_SIZE+(3,))
])

test_image = get_file('img.jpg','http://www.bruna.cat/imgdownload/full/130/1302237/sunflower-phone-wallpaper.jpg')
test_image = Image.open(test_image).resize(IMAGE_SIZE)
test_image

test_image = np.array(test_image)/255.0
test_image.shape

result = classifier.predict(test_image[np.newaxis, ...])
result.shape

predicted_class = np.argmax(result[0], axis=-1)
predicted_class
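
A hedged follow-up that maps the predicted index to a human-readable name, using the ImageNet label file distributed with the TensorFlow Hub tutorials (the URL is an assumption carried over from those tutorials):

labels_path = get_file('ImageNetLabels.txt',
                       'https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')
imagenet_labels = np.array(open(labels_path).read().splitlines())
print(imagenet_labels[predicted_class])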
Example No. 25
def init_letters_CNN_model_2():
    num_classes = 26
    input_shape = (28, 28, 1)
    initializer = tf.initializers.VarianceScaling(scale=2.0)
    model = Sequential()

    model.add(Conv2D(32, kernel_size=3, activation='relu', input_shape=input_shape))
    model.add(Conv2D(32, kernel_size=3, activation='relu'))
    model.add(Conv2D(64, kernel_size=5, strides=2, padding='same', activation='relu'))

    model.add(Dropout(0.4))

    model.add(Conv2D(64, kernel_size=3, activation='relu'))
    model.add(Conv2D(64, kernel_size=3, activation='relu'))
    model.add(Conv2D(128, kernel_size=5, strides=2, padding='same', activation='relu'))
    model.add(Dropout(0.4))

    model.add(Conv2D(128, kernel_size=4, activation='relu'))
    model.add(Flatten())
    model.add(Dropout(0.4))
    model.add(Dense(num_classes, activation='softmax'))

    return model
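
The function only builds the network; a hedged sketch of compiling and training it, assuming EMNIST-letters-style arrays (x: (N, 28, 28, 1) scaled to [0, 1], y: integer labels 0-25):

model = init_letters_CNN_model_2()
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# model.fit(x_train, y_train, validation_split=0.1, epochs=10, batch_size=128)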
Example No. 26
#script tensorl2.py
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense

n_output_nodes = 3
n_final_layer_nodes = 1

model = Sequential()

dense_layer = Dense(n_output_nodes, input_shape=(2, ), activation='sigmoid')
model.add(dense_layer)
dense_layer = Dense(n_final_layer_nodes, activation='sigmoid')
model.add(dense_layer)

# perform calculation
x_input = tf.constant([[3, 4], [4, 5], [5, 6]], tf.float32, shape=(3, 2))
print(f'shape of x_input {tf.shape(x_input).numpy()}')
print("calling computation")
y = model(x_input)  # call the model directly instead of model.call()
print("Calculated output y:")
print(y.numpy())
Example No. 27
def train(epochs: int):
    # loading dataset
    df = pd.read_csv('data/trans_per_month.csv', index_col='customer_id')

    # calculate product frequency per month
    X = []
    y = []
    for i in range(len(df.columns) - 24):
        start = datetime.date(2017, 1, 1) + relativedelta(months=i)
        end = start + relativedelta(months=24)
        new_x, new_y = product_frequency_between(df, start, end)
        X.append(new_x)
        y.append(new_y)

    X = np.concatenate(X)
    y = np.concatenate(y)

    # normalizing data
    x_scaler = MinMaxScaler()
    y_scaler = MinMaxScaler()
    X = x_scaler.fit_transform(X)
    y = y_scaler.fit_transform(y.reshape(-1, 1))[:, 0]

    # saving scalers
    joblib.dump(x_scaler, 'models/serialized/x_scaler.mod')
    joblib.dump(y_scaler, 'models/serialized/y_scaler.mod')

    # splitting data
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.2,
                                                        random_state=41)

    # reshaping for lstm
    X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
    X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)

    # create model
    model = Sequential()
    model.add(
        LSTM(16,
             input_shape=(X_train.shape[1], X_train.shape[2]),
             return_sequences=True))
    model.add(LSTM(8, input_shape=(X_train.shape[1], X_train.shape[2])))
    model.add(Dense(1, activation='relu'))

    model.compile(loss='mean_squared_error', optimizer='adam')
    model.summary()

    # training model
    history = model.fit(X_train,
                        y_train,
                        validation_data=(X_test, y_test),
                        epochs=epochs,
                        verbose=1)

    # saving model
    model.save('models/serialized/lstm_model')

    # predicting data
    trainPredict = model.predict(X_train)
    model.reset_states()
    testPredict = model.predict(X_test)

    # invert predictions
    trainPredict = y_scaler.inverse_transform(trainPredict)
    trainY = y_scaler.inverse_transform([y_train])
    testPredict = y_scaler.inverse_transform(testPredict)
    testY = y_scaler.inverse_transform([y_test])

    # calculate root mean squared error
    trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0]))
    typer.secho(f'🍻 Train Score: {trainScore:.2f} RMSE',
                fg=typer.colors.BRIGHT_GREEN)
    testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0]))
    typer.secho(f'🍻 Test Score: {testScore:.2f} RMSE',
                fg=typer.colors.BRIGHT_GREEN)

    # plotting
    plt.plot(history.history['loss'], label='train')
    plt.plot(history.history['val_loss'], label='validation')
    plt.title(f'Model loss with {epochs} epoch')
    plt.legend()
    plt.show()
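
A hedged inference sketch that reloads the artifacts written by train(); the helper below, and the expectation that a query row has the same number of month-columns used during training, are assumptions.

import joblib
import numpy as np
import tensorflow as tf

lstm_model = tf.keras.models.load_model('models/serialized/lstm_model')
x_scaler = joblib.load('models/serialized/x_scaler.mod')
y_scaler = joblib.load('models/serialized/y_scaler.mod')

def predict_next(frequencies):
    """frequencies: 1D array with the same number of columns the model was trained on."""
    x = x_scaler.transform(np.asarray(frequencies, dtype='float32').reshape(1, -1))
    x = x.reshape(x.shape[0], x.shape[1], 1)
    return y_scaler.inverse_transform(lstm_model.predict(x))[0, 0]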
Example No. 28
def define_context_sensitive_model(matrix, class_count):
    vocab_length = len(matrix)
    total_span = 2 * CONTEXT_SIZE + 1

    model = Sequential()
    model.add(
        Embedding(input_dim=vocab_length,
                  output_dim=EMBEDDING_DIM,
                  weights=[matrix],
                  input_length=total_span))
    model.add(Flatten())
    model.add(Dense(HIDDEN_SIZE))
    model.add(Activation("tanh"))
    model.add(Dense(class_count))
    model.add(Activation("softmax"))

    model.compile(optimizer=tf.compat.v1.train.AdamOptimizer(),
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    return model
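
A hedged usage sketch; CONTEXT_SIZE, EMBEDDING_DIM and HIDDEN_SIZE are module-level constants in the source, and the random embedding matrix and class count below are purely illustrative.

import numpy as np

vocab_length = 5000   # illustrative vocabulary size
matrix = np.random.rand(vocab_length, EMBEDDING_DIM).astype('float32')
model = define_context_sensitive_model(matrix, class_count=12)
model.summary()
# X: (N, 2 * CONTEXT_SIZE + 1) integer word indices, Y: one-hot (N, 12)
# model.fit(X, Y, epochs=5, batch_size=32)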
Example No. 29
    get_all_pics_and_create_csv_file(LETTER_IMAGES_FOLDER99)
    get_all_pics_and_create_csv_file(LETTER_IMAGES_FOLDER100)


just_need_a_function()

images = np.array(images)
data = pd.read_csv('age.csv')
labels = np.array(data)
print(images.shape)
print(labels.shape)
images, labels = shuffle(images, labels)

X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(images, labels, test_size=0.1)

model = Sequential()

model.add(Conv2D(16, (3, 3), activation='relu', input_shape=(64, 64, 3)))
model.add(BatchNormalization())
model.add(MaxPool2D(2, 2))
model.add(Dropout(.2))

model.add(Conv2D(32, (3, 3), activation='relu', ))
model.add(BatchNormalization())
model.add(MaxPool2D(2, 2))
model.add(Dropout(.3))

model.add(Conv2D(64, (3, 3), activation='relu', ))
model.add(BatchNormalization())
model.add(MaxPool2D(2, 2))
model.add(Dropout(.4))
Example No. 30
def main(argv):
    ########### READ THE INPUT FILE ###########
    f = open(argv[0], "r")
    texttmp = f.read().split('\n')
    text = []
    #model_path = os.path.realpath('./models/'+ argv[1])
    ########### DROP WORDS THAT ARE TOO SHORT OR TOO LONG ###########
    for mot in texttmp:
        if len(mot) >= min_length and len(mot) <= max_length:
            text.append(mot)

    ########### JOIN ALL PASSWORDS INTO ONE STRING SEPARATED BY '\n' ###########
    texte_concat = '\n'.join(text)

    ########### COLLECT ALL DISTINCT CHARACTERS ###########
    chars = sorted(list(set(texte_concat)))
    num_chars = len(chars)
    print(num_chars)

    ########### BUILD THE CHAR -> INDEX DICTIONARIES AND THEIR INVERSE ###########
    char_indices = dict((c, i) for i, c in enumerate(chars))
    indices_char = dict((i, c) for i, c in enumerate(chars))
    mot_long = max([len(mdp) for mdp in text])
    sequences = []
    next_chars = []

    ########### STORE EVERY PASSWORD SEQUENCE IN A LIST, SLIDING BY step_length EACH TIME ###########
    ########### STORE THE FOLLOWING CHARACTER IN NEXT_CHARS ###########
    for i in range(0, len(texte_concat) - mot_long, step_length):
        sequences.append(texte_concat[i:i + mot_long])
        next_chars.append(texte_concat[i + mot_long])
    num_sequences = len(sequences)

    ########### CREATE ZERO-FILLED ARRAYS ###########
    ########### X: BOOLEAN ENCODING OF THE CHARACTERS OF EACH SEQUENCE ###########
    ########### Y: BOOLEAN ENCODING OF NEXT_CHARS ###########
    X = np.zeros((num_sequences, mot_long, num_chars), dtype=bool)
    Y = np.zeros((num_sequences, num_chars), dtype=bool)
    for i, sequence in enumerate(sequences):
        for j, char in enumerate(sequence):
            X[i, j, char_indices[char]] = 1
        Y[i, char_indices[next_chars[i]]] = 1

    ########### SPLIT EVERYTHING INTO 3 PARTS: TRAIN/DEV/TEST ###########
    nb = len(X)

    max_train = int(float(nb) / 100 * taux_train)
    max_dev = int(float(nb) / 100 * (taux_dev + taux_train))

    X_train = X[:max_train]
    X_dev = X[max_train:max_dev]
    X_test = X[max_dev:]

    Y_train = Y[:max_train]
    Y_dev = Y[max_train:max_dev]
    Y_test = Y[max_dev:]

    ########### KERAS MODEL SETUP ###########
    model = Sequential()

    ########### LOAD THE MODEL ###########
    if load_model:
        model = keras.models.load_model(model_path)
    else:

        model.add(CuDNNLSTM(latent_dim, input_shape=(mot_long, num_chars)))
        model.add(Dense(num_chars, activation='softmax'))
        optimizer = RMSprop(lr=0.01)
        #optimizer = Adam(lr=0.01)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=["accuracy"])

        model.summary()
        start = time.time()
        print('Start training ')
        tensor = TensorBoard(
            log_dir='./Graph',
            histogram_freq=0,
            write_graph=True,
            write_images=True
        )  #TensorBoard(log_dir="logs\{}", histogram_freq=1, write_graph=True, write_images=True, write_grad=True)
        history = model.fit(X_train,
                            Y_train,
                            epochs=epochs,
                            batch_size=batch_size,
                            verbose=verbosity,
                            validation_data=(X_dev, Y_dev),
                            callbacks=[tensor])
        end = time.time()
        print('Finished training - time elapsed:', (end - start) / 60, 'min')

        score = model.evaluate(X_test, Y_test, verbose=0)
        print("Test score : ", score)
        print("Test accuracy : ", score[1])
        model_path = os.path.realpath('./models/model-' +
                                      str(round(score[1], 2)) + '.h5')

    if store_model:
        if not os.path.exists('models'):
            os.makedirs('models')
        print('Storing model at:', model_path)
        model.save(model_path)

    # Start sequence generation from end of the input sequence
    sequence = texte_concat[-(mot_long - 1):] + '\n'

    new_pwd = []

    print('Generating {} new passwords'.format(gen_amount))

    while len(new_pwd) < gen_amount:
        x = np.zeros((1, mot_long, num_chars))
        for i, char in enumerate(sequence):
            x[0, i, char_indices[char]] = 1

        probs = model.predict(x, verbose=0)[0]
        probs /= probs.sum()
        next_idx = np.random.choice(len(probs), p=probs)

        next_char = indices_char[next_idx]
        sequence = sequence[1:] + next_char

        # New line means we have a new password
        if next_char == '\n':
            gen_pwd = sequence.split('\n')[
                1]  #[password for password in sequence.split('\n')][1]

            # Discard all passwords that are too short
            if len(gen_pwd) >= min_length:
                # Only allow new and unique passwords
                if gen_pwd not in new_pwd:
                    new_pwd.append(gen_pwd)

            if 0 == (len(new_pwd) % (gen_amount / 10)):
                print('Generated ' + str(len(new_pwd)) + ' password')

    print('First ' + str(gen_amount) + ' generated passwords:')
    f = open("generated.txt", "w")
    for password in new_pwd[:gen_amount]:
        f.write(password + "\n")
    f.close()
Example No. 31
def sonar_model():
    model = Sequential()
    model.add(Dense(60, input_shape=(60,), activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(30, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))

    # Use the Binary Cross Entropy loss function for a Binary Classifier.
    # https://www.tensorflow.org/api_docs/python/tf/keras/models/Sequential#compile
    model.compile(loss='binary_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    return model
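
A hedged usage sketch with the classic UCI sonar layout (60 numeric features plus an 'M'/'R' label); the CSV path and the train/test split below are assumptions.

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split

df = pd.read_csv('sonar.csv', header=None)              # illustrative path
X = df.iloc[:, :60].values.astype('float32')
y = (df.iloc[:, 60] == 'M').astype('float32').values    # mine = 1, rock = 0
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

model = sonar_model()
model.fit(X_train, y_train, epochs=50, batch_size=16, validation_data=(X_test, y_test))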