Example No. 1
    def test_loss_with_sample_weight_in_layer_call(self):
        class MyLayer(layers.Layer):
            def __init__(self):
                super(MyLayer, self).__init__()
                self.bias = testing_utils.Bias()

            def call(self, inputs):
                out = self.bias(inputs[0])
                self.add_loss(MAE()(inputs[1], out, inputs[2]))
                self.add_loss(
                    math_ops.reduce_mean(inputs[2] * mae(inputs[1], out)))
                return out

        inputs = Input(shape=(1,))
        targets = Input(shape=(1,))
        sw = Input(shape=(1,))

        outputs = MyLayer()([inputs, targets, sw])
        model = Model([inputs, targets, sw], outputs)
        model.predict([self.x, self.y, self.w])
        model.compile(
            optimizer_v2.gradient_descent.SGD(0.05),
            run_eagerly=testing_utils.should_run_eagerly(),
            experimental_run_tf_function=testing_utils.should_run_tf_function())

        history = model.fit([self.x, self.y, self.w], batch_size=3, epochs=5)
        self.assertAllClose(history.history['loss'], [2., 1.8, 1.6, 1.4, 1.2],
                            1e-3)

        output = model.evaluate([self.x, self.y, self.w])
        self.assertAlmostEqual(output, 1.0, 3)

        output = model.test_on_batch([self.x, self.y, self.w])
        self.assertAlmostEqual(output, 1.0, 3)
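
This test comes from the Keras test suite and omits its imports; a hedged reconstruction of what it assumes (TF 1.x/2.0-era private Keras API, exact module paths vary by version):

# hedged reconstruction, not part of the original snippet
from tensorflow.python.keras import Input, Model, layers, testing_utils
from tensorflow.python.keras import optimizer_v2
from tensorflow.python.keras.losses import MeanAbsoluteError as MAE
from tensorflow.python.keras.losses import mean_absolute_error as mae
from tensorflow.python.ops import math_ops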
Example No. 2
 def __init__(self, model_name: str, model: Model,
              fs_seq: FileSystemSequence, epochs: int, batch_size: int,
              tuned_model_params: Dict[str, float],
              model_params: Dict[str, float], history: Dict,
              discovery_path: str, weight_path: str):
     self.model_name = model_name
     self.layer_shapes: List[LayerStats] = seq(model.layers).map(
         lambda layer: LayerStats(layer.name, layer.input_shape, layer.output_shape)) \
         .to_list()
     fs_seq.set_dataset_type(DatasetType.TEST)
     test_eval = model.evaluate(fs_seq)
     fs_seq.set_dataset_type(DatasetType.TRAINING)
     self.test_loss = test_eval[0].item()
     self.test_categorical_accuracy = test_eval[1].item()
     self.train_shape = fs_seq.get_train_feature_shape()
     self.cross_val_shape = fs_seq.get_crossval_feature_shape()
     self.test_shape = fs_seq.get_test_feature_shape()
     self.epochs = epochs
     self.batch_size = batch_size
     self.tuned_model_params = tuned_model_params
     self.model_params = model_params
     self.history = {
         k: seq(vs).map(lambda v: v.item()).to_list()
         for k, vs in history.items()
     }
     self.discovery_path = discovery_path
     self.timestamp = datetime.now().timestamp()
     self.weight_path = weight_path
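
seq above is presumably PyFunctional's sequence builder, and .item() unwraps NumPy scalars into plain Python values so the stats serialize cleanly. FileSystemSequence, DatasetType, and LayerStats look project-local; the third-party imports this class assumes (a hedged sketch):

# hedged sketch of the assumed imports
from datetime import datetime
from typing import Dict, List
from functional import seq  # PyFunctional
from tensorflow.keras.models import Model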
Example No. 3
def validate_model(model: Model) -> List[float]:
    y_validate = to_categorical(globals()['VALIDATION_LABELS'])
    print(y_validate.shape)

    loss, acc, recall, precision = model.evaluate(
        x=globals()['VALIDATION_DATA'], y=y_validate, batch_size=32)

    print("loss: %.4f" % loss)
    print("categorical accuracy: %.4f" % acc)
    print("recall: %.4f" % recall)
    print("precision: %.4f" % precision)

    return [loss, acc, recall, precision]
Example No. 4
def validate_model_for_all_classes(model: Model) -> dict:
    class_labels = dict()
    class_entities = dict()
    results = dict()
    for i in range(len(globals()['VALIDATION_LABELS'])):
        label = globals()['VALIDATION_LABELS'][i]
        if label not in class_labels:
            class_labels[label] = list()
            class_entities[label] = list()
        class_labels[label].append(label)
        class_entities[label].append(globals()['VALIDATION_DATA'][i])

    for key in class_labels.keys():
        # append the highest label ('9') so to_categorical allocates every
        # class column; the padding row is dropped again below
        label = np.asarray(class_labels[key] + ['9'])
        entities = np.asarray(class_entities[key])

        y_validate = to_categorical(label)[:-1]
        print(y_validate.shape)

        loss, acc, recall, precision = model.evaluate(x=entities, y=y_validate, batch_size=32)
        results[key] = [loss, acc, recall, precision]

    return results
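
The + ['9'] / [:-1] pair above is a padding trick: appending the highest label forces to_categorical to allocate all ten class columns even for a single-class subset, and the padding row is then dropped. A minimal demonstration (assuming labels '0'..'9'):

from tensorflow.keras.utils import to_categorical

labels = ['3', '3']                      # a single-class subset
padded = to_categorical(labels + ['9'])  # shape (3, 10): all ten columns exist
y_validate = padded[:-1]                 # drop the padding row -> shape (2, 10)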
Example No. 5
def writeCSV2(variator: Variator, model: Model, counter=[0, 0]):
    # The mutable default persists across calls: counter[0] flags whether the
    # CSV header has been written, counter[1] tracks the best test accuracy.
    history = variator.histories[-1]
    score = model.evaluate(tX, tY, verbose=0)

    acc = history.history['acc'][-1]
    val_acc = history.history['val_acc'][-1]
    print("counter: " + str(counter[1]))
    if score[1] > counter[1]:
        counter[1] = score[1]
        model_json = model.to_json()
        with open("Models/JSON/modelEvCluster_Architecure.json",
                  "w") as json_file:
            json_file.write(model_json)
        # serialize weights to HDF5
        model.save("Models/Weights/bestEvModelCluster.hd5")
    with open("Logs/modelStats.csv", "a") as f:
        if counter[0] == 0:
            counter[0] += 1
            f.write("Model,acc,val_acc,test_acc\n")
        f.write(variator.currentParameters['modelName'] + "," + str(acc) +
                "," + str(val_acc) + "," + str(score[1]))
        f.write("\n")
Example No. 6
class BaseKerasModel(BaseModel):
    model = None
    tensorboard = None
    train_names = ['train_loss', 'train_mse', 'train_mae']
    val_names = ['val_loss', 'val_mse', 'val_mae']
    counter = 0
    inputs = None
    hidden_layer = None
    outputs = None

    def __init__(self,
                 use_default_dense=True,
                 activation='relu',
                 kernel_regularizer=tf.keras.regularizers.l1(0.001)):
        super().__init__()
        if use_default_dense:
            self.activation = activation
            self.kernel_regularizer = kernel_regularizer

    def create_input_layer(self, input_placeholder: BaseInputFormatter):
        """Creates keras model"""
        self.inputs = tf.keras.layers.InputLayer(
            input_shape=input_placeholder.get_input_state_dimension())
        return self.inputs

    def create_hidden_layers(self, input_layer=None):
        if input_layer is None:
            input_layer = self.inputs
        hidden_layer = tf.keras.layers.Dropout(0.3)(input_layer)
        hidden_layer = tf.keras.layers.Dense(
            128,
            kernel_regularizer=self.kernel_regularizer,
            activation=self.activation)(hidden_layer)
        hidden_layer = tf.keras.layers.Dropout(0.4)(hidden_layer)
        hidden_layer = tf.keras.layers.Dense(
            64,
            kernel_regularizer=self.kernel_regularizer,
            activation=self.activation)(hidden_layer)
        hidden_layer = tf.keras.layers.Dropout(0.3)(hidden_layer)
        hidden_layer = tf.keras.layers.Dense(
            32,
            kernel_regularizer=self.kernel_regularizer,
            activation=self.activation)(hidden_layer)
        hidden_layer = tf.keras.layers.Dropout(0.1)(hidden_layer)
        self.hidden_layer = hidden_layer
        return self.hidden_layer

    def create_output_layer(self,
                            output_formatter: BaseOutputFormatter,
                            hidden_layer=None):
        # sigmoid/tanh all you want on self.model
        if hidden_layer is None:
            hidden_layer = self.hidden_layer
        self.outputs = tf.keras.layers.Dense(
            output_formatter.get_model_output_dimension()[0],
            activation='tanh')(hidden_layer)
        self.model = Model(inputs=self.inputs, outputs=self.outputs)
        return self.outputs

    def write_log(self, callback, names, logs, batch_no, is_eval=False):
        for name, value in zip(names, logs):
            summary = tf.Summary()
            summary_value = summary.value.add()
            summary_value.simple_value = value
            tag_name = name
            if is_eval:
                tag_name = 'eval_' + tag_name
            summary_value.tag = tag_name
            callback.writer.add_summary(summary, batch_no)
            callback.writer.flush()

    def finalize_model(self, logname=None):
        # generate the default per call; a def-time default would reuse one name
        if logname is None:
            logname = str(int(random() * 1000))

        loss, loss_weights = self.create_loss()
        self.model.compile(tf.keras.optimizers.Nadam(lr=0.001),
                           loss=loss,
                           loss_weights=loss_weights,
                           metrics=[
                               tf.keras.metrics.mean_absolute_error,
                               tf.keras.metrics.binary_accuracy
                           ])
        log_name = './logs/' + logname
        self.logger.info("log_name: " + log_name)
        self.tensorboard = tf.keras.callbacks.TensorBoard(
            log_dir=log_name,
            histogram_freq=1,
            write_images=False,
            batch_size=1000,
        )
        self.tensorboard.set_model(self.model)
        self.logger.info("Model has been finalized")

    def fit(self, x, y, batch_size=1):
        if self.counter % 200 == 0:
            logs = self.model.evaluate(x, y, batch_size=batch_size, verbose=1)
            self.write_log(self.tensorboard,
                           self.model.metrics_names,
                           logs,
                           self.counter,
                           is_eval=True)
            print('step:', self.counter)
        else:
            logs = self.model.train_on_batch(x, y)
            self.write_log(self.tensorboard, self.model.metrics_names, logs,
                           self.counter)
        self.counter += 1

    def predict(self, arr):
        return self.model.predict(arr)

    def save(self, file_path):
        self.model.save_weights(filepath=file_path, overwrite=True)

    def load(self, file_path):
        self.model.load_weights(filepath=os.path.abspath(file_path))

    def create_loss(self):
        return 'mean_absolute_error', None
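
write_log above relies on the TF 1.x tf.Summary protobuf API, which was removed in TF 2.x; a hedged TF2 equivalent would route scalars through a summary writer:

import tensorflow as tf

writer = tf.summary.create_file_writer('./logs/run')  # path is illustrative

def write_log_v2(names, logs, step, is_eval=False):
    # TF2-style replacement for the tf.Summary-based write_log above
    with writer.as_default():
        for name, value in zip(names, logs):
            tag = 'eval_' + name if is_eval else name
            tf.summary.scalar(tag, value, step=step)
        writer.flush()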
Example No. 7
def main(args):

    directory = Path('./saved_predictions/')
    directory.mkdir(exist_ok=True)
    directory = Path('./saved_models/')
    directory.mkdir(exist_ok=True)
    directory = Path('./training_checkpoints/')
    directory.mkdir(exist_ok=True)
    input_yx_size = tuple(args.input_yx_size)
    batch_size = args.batch_size
    epochs = args.epochs
    learning_rate = args.learning_rate
    num_test_samples = args.num_test_samples
    save_weights = args.save_weights
    every = args.every
    num_samples = args.num_samples
    save_train_prediction = args.save_train_prediction
    save_test_prediction = args.save_test_prediction
    verbose = args.verbose
    validation_ratio = args.validation_ratio
    y_axis_len, x_axis_len = input_yx_size
    decay = args.decay
    load_weights = args.load_weights
    num_points = y_axis_len * x_axis_len
    is_flat_channel_in = args.is_flat_channel_in
    input_points = Input(shape=(num_points, 4))

    x = input_points
    x = Convolution1D(64, 1, activation='relu', input_shape=(num_points, 4))(x)
    x = BatchNormalization()(x)
    x = Convolution1D(128, 1, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Convolution1D(512, 1, activation='relu')(x)
    x = BatchNormalization()(x)
    x = MaxPooling1D(pool_size=num_points)(x)
    x = Dense(512, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dense(256, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dense(16,
              weights=[
                  np.zeros([256, 16]),
                  np.array([1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
                            1]).astype(np.float32)
              ])(x)
    input_T = Reshape((4, 4))(x)

    # forward net
    g = Lambda(mat_mul, arguments={'B': input_T})(input_points)
    g = Convolution1D(64, 1, input_shape=(num_points, 3), activation='relu')(g)
    g = BatchNormalization()(g)
    g = Convolution1D(64, 1, input_shape=(num_points, 3), activation='relu')(g)
    g = BatchNormalization()(g)

    # feature transformation net
    f = Convolution1D(64, 1, activation='relu')(g)
    f = BatchNormalization()(f)
    f = Convolution1D(128, 1, activation='relu')(f)
    f = BatchNormalization()(f)
    f = Convolution1D(128, 1, activation='relu')(f)
    f = BatchNormalization()(f)
    f = MaxPooling1D(pool_size=num_points)(f)
    f = Dense(512, activation='relu')(f)
    f = BatchNormalization()(f)
    f = Dense(256, activation='relu')(f)
    f = BatchNormalization()(f)
    f = Dense(64 * 64,
              weights=[
                  np.zeros([256, 64 * 64]),
                  np.eye(64).flatten().astype(np.float32)
              ])(f)
    feature_T = Reshape((64, 64))(f)

    # forward net
    g = Lambda(mat_mul, arguments={'B': feature_T})(g)
    seg_part1 = g
    g = Convolution1D(64, 1, activation='relu')(g)
    g = BatchNormalization()(g)
    g = Convolution1D(32, 1, activation='relu')(g)
    g = BatchNormalization()(g)
    g = Convolution1D(32, 1, activation='relu')(g)
    g = BatchNormalization()(g)

    # global_feature
    global_feature = MaxPooling1D(pool_size=num_points)(g)
    global_feature = Lambda(exp_dim,
                            arguments={'num_points': num_points})(global_feature)

    # point_net_seg
    c = concatenate([seg_part1, global_feature])
    """ c = Convolution1D(512, 1, activation='relu')(c)
    c = BatchNormalization()(c)
    c = Convolution1D(256, 1, activation='relu')(c)
    c = BatchNormalization()(c)
    c = Convolution1D(128, 1, activation='relu')(c)
    c = BatchNormalization()(c)
    c = Convolution1D(128, 1, activation='relu')(c)
    c = BatchNormalization()(c) """
    c = Convolution1D(256, 1, activation='relu')(c)
    c = BatchNormalization()(c)
    c = Convolution1D(128, 4, activation='relu', strides=4)(c)
    c = BatchNormalization()(c)
    c = Convolution1D(128, 4, activation='relu', strides=4)(c)
    c = BatchNormalization()(c)
    c = Convolution1D(128, 4, activation='relu', strides=4)(c)
    c = BatchNormalization()(c)
    c = Convolution1D(64, 4, activation='relu', strides=4)(c)
    c = BatchNormalization()(c)
    c = Convolution1D(64, 4, activation='relu', strides=4)(c)
    c = BatchNormalization()(c)
    c = Convolution1D(32, 1, activation='relu')(c)
    c = BatchNormalization()(c)
    """ c = Convolution1D(128, 4, activation='relu',strides=4)(c)
    c = Convolution1D(64, 4, activation='relu',strides=4)(c)
    c = Convolution1D(32, 4, activation='relu',strides=4)(c)
    c = Convolution1D(16, 1, activation='relu')(c)
    c = Convolution1D(1, 1, activation='relu')(c) """
    #c = tf.keras.backend.squeeze(c,3);
    c = CuDNNLSTM(64, return_sequences=False)(c)
    #c =CuDNNLSTM(784, return_sequences=False))
    #c =CuDNNLSTM(256, return_sequences=False))

    #c = Reshape([16,16,1])(c)
    c = Reshape([8, 8, 1])(c)
    c = Conv2DTranspose(8, (3, 3),
                        padding="same",
                        activation="relu",
                        strides=(2, 2))(c)
    c = Conv2DTranspose(8, (3, 3), padding="valid", activation="relu")(c)
    #c =Dropout(0.4))
    c = tf.keras.layers.BatchNormalization()(c)
    c = Conv2DTranspose(16, (3, 3), padding="valid", activation="relu")(c)
    #c =Dropout(0.4))
    c = tf.keras.layers.BatchNormalization()(c)
    c = Conv2DTranspose(32, (3, 3), padding="valid", activation="relu")(c)
    #c =Dropout(0.4))
    c = tf.keras.layers.BatchNormalization()(c)
    c = Conv2DTranspose(32, (3, 3), padding="valid", activation="relu")(c)
    #c =Dropout(0.4))
    c = tf.keras.layers.BatchNormalization()(c)
    c = Conv2DTranspose(32, (3, 3), padding="valid", activation="relu")(c)
    #c =Dropout(0.4))
    c = tf.keras.layers.BatchNormalization()(c)
    c = Conv2DTranspose(64, (3, 3), padding="valid", activation="relu")(c)
    #c =Dropout(0.4))
    c = tf.keras.layers.BatchNormalization()(c)
    c = Conv2DTranspose(64, (3, 3), padding="valid", activation="relu")(c)
    #c =Dropout(0.4))
    c = tf.keras.layers.BatchNormalization()(c)

    #c =Dropout(0.4))

    c = Conv2DTranspose(128, (3, 3),
                        padding="same",
                        activation="relu",
                        strides=(2, 2))(c)
    c = tf.keras.layers.BatchNormalization()(c)

    c = Conv2DTranspose(128, (3, 3), padding="valid", activation="relu")(c)

    #c =Dropout(0.4))
    c = tf.keras.layers.BatchNormalization()(c)
    c = Conv2DTranspose(128, (3, 3),
                        padding="same",
                        activation="relu",
                        strides=(2, 2))(c)
    c = tf.keras.layers.BatchNormalization()(c)

    c = Conv2DTranspose(128, (3, 3), padding="valid", activation="relu")(c)
    c = tf.keras.layers.BatchNormalization()(c)

    #c =Dropout(0.4))
    #c =tf.keras.layers.BatchNormalization())
    c = Conv2DTranspose(64, (3, 3), padding="same", strides=(4, 2))(c)
    c = tf.keras.layers.BatchNormalization()(c)

    c = Conv2DTranspose(32, (3, 3), padding="valid", activation="relu")(c)
    c = tf.keras.layers.BatchNormalization()(c)

    c = Conv2DTranspose(32, (3, 3), padding="valid", activation="relu")(c)
    c = tf.keras.layers.BatchNormalization()(c)

    #c =Dropout(0.4))
    c = Conv2DTranspose(32, (3, 3),
                        padding="same",
                        activation="relu",
                        strides=(1, 1))(c)
    c = tf.keras.layers.BatchNormalization()(c)

    c = Conv2DTranspose(32, (3, 1), padding="valid", activation="relu")(c)
    c = tf.keras.layers.BatchNormalization()(c)

    c = Conv2DTranspose(32, (3, 1), padding="valid", activation="relu")(c)
    c = tf.keras.layers.BatchNormalization()(c)
    c = Conv2DTranspose(16, (1, 1), padding="valid", activation="relu")(c)
    c = tf.keras.layers.BatchNormalization()(c)
    c = Conv2DTranspose(8, (1, 1), padding="valid", activation="relu")(c)
    c = tf.keras.layers.BatchNormalization()(c)

    c = Conv2DTranspose(1, (1, 1), padding="valid")(c)
    """ c =Conv2DTranspose(4, (1,1),padding="same",activation="relu"))
    c =Conv2DTranspose(2, (1,1),padding="same",activation="relu"))
    #c =Dropout(0.4))
    c =Conv2DTranspose(1, (1,1),padding="same")) """
    prediction = tf.keras.layers.Reshape([512, 256])(c)
    """ c1 ,c2  = tf.split(c,[256,256],axis=1,name="split")
    complexNum = tf.dtypes.complex(
        c1,
        c2,
        name=None
    )

    complexNum =tf.signal.ifft2d(
        complexNum,
        name="IFFT"
    )
    real = tf.math.real(complexNum)
    imag = tf.math.imag(complexNum)

    con = concatenate([real,imag])

    prediction  =tf.keras.layers.Reshape([ 512, 256])(con)
    """
    # define model
    model = Model(inputs=input_points, outputs=prediction)
    opt = tf.keras.optimizers.Adam(lr=learning_rate, decay=decay)

    loss = tf.keras.losses.MeanSquaredError()
    metric = ['mse']
    if args.loss == "MAE":
        loss = tf.keras.losses.MeanAbsoluteError()
        metric = ['mae']

    model.compile(
        loss=loss,
        optimizer=opt,
        metrics=metric,
    )

    model.summary()
    if load_weights:
        model.load_weights('./training_checkpoints/cp-best_loss.ckpt')

    # edit data_loader.py if you want to play with the data
    input_ks, ground_truth = load_data(num_samples,
                                       is_flat_channel_in=is_flat_channel_in)

    input_ks = input_ks / np.max(input_ks)

    checkpoint_path = "./training_checkpoints/cp-{epoch:04d}.ckpt"
    checkpoint_dir = os.path.dirname(checkpoint_path)

    # Create the checkpoint callback.
    # To save the model's weights, set save_weights to True.

    cp_callback = []

    NAME = "NUFFT_NET"

    tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))
    cp_callback.append(tensorboard)
    if save_weights:
        cp_callback.append(
            # pass the formatted path, not the directory, so per-epoch files work
            tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                               save_weights_only=True,
                                               verbose=verbose,
                                               period=every))

    if args.is_train:
        model.fit(input_ks,
                  ground_truth,
                  batch_size=batch_size,
                  epochs=epochs,
                  validation_split=validation_ratio,
                  callbacks=cp_callback)

    if args.name_model != "":
        model.save('./saved_models/' + args.name_model)
    dict_name = './saved_predictions/'
    # return to image size
    x_axis_len = int(x_axis_len / 4)
    np.random.seed(int(time()))

    if save_train_prediction <= num_samples:
        rand_ix = np.random.randint(0, num_samples - 1, save_train_prediction)
        #kspace = np.zeros((save_train_prediction,
        #y_axis_len,input_ks[rand_ix].shape[1]))
        kspace = input_ks[rand_ix]
        if args.save_input:
            np.save("./saved_predictions/inputs.npy", input_ks[rand_ix])
        ground_truth = ground_truth[rand_ix]
        preds = model.predict(kspace, batch_size=save_train_prediction)
        for i in range(save_train_prediction):

            output = np.reshape(preds[i], (y_axis_len * 2, x_axis_len))
            output = output * 255
            output_gt = ground_truth[i]
            # stack the prediction on top of the ground truth for inspection
            output = np.concatenate([output, output_gt], axis=0)
            np.save(dict_name + 'prediction%d.npy' % (i + 1), output)

    # load the test split (needed below even when train predictions are skipped)
    input_ks, ground_truth = load_data(
        num_test_samples, 'test', is_flat_channel_in=is_flat_channel_in)

    input_ks = input_ks / np.max(input_ks)
    if args.is_eval:
        model.evaluate(input_ks,
                       ground_truth,
                       batch_size=batch_size,
                       verbose=verbose,
                       callbacks=cp_callback)

    if save_test_prediction <= num_test_samples:
        rand_ix = np.random.randint(0, num_test_samples - 1,
                                    save_test_prediction)
        kspace = input_ks[rand_ix]
        if args.save_input:
            np.save("./saved_predictions/test_inputs.npy", input_ks[rand_ix])
        ground_truth = ground_truth[rand_ix]
        preds = model.predict(kspace, batch_size=save_test_prediction)
        for i in range(save_test_prediction):

            output = np.reshape(preds[i], (y_axis_len * 2, x_axis_len))
            output = output * 255
            output_gt = ground_truth[i]
            output = np.concatenate([output, output_gt], axis=0)
            np.save(dict_name + 'test_prediction%d.npy' % (i + 1), output)
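
The network above calls mat_mul and exp_dim inside Lambda layers without defining them; in PointNet-style Keras code they are usually the following (a hedged sketch, not the author's definitions):

import tensorflow as tf

def mat_mul(A, B):
    # batched multiply of per-point features by the learned transform
    return tf.matmul(A, B)

def exp_dim(global_feature, num_points):
    # tile the pooled global feature so it can be concatenated to every point
    return tf.tile(global_feature, [1, num_points, 1])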
Example No. 8
y = tf.keras.layers.Dense(units=32,
                          activation='elu',
                          kernel_initializer='he_uniform')(y)
y = tf.keras.layers.Dense(units=2,
                          activation='softmax',
                          kernel_initializer='he_uniform')(y)
wenz_model = Model(inputs=[input1, input2], outputs=y)

adam = Adam(lr=0.02, decay=0.01)
# use the configured optimizer; the plain string 'adam' would ignore it
wenz_model.compile(optimizer=adam,
                   loss=tf.keras.losses.BinaryCrossentropy(),
                   metrics=['accuracy'])

checkpoint_path = "training/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)

# Create a callback that saves the model's weights
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                 save_weights_only=True,
                                                 verbose=1)

wenz_model.fit(XY, epochs=10,
               callbacks=[cp_callback])  # add validation training_data?
test_loss, test_acc = wenz_model.evaluate(XYt, verbose=2)

print('\nTest accuracy:', test_acc)

wenz_model.save(
    '/home/pirate/PycharmProjects/SchafkopfAI/models/trained_models/test-wenz-prediction6'
)
store.close()
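
Because the checkpoint stores weights only, restoring later needs the same architecture in place first; a hedged usage sketch:

# rebuild (or reuse) the architecture, then restore the checkpointed weights
wenz_model.load_weights(checkpoint_path)
loss, acc = wenz_model.evaluate(XYt, verbose=2)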
Example No. 9
# The opening of this snippet is missing; `checkpoint` is presumably a
# ModelCheckpoint callback along these lines ('model.h5' is a placeholder path):
checkpoint = ModelCheckpoint(filepath='model.h5',
                             save_best_only=True)

lr_scheduler = LearningRateScheduler(lr_schedule)

lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                               cooldown=0,
                               patience=5,
                               min_lr=0.5e-6)

callbacks = [checkpoint, lr_reducer, lr_scheduler]

# choose training configs
sgd = SGD(lr=0.001, momentum=0.9)
model.compile(optimizer=sgd,
              loss=categorical_crossentropy,
              metrics=['accuracy'])
model.summary()

# train
hist = model.fit(x_train,
                 y_train,
                 batch_size=100,
                 epochs=10,
                 shuffle=True,
                 verbose=1,
                 validation_split=0.1,
                 callbacks=callbacks)

# test
model.evaluate(x_test, y_test, verbose=1)
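
lr_schedule is not shown in this snippet; a hedged sketch of a typical step-decay schedule compatible with LearningRateScheduler:

def lr_schedule(epoch):
    # illustrative only: start at 1e-3 and halve every 3 epochs
    return 0.001 * (0.5 ** (epoch // 3))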
Example No. 10
def train_test_model2(hparams):

    start_time = time.time()
    net_input = Input(shape=X_train.shape[1:])
    output = tf.expand_dims(net_input, axis=-1)
    output = GRU(units=hparams[HP_NUM_UNITS],
                 activation="relu",
                 return_sequences=True)(output)
    output = GRU(
        units=hparams[HP_NUM_UNITS],
        activation="relu",
    )(output)

    #dropout_out = Dropout(0.8)(output)

    # Adding some further layers (replace or remove with your architecture):
    out = Dense(units=20, activation="relu")(output)

    # Building model:
    model = Model(inputs=net_input, outputs=out)
    model.compile(loss='mean_squared_error',
                  optimizer="rmsprop",
                  metrics=['acc'])
    model.summary()  # summary() prints itself and returns None
    plot_model(model,
               to_file='model_plot' + dateAndTimeNow + '.png',
               show_shapes=True,
               show_layer_names=True)
    model.fit(X_train,
              y_train,
              epochs=epochs,
              batch_size=hparams[HP_BATCH_SIZE],
              verbose=1,
              callbacks=[
                  tf.keras.callbacks.TensorBoard("logs/fit/" + dateAndTimeNow)
              ])  # Run with 1 epoch to speed things up for demo purposes
    test_predictions = model.predict(X_test)

    y_test2 = np.argmax(y_test, axis=1)
    test_predictions = np.round(test_predictions)
    sums = np.sum(test_predictions, axis=1)

    test_predictions2 = np.argmax(test_predictions, axis=1)
    #print(sklearn.metrics.multilabel_confusion_matrix(y_test2, test_predictions2))
    _, accuracy = model.evaluate(X_test, y_test)
    evaluate_model("GRU" + str(hparams[HP_NUM_UNITS]), y_test2,
                   test_predictions2)
    print("Total Time" + str(time.time() - start_time))
    """
    # Best 0.96
    #step = X_train.shape[1]
    model = Sequential()
    #model.add(Embedding(10000,300,input_length=1))
    model.add(GRU(input_shape=(100), units=10, activation="softmax"))
    model.add(Dense(20, activation="softmax"))
    model.compile(loss='mean_squared_error', optimizer='rmsprop', metrics=['acc'])
    model.summary()
    plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)

    model.fit(X_train, y_train, epochs=epochs, batch_size=hparams[HP_BATCH_SIZE])  # Run with 1 epoch to speed things up for demo purposes
    test_predictions = model.predict(X_test)
    

    """

    return accuracy
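
HP_NUM_UNITS and HP_BATCH_SIZE point at the TensorBoard HParams plugin; a hedged sketch of how this function is typically driven (the domains are illustrative):

from tensorboard.plugins.hparams import api as hp

HP_NUM_UNITS = hp.HParam('num_units', hp.Discrete([16, 32]))
HP_BATCH_SIZE = hp.HParam('batch_size', hp.Discrete([32, 64]))

for units in HP_NUM_UNITS.domain.values:
    for batch in HP_BATCH_SIZE.domain.values:
        accuracy = train_test_model2({HP_NUM_UNITS: units,
                                      HP_BATCH_SIZE: batch})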
Example No. 11
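The snippet below starts from tensors defined earlier; a hedged sketch of the missing setup (the shape and class count are illustrative placeholders):

from tensorflow.keras.layers import Input, Conv1D, MaxPooling1D, LSTM, Dense
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import RMSprop

emotion_len = 7                # hypothetical number of emotion classes
inputs = Input(shape=(40, 1))  # hypothetical (timesteps, channels) input
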
x = Conv1D(32, 4, padding="same", activation='relu')(inputs)
x = Conv1D(64, 4, padding="same", activation='relu')(x)
x = Conv1D(128, 2, padding="same", activation='relu')(x)
x = MaxPooling1D(pool_size=3)(x)

# print(x.shape)
# x = Flatten()(x)
# x = Reshape((-1,-1))(x)
x = LSTM(64, dropout=0.2, recurrent_dropout=0.2, return_sequences=False)(x)

x = Dense(128, activation='relu')(x)
x = Dense(emotion_len, activation='softmax')(x)

model = Model(inputs=inputs, outputs=x)

#model.summary()
model.compile(loss=categorical_crossentropy,
              optimizer=RMSprop(),
              metrics=['accuracy'])

history = model.fit(X_train,
                    Y_train,
                    batch_size=32,
                    epochs=50,
                    validation_data=(X_test, Y_test),
                    verbose=1,
                    shuffle=True)

loss, acc = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', loss)
print('Test accuracy:', acc)