Example #1
 def __init__(self, input_shape, action_space):
     self.model = models.Sequential()
     self.model.add(
         layers.Conv2D(32,
                       8,
                       strides=(4, 4),
                       padding="valid",
                       activation="relu",
                       input_shape=input_shape,
                       data_format="channels_first"))
     # input_shape only needs to be declared on the first layer.
     self.model.add(
         layers.Conv2D(64,
                       4,
                       strides=(2, 2),
                       padding="valid",
                       activation="relu",
                       data_format="channels_first"))
     self.model.add(
         layers.Conv2D(64,
                       3,
                       strides=(1, 1),
                       padding="valid",
                       activation="relu",
                       data_format="channels_first"))
     self.model.add(layers.Flatten())
     self.model.add(layers.Dense(512, activation="relu"))
     self.model.add(layers.Dense(action_space))
     self.model.compile(loss="mean_squared_error",
                        optimizer=optimizers.RMSprop(lr=0.00025,
                                                     rho=0.95,
                                                     epsilon=0.01),
                        metrics=["accuracy"])
     self.model.summary()
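The lr, rho, and epsilon arguments follow the older tf.keras API. A minimal sketch of the same optimizer in the current spelling, where the legacy lr alias has been replaced by learning_rate:

from tensorflow.keras import optimizers

# Equivalent RMSprop configuration with the current argument name;
# recent Keras releases no longer accept the legacy `lr` alias.
opt = optimizers.RMSprop(learning_rate=0.00025, rho=0.95, epsilon=0.01)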
Example #2
def getAcc(model_config, data):
    """Train a bidirectional LSTM and return the loss and accuracy on the validation data.

    model_config: tuple of model hyperparameters (see the unpacking below).
    data: tuple of train and validation arrays.
    """
    xtrain, ytrain, xval, yval = data

    nnodes_h1, dropout_h1, nnodes_h2, dropout_h2, merge, nbatch, opt, nepoch, lr = model_config
    nframe = xtrain.shape[1]
    isize = xtrain.shape[2]
    
    model = Sequential()
    model.add(Bidirectional(LSTM(nnodes_h1, return_sequences=True, dropout=dropout_h1),
                            merge_mode=merge, input_shape=(nframe, isize)))
    model.add(Bidirectional(LSTM(nnodes_h2, return_sequences=False, dropout=dropout_h2),
                            merge_mode=merge))
    # model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    
    # choose optimiser
    if opt == 'adam':
        opt = optimizers.Adam(learning_rate=lr)
    elif opt == 'sgd':
        opt = optimizers.SGD(learning_rate=lr)
    else:
        opt = optimizers.RMSprop(learning_rate=lr)
    model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])

    model.fit(xtrain, ytrain, batch_size=nbatch, epochs=nepoch, verbose=0)
    loss, acc = model.evaluate(xval, yval, verbose=0)
    clear_session()
    return loss, acc
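A hedged usage sketch for getAcc; the hyperparameter order must match the unpacking above, and the shapes (samples, frames, features) are placeholder assumptions:

import numpy as np

# Hypothetical call with random placeholder data; the config order is
# (nnodes_h1, dropout_h1, nnodes_h2, dropout_h2, merge, nbatch, opt, nepoch, lr).
config = (64, 0.2, 32, 0.2, 'concat', 32, 'adam', 5, 1e-3)
xtrain = np.random.rand(100, 20, 8)   # (samples, frames, features)
ytrain = np.random.randint(0, 2, 100)
xval = np.random.rand(20, 20, 8)
yval = np.random.randint(0, 2, 20)
loss, acc = getAcc(config, (xtrain, ytrain, xval, yval))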
Example #3
def tsnn_network(img_d=224, img_c=3):
    anchor_input = Input((
        img_d,
        img_d,
        img_c,
    ), name='anchor_input')
    positive_input = Input((
        img_d,
        img_d,
        img_c,
    ), name='positive_input')
    negative_input = Input((
        img_d,
        img_d,
        img_c,
    ), name='negative_input')

    # Shared embedding layer for positive and negative items
    Shared_DNN = create_base_network()
    encoded_anchor = Shared_DNN(anchor_input)
    encoded_positive = Shared_DNN(positive_input)
    encoded_negative = Shared_DNN(negative_input)

    merged_vector = concatenate(
        [encoded_anchor, encoded_positive, encoded_negative],
        axis=-1,
        name='merged_layer')
    model = Model(inputs=[anchor_input, positive_input, negative_input],
                  outputs=merged_vector)
    #model.compile(optimizer=optimizers.RMSprop(lr=2e-5), loss=triplet_loss)
    model.compile(optimizer=optimizers.RMSprop(lr=1e-3), loss=triplet_loss)
    return model
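triplet_loss is referenced above but not defined in this excerpt. A minimal sketch, assuming 128-dimensional embeddings concatenated in anchor/positive/negative order and an assumed margin of 0.4:

import tensorflow as tf

def triplet_loss(y_true, y_pred, emb_size=128, margin=0.4):
    # Split the concatenated output back into the three embeddings.
    anchor = y_pred[:, :emb_size]
    positive = y_pred[:, emb_size:2 * emb_size]
    negative = y_pred[:, 2 * emb_size:]
    # Standard margin-based triplet objective on squared L2 distances.
    pos_dist = tf.reduce_sum(tf.square(anchor - positive), axis=-1)
    neg_dist = tf.reduce_sum(tf.square(anchor - negative), axis=-1)
    return tf.reduce_mean(tf.maximum(pos_dist - neg_dist + margin, 0.0))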
Example #4
def create_model(input_shape):
    model = models.Sequential([
        layers.Convolution2D(
            filters=16,
            input_shape=input_shape,
            kernel_size=(4, 4),
            padding="same",
            activation="relu",
        ),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Convolution2D(filters=32,
                             kernel_size=(4, 4),
                             padding="same",
                             activation="relu"),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Convolution2D(filters=64,
                             kernel_size=(3, 3),
                             padding="same",
                             activation="relu"),
        layers.Flatten(),
        layers.Dense(units=256, activation="relu"),
        layers.Dense(units=6, activation="softmax"),
    ])
    model.compile(
        optimizer=optimizers.RMSprop(lr=2e-4),
        loss="sparse_categorical_crossentropy",
        metrics=["acc"],
    )
    return model
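A hypothetical call, assuming 64x64 RGB inputs; the six-way softmax head implies a six-class problem:

# Hypothetical usage; the input size is an assumption.
model = create_model((64, 64, 3))
model.summary()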
Example #5
def get_original_model(input_size, n_actions, lr):
    """Returns short conv model with mask at the end of the network. Copy of network from papers."""
    screen_input = layers.Input(input_size)
    actions_input = layers.Input((n_actions,))

    x = layers.Lambda(lambda x: x / 255.0)(screen_input)

    x = layers.Conv2D(16, (8, 8), strides=(4, 4))(x)
    x = layers.ReLU()(x)

    x = layers.Conv2D(32, (4, 4), strides=(2, 2))(x)
    x = layers.ReLU()(x)

    x = layers.Flatten()(x)
    x = layers.Dense(256, activation='relu')(x)
    x = layers.Dense(n_actions)(x)
    x = layers.Multiply()([x, actions_input])

    model = models.Model(inputs=[screen_input, actions_input], outputs=x)
    optimizer = optimizers.RMSprop(lr=lr,
                                   rho=0.95,
                                   epsilon=0.01,
                                   momentum=0.95)
    model.compile(optimizer, loss='mse')
    return model
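A sketch of how the action mask is typically used at training time: one-hot masks select the Q-value of the taken action, so the MSE loss only flows through that output. Shapes, action count, and learning rate here are assumptions:

import numpy as np
import tensorflow as tf

model = get_original_model((84, 84, 4), 4, 0.00025)
states = np.zeros((32, 84, 84, 4), dtype=np.float32)   # dummy batch of screens
actions = tf.one_hot(np.random.randint(0, 4, 32), 4)   # masks for the taken actions
targets = np.random.rand(32, 1).astype(np.float32)     # placeholder TD targets
# Multiply() zeroes every output except the chosen action, so the
# regression target must be masked the same way.
model.train_on_batch([states, actions], actions * targets)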
Example #6
    def __init__(self,
                 model,
                 learning_rate=7e-3,
                 gamma=0.99,
                 batch_size=64,
                 value_c=0.5,
                 entropy_c=1e-4):
        self.model = model
        self.gamma = gamma
        self.batch_size = batch_size
        self.value_c = value_c  # Coefficients are used for the loss terms.
        self.entropy_c = entropy_c

        # Define optimiser and losses used to train the model.
        self.model.compile(
            optimizer=ko.RMSprop(lr=learning_rate),
            #optimizer=ko.Adam(learning_rate=learning_rate, beta_1=0.9, beta_2=0.999, amsgrad=False),
            loss=[self._logits_loss, self._value_loss])

        # Structures for storing data used during training.
        self.actions = np.empty((batch_size, ), dtype=np.int32)
        self.rewards, self.dones, self.values = np.empty((3, batch_size))
        self.observations = np.empty((batch_size, ) + self.model.in_shape)
        self.ep_rewards = [0.0]
        self.t = 0  # Total timesteps seen.
        self.i = 0  # Counter used to determine when to train (batch size reached).
        self.update_count = 0  # Number of updates completed.
Example #7
def compile_model_and_train(classifier, regressor, lr, labels, val_classifier_imgs, val_regressor_imgs, val_labels, val_boxes):
    classifier.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=lr), metrics=['acc'])
    regressor.compile(loss='mean_squared_error', optimizer=optimizers.Adam(lr=lr), metrics=['acc'])
    with open(OUTPUT_STATISTICS_PATH, 'w', newline='') as statistics_file:
        # Create the CSV writer once, outside the epoch loop
        statistics_writer = writer(statistics_file, delimiter=',')
        # Train models
        for epoch in range(NUM_EPOCHS):
            print("\n\nEPOCH: ", epoch)
            # Create a list of batches
            dataset = create_batch_list(labels, positive_box_threshold=0.8, negative_box_threshold=0.3)

            error_train_classifier = []
            error_train_regressor = []

            # Train models on every batch
            for (classifier_images, regressor_images, classes, boxes) in dataset:
                error_train_classifier.append(classifier.train_on_batch(np.array(classifier_images), np.array(classes)))
                error_train_regressor.append(regressor.train_on_batch(np.array(regressor_images), np.array(boxes)))
            error_val_classifier = classifier.test_on_batch(np.array(val_classifier_imgs), np.array(val_labels))
            error_val_regressor = regressor.test_on_batch(np.array(val_regressor_imgs), np.array(val_boxes))

            # Calculate statistics (train_on_batch returns [loss, acc] per batch)
            mean_error_train_classifier = mean(e[0] for e in error_train_classifier)
            mean_accuracy_train_classifier = mean(e[1] for e in error_train_classifier)
            mean_error_train_regressor = mean(e[0] for e in error_train_regressor)
            mean_accuracy_train_regressor = mean(e[1] for e in error_train_regressor)

            print("Classifier train error: %lf, Accuracy: %lf" % (mean_error_train_classifier, mean_accuracy_train_classifier))
            print("Classifier validation error: %lf, Accuracy: %lf" % (error_val_classifier[0], error_val_classifier[1]))
            print("Regressor train error: %lf, Accuracy: %lf" % (mean_error_train_regressor, mean_accuracy_train_regressor))
            print("Regressor validation error: %lf, Accuracy: %lf" % (error_val_regressor[0], error_val_regressor[1]))

            # Save statistics
            statistics_writer.writerow([epoch + 1, mean_error_train_classifier, mean_accuracy_train_classifier, mean_error_train_regressor, mean_accuracy_train_regressor])
Example #8
def get_ann(
    n_hidden=4, n_neurons=20, kernel_initializer="he_normal", bias_initializer=initializers.Ones()
):
    model = Sequential()

    model.add(
        Dense(
            units=n_neurons,
            input_dim=14,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
        )
    )
    model.add(keras.layers.LeakyReLU(alpha=0.2))
    model.add(Dropout(rate=0.1))

    for _ in range(n_hidden):
        model.add(
            Dense(
                units=n_neurons,
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer,
            )
        )
        model.add(keras.layers.LeakyReLU(alpha=0.2))
        model.add(Dropout(rate=0.1))

    model.add(Dense(units=1, activation="linear"))

    optimizer = optimizers.RMSprop()
    model.compile(loss="mse", optimizer=optimizer, metrics=["mse", "mae"])

    return model
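A hypothetical fit on random placeholder data with 14 features, matching input_dim=14 above:

import numpy as np

model = get_ann(n_hidden=2, n_neurons=32)
X = np.random.rand(256, 14)   # placeholder features
y = np.random.rand(256)       # placeholder regression targets
model.fit(X, y, epochs=5, batch_size=32, verbose=0)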
Example #9
def MobileNet_impl(alpha=1):
    data_in = Input(shape=(28, 28, 1))
    x = ZeroPadding2D(padding=(2, 2))(data_in)

    x = Conv2D(int(32 * alpha), (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = depthwise_sep_conv(x, 64, alpha)
    x = depthwise_sep_conv(x, 128, alpha, strides=(2, 2))
    x = depthwise_sep_conv(x, 128, alpha)
    x = depthwise_sep_conv(x, 256, alpha, strides=(2, 2))
    x = depthwise_sep_conv(x, 256, alpha)
    x = depthwise_sep_conv(x, 512, alpha, strides=(2, 2))
    for _ in range(5):
        x = depthwise_sep_conv(x, 512, alpha)
    x = depthwise_sep_conv(x, 1024, alpha, strides=(2, 2))
    x = depthwise_sep_conv(x, 1024, alpha)
    x = GlobalAveragePooling2D()(x)
    x = Dense(units=10)(x)
    x = Activation('softmax')(x)

    mobilenet_model = keras.Model(inputs=data_in,
                                  outputs=x,
                                  name="mobilenet_model")
    mobilenet_model.compile(optimizer=optimizers.RMSprop(lr=0.01),
                            loss=keras.losses.categorical_crossentropy,
                            metrics=['accuracy'])

    return mobilenet_model
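depthwise_sep_conv is referenced above but not shown. A plausible sketch following the MobileNet paper (a 3x3 depthwise convolution, then a 1x1 pointwise convolution, each with batch norm and ReLU); the exact layer ordering in the original project is an assumption:

from tensorflow.keras.layers import (Activation, BatchNormalization, Conv2D,
                                     DepthwiseConv2D)

def depthwise_sep_conv(x, filters, alpha, strides=(1, 1)):
    # Depthwise 3x3 stage.
    x = DepthwiseConv2D((3, 3), strides=strides, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # Pointwise 1x1 stage, width-scaled by alpha.
    x = Conv2D(int(filters * alpha), (1, 1), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x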
Example #10
    def __init__(self, encoding_size, gene_size,
                 generator_factory: Callable[[int, int], Model] = _build_generator,
                 encoder_factory: Callable[[int, int], Model] = _build_encoder,
                 discriminator_factory: Callable[[int, int], Model] = _build_discriminator):
        super().__init__(encoding_size, gene_size, generator_factory, encoder_factory, discriminator_factory)
        discr_optimizer = optimizers.RMSprop(learning_rate=0.0075, rho=0.85, momentum=0.1)

        self._generator.trainable = True
        self._encoder.trainable = False
        self._discriminator.trainable = False
        discr_gen_output = self._discriminator((self._generator.inputs[0], self._generator.output))
        self._train_gen_w_discr = Model(self._generator.inputs, discr_gen_output, name='train-generator-with-discriminator')
        self._train_gen_w_discr.compile(optimizer=discr_optimizer, loss=losses.binary_crossentropy)

        gen_output = self._generator((self._encoder.output, self._generator.inputs[1]))
        self._train_gen_w_enc = Model((self._encoder.inputs, self._generator.inputs[1]), gen_output, name='train-generator-with-encoder')
        self._train_gen_w_enc.compile(optimizer=discr_optimizer, loss=losses.mse)

        self._generator.trainable = False
        self._encoder.trainable = True
        self._discriminator.trainable = False
        discr_enc_output = self._discriminator((self._encoder.output, self._encoder.input))
        self._train_enc_w_discr = Model(self._encoder.inputs, discr_enc_output, name='train-encoder-with-discriminator')
        self._train_enc_w_discr.compile(optimizer=discr_optimizer, loss=losses.binary_crossentropy)

        enc_output = self._encoder(self._generator.output)
        self._train_enc_w_gen = Model(self._generator.inputs, enc_output, name='train-encoder-with-generator')
        self._train_enc_w_gen.compile(optimizer=discr_optimizer, loss=losses.mse)

        self._generator.trainable = False
        self._encoder.trainable = False
        self._discriminator.trainable = True
        self._discriminator.compile(optimizer=discr_optimizer, loss=losses.binary_crossentropy)
Example #11
def learn_embedding_model(cf, x, y, embedding_size, num_epochs, batch_size,
                          num_sampled):
    """ Use tensorflow to learn embedding model.
    """
    vocab_size = len(cf.sensornames)
    print('parameters', vocab_size, embedding_size, num_sampled)
    model = NCEModel(vocab_size=vocab_size,
                     embeddings_size=embedding_size,
                     nce_num_sampled=num_sampled)
    optimizer = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
    model.compile(optimizer=optimizer, loss=empty_loss)

    logdir = create_logdir()  # prepare tensorboard callback
    tensorboard_callback = keras.callbacks.TensorBoard(logdir, profile_batch=0)
    os.makedirs(logdir, exist_ok=True)

    model.fit(x=[x.reshape([-1, 1]), y.reshape([-1, 1])],
              y=y,
              epochs=num_epochs,
              batch_size=batch_size,
              callbacks=[tensorboard_callback])

    sensor_x = np.arange(vocab_size)
    embeddings = model.predict(
        (sensor_x.reshape([-1, 1]), sensor_x.reshape([-1, 1])))
    normalized_embeddings = normalize_embeddings(embeddings)
    if cf.save_model:
        print('Saving model.')
        embeddings_dir = 'model'
        embeddings_filename = os.path.join(embeddings_dir, 'embeddings')
        np.save(embeddings_filename, normalized_embeddings)
    return normalized_embeddings
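normalize_embeddings is not shown in this excerpt; a common choice is row-wise L2 normalisation, sketched here as an assumption:

import numpy as np

def normalize_embeddings(embeddings):
    # Scale each embedding row to unit L2 norm, guarding against zeros.
    norms = np.linalg.norm(embeddings, axis=1, keepdims=True)
    return embeddings / np.maximum(norms, 1e-12)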
Example #12
def train_model(model, train_folder, test_folder, train_batchsize, val_batchsize, image_size, filename,
                epochs=3, classmode='categorical', lr=1e-4):
    # No Data augmentation
    train_datagen = ImageDataGenerator(rescale=1./255)
    validation_datagen = ImageDataGenerator(rescale=1./255)

    # Data Generator for Training data
    train_generator = train_datagen.flow_from_directory(
            train_folder,
            target_size=(image_size, image_size),
            batch_size=train_batchsize,
            class_mode=classmode)

    # Data Generator for Validation data
    validation_generator = validation_datagen.flow_from_directory(
            test_folder,
            target_size=(image_size, image_size),
            batch_size=val_batchsize,
            class_mode=classmode,
            shuffle=False)

    # Compile the model
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.RMSprop(lr=lr),
                  metrics=['acc'])

    # Train the Model (fit_generator is deprecated; fit accepts generators)
    history = model.fit(
        train_generator, steps_per_epoch=train_generator.n // train_batchsize,
        epochs=epochs, workers=4,
        validation_data=validation_generator,
        validation_steps=validation_generator.n // val_batchsize)

    # Save the Model
    model.save(filename)

    return model, history
Example #13
def createModel():
    model = Sequential()

    model.add(Conv2D(32, (3, 3), padding='same', input_shape=(64, 64, 3)))
    model.add(Activation('relu'))
    model.add(Conv2D(128, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(128, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(128, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Conv2D(128, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(128, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    model.compile(optimizers.RMSprop(lr=0.0005, decay=1e-6), loss="categorical_crossentropy", metrics=["accuracy"])

    return model
Example #14
    def __init__(self,
                 train=True,
                 discount_factor=0.99,
                 learning_rate=0.0001,
                 step_size=20):
        self.available_screen_features = AVAILABLE_SCREEN_FEATURES
        self.available_minimap_features = AVAILABLE_MINIMAP_FEATURES
        self.available_player_features = AVAILABLE_PLAYER_FEATURES
        self.available_actions = AVAILABLE_ACTIONS
        self.available_arguments = AVAILABLE_ARGUMENTS
        self.available_argument_ids = [
            argument.id for argument in AVAILABLE_ARGUMENTS
        ]
        self.default_arguments = DEFAULT_ARGUMENTS

        self.is_training = train
        self.discount_factor = discount_factor
        self.learning_rate = learning_rate
        self.optimizer = optimizers.RMSprop(learning_rate=learning_rate)
        self.step_size = step_size
        self.memory = []

        self.obs_spec = None
        self.action_spec = None
        self.model = None
        self.last_state = None
        self.last_action = None
Example #15
def get_optimizer(op_type, learning_rate):
    if op_type == 'sgd':
        return optimizers.SGD(learning_rate)
    elif op_type == 'rmsprop':
        return optimizers.RMSprop(learning_rate)
    elif op_type == 'adagrad':
        return optimizers.Adagrad(learning_rate)
    elif op_type == 'adadelta':
        return optimizers.Adadelta(learning_rate)
    elif op_type == 'adam':
        return optimizers.Adam(learning_rate, clipnorm=5)
    elif op_type == 'adamw':
        return AdamWeightDecay(
            learning_rate=learning_rate,
            weight_decay_rate=0.01,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-6,
            exclude_from_weight_decay=["layer_norm", "bias"])
    elif op_type == 'adamw_2':
        return create_optimizer(init_lr=learning_rate,
                                num_train_steps=9000,
                                num_warmup_steps=0)
    elif op_type == 'adamw_3':
        return create_optimizer(init_lr=learning_rate,
                                num_train_steps=9000,
                                num_warmup_steps=100)
    else:
        raise ValueError('Optimizer Not Understood: {}'.format(op_type))
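A hypothetical use of get_optimizer, resolving an optimizer by name and compiling a toy model with it:

from tensorflow.keras import Sequential, layers

model = Sequential([layers.Dense(1, input_shape=(4,))])
model.compile(optimizer=get_optimizer('rmsprop', 1e-3), loss='mse')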
Example #16
def createModel(nb_filters1=64, nb_filters2=128):
    model = Sequential()

    # First Convolution Layer
    model.add(
        Conv2D(nb_filters1,
               kernel_size=5,
               activation='relu',
               input_shape=(img_width, img_height, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))

    # Second Convolution Layer
    model.add(Conv2D(nb_filters2, kernel_size=3, activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    # 2D to 1D
    model.add(Flatten())
    # Output Layer
    model.add(Dense(classes_num, activation='softmax'))

    model.compile(
        loss='categorical_crossentropy',  # or tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
        optimizer=optimizers.RMSprop(lr=lr),  # alternative: optimizers.SGD(lr=lr, momentum=0.1)
        metrics=[tf.keras.metrics.CategoricalAccuracy()])

    return model
Example #17
    def __init__(self, hull, lr, mmnt=0., wd=0, lr_decay=None, warmup=0,
                 grad_limit=-1, grad_accum=1, update_scope=None, scope='RMSPROP'):
        super(RMSProp, self).__init__(scope)
        self.hull = hull
        self.wd = wd / 2.

        self.update_scope = update_scope
        self.grad_accum = grad_accum
        self.cnt = 0
        if grad_limit < 0:
            grad_limit = None
        if lr_decay is not None:
            lr = lr_decay(lr)
        if warmup > 0:
            lr = WarmUpWrapper(warmup, lr)
        self.optz = optimizers.RMSprop(lr, momentum=mmnt, clipvalue=grad_limit)
        if getattr(self.hull, '_in_ema_mode', False):
            self.optz = tfa.optimizers.MovingAverage(self.optz)

            def average_op(itself, var, average_var):
                decay = tf.constant(self.hull.decay_fn(self.hull.step), dtype=tf.float32)
                return tf.keras.backend.moving_average_update(average_var, var, decay)

            self.optz.average_op = MethodType(average_op, self.optz)
            self.hull._optz = self.optz
Example #18
def model_config():
    train_data_shape = (800, 1, 1000)
    kernel_divergence_fn = lambda q, p, _: tfp.distributions.kl_divergence(
        q, p) / (train_data_shape[0] * 1.0)

    model_vi = Sequential()
    model_vi.add(tf.keras.layers.InputLayer(train_data_shape[1:],
                                            name="input"))
    model_vi.add(tf.keras.layers.Flatten())
    model_vi.add(
        tfp.layers.DenseFlipout(500,
                                activation='relu',
                                kernel_divergence_fn=kernel_divergence_fn))
    model_vi.add(
        tfp.layers.DenseFlipout(100,
                                activation='relu',
                                kernel_divergence_fn=kernel_divergence_fn))
    model_vi.add(
        tfp.layers.DenseFlipout(50,
                                activation='relu',
                                kernel_divergence_fn=kernel_divergence_fn))
    model_vi.add(
        tfp.layers.DenseFlipout(6,
                                activation='softmax',
                                kernel_divergence_fn=kernel_divergence_fn))

    model_vi.compile(loss='categorical_crossentropy',
                     optimizer=optimizers.RMSprop(lr=1e-4),
                     metrics=['accuracy'])
    model_vi.summary()
    return model_vi
Example #19
def lstm_for_dynamics(cf_trunc,
                      num_epochs,
                      seq_num=10,
                      deployment_mode='train'):
    features = np.transpose(cf_trunc)
    states = np.copy(features[:, :])  # Rows are time, columns are state values

    # Need to make batches of 10 input sequences and 1 output
    total_size = np.shape(features)[0] - seq_num
    input_seq = np.zeros(shape=(total_size, seq_num, np.shape(states)[1]))
    output_seq = np.zeros(shape=(total_size, np.shape(states)[1]))

    for t in range(total_size):
        input_seq[t, :, :] = states[None, t:t + seq_num, :]
        output_seq[t, :] = states[t + seq_num, :]

    idx = np.arange(total_size)
    np.random.shuffle(idx)

    input_seq = input_seq[idx, :, :]
    output_seq = output_seq[idx, :]

    # Model architecture
    model = Sequential()
    model.add(LSTM(32, input_shape=(
        seq_num,
        np.shape(states)[1])))  # returns the final hidden state of dimension 32
    model.add(Dense(np.shape(states)[1], activation='linear'))

    # design network
    my_opt = optimizers.RMSprop(lr=0.001, rho=0.9)

    filepath = "best_weights_lstm.h5"
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min',
                                 save_weights_only=True)
    callbacks_list = [checkpoint]

    # fit network
    model.compile(optimizer=my_opt,
                  loss='mean_squared_error',
                  metrics=[coeff_determination])

    if deployment_mode == 'train':
        train_history = model.fit(
            input_seq,
            output_seq,
            epochs=num_epochs,
            batch_size=16,
            validation_split=0.33,
            callbacks=callbacks_list)
        np.save('Train_Loss.npy', train_history.history['loss'])
        np.save('Val_Loss.npy', train_history.history['val_loss'])

    model.load_weights(filepath)

    return model
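coeff_determination is referenced above but not defined. A common Keras backend implementation of the R^2 metric, given here as an assumption:

from tensorflow.keras import backend as K

def coeff_determination(y_true, y_pred):
    # R^2 = 1 - SS_res / SS_tot, with a small epsilon for stability.
    ss_res = K.sum(K.square(y_true - y_pred))
    ss_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - ss_res / (ss_tot + K.epsilon())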
Example #20
def build_model():
    opt = optimizers.RMSprop(lr=learning_rate)
    model = tf.keras.Sequential()
    # GRU encoder
    model.add(
        layers.GRU(n_hidden,
                   input_shape=(OBSERVE_LENGTH, dim_input),
                   return_sequences=False,
                   stateful=False,
                   dropout=0.2))
    model.add(layers.RepeatVector(PREDICT_LENGTH))
    # GRU decoder
    model.add(
        layers.GRU(n_hidden,
                   return_sequences=True,
                   stateful=False,
                   dropout=0.2))
    model.add(layers.TimeDistributed(layers.Dense(3)))
    model.add(layers.Activation('linear'))
    model.compile(loss='mse', optimizer=opt)

    print(model.summary())
    return model
Example #21
def main(epochs=400):
    seq_len = 64
    batch_size = 64
    data_path = Path('preprocessed')
    output_path = output_path_name()

    raw_text = load_raw_text(data_path)
    train_batches, char2index = load_data(raw_text, seq_len, batch_size)
    model = build_model(batch_size=batch_size, embedding_dim=32,
                        num_lstm_layers=2, lstm_dim=192,
                        dropout_proportion=0.0, seq_len=seq_len,
                        vocab_size=len(char2index))
    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.RMSprop(clipnorm=5.0),
                  metrics=['accuracy'])

    output_path.mkdir()
    print(f'Saving the model to {output_path}')
    save_net(model, char2index, output_path)

    cb = callbacks(output_path, raw_text, model, char2index)
    hist = model.fit(train_batches, epochs=epochs, shuffle=False,
                     callbacks=cb, verbose=1)

    save_weights(model, output_path)
    save_training_history(hist, output_path)
Example #22
    def learning_tensor_model(self, layer_units_list, epochs_count,
                              model_name):
        """모델 생성 및 학습"""

        model = models.Sequential()
        input_layer_unit = [layer_units_list[0]]
        inter_layer_unit = layer_units_list[1:-1]
        output_layer_unit = [layer_units_list.pop()]

        model.add(
            layers.Dense(input_layer_unit.pop(),
                         activation='relu',
                         input_shape=(100, )))
        for inter_unit in inter_layer_unit:
            model.add(layers.Dense(inter_unit, activation='relu'))
        model.add(layers.Dense(output_layer_unit.pop(), activation='sigmoid'))

        model.compile(optimizer=optimizers.RMSprop(lr=0.001),
                      loss=losses.binary_crossentropy,
                      metrics=[metrics.binary_accuracy])

        model.fit(self.x_train,
                  self.y_train,
                  epochs=epochs_count,
                  batch_size=500)

        self.save_tensor_model(model, model_name)
        return model
Example #23
def create_model_v15(show_summary=False):
    model = Sequential()
    model.add(Conv2D(32, (8, 8), padding='same', input_shape=(21,20,3)))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (8, 8)))
    model.add(Activation('relu'))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Conv2D(128, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(128, (3, 3)))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(8, activation='softmax'))
    
    model.compile(optimizers.RMSprop(lr=0.0005, decay=1e-5), loss="categorical_crossentropy",
                  metrics=["categorical_accuracy"])
    
    if show_summary:
        model.summary()
    
    return model
Example #24
 def __init__(self, model):
     self.params = {'value': 0.5, 'entropy': 0.0001, 'gamma': 0.99}
     self.model = model
     self.model.compile(
         optimizer=ko.RMSprop(lr=0.0007),
         # define separate losses for policy logits and value estimate
         loss=[self._logits_loss, self._value_loss])
Example #25
def get_expanded_model(input_size, n_actions, lr):
    """Returns short conv model with mask at the end of the network. Network is interpretation of original network
    from papers."""
    screen_input = layers.Input(input_size)
    actions_input = layers.Input((n_actions,))

    x = layers.Lambda(lambda x: x / 255.0)(screen_input)

    x = layers.Conv2D(16, (3, 3), padding='same')(x)
    x = layers.Conv2D(16, (3, 3), padding='same')(x)
    x = layers.Conv2D(16, (3, 3), padding='same')(x)
    x = layers.Conv2D(16, (3, 3), padding='same')(x)
    x = layers.ReLU()(x)

    x = layers.Conv2D(32, (3, 3), padding='same')(x)
    x = layers.Conv2D(32, (3, 3), padding='same')(x)
    x = layers.ReLU()(x)

    x = layers.Flatten()(x)
    x = layers.Dense(256, activation='relu')(x)
    x = layers.Dense(n_actions)(x)
    x = layers.Multiply()([x, actions_input])

    model = models.Model(inputs=[screen_input, actions_input], outputs=x)
    optimizer = optimizers.RMSprop(lr=lr,
                                   rho=0.95,
                                   epsilon=0.01,
                                   momentum=0.95)
    model.compile(optimizer, loss='mse')
    return model
Example #26
def getModel():
    try:
        return load_model('./sentiment_model.h5')
    except (OSError, IOError):
        # Build the model
        train_x = [term_frequency(d) for d, _ in train_docs]
        test_x = [term_frequency(d) for d, _ in test_docs]
        train_y = [c for _, c in train_docs]
        test_y = [c for _, c in test_docs]

        x_train = np.asarray(train_x).astype('float32')
        x_test = np.asarray(test_x).astype('float32')

        y_train = np.asarray(train_y).astype('float32')
        y_test = np.asarray(test_y).astype('float32')

        model = models.Sequential()
        model.add(layers.Dense(64, activation='relu', input_shape=(500, )))
        model.add(layers.Dense(64, activation='relu'))
        model.add(layers.Dense(1, activation='sigmoid'))

        # Set up model training
        model.compile(optimizer=optimizers.RMSprop(lr=0.001),
                      loss=losses.binary_crossentropy,
                      metrics=[metrics.binary_accuracy])

        # Train the model
        model.fit(x_train, y_train, epochs=15, batch_size=512)
        results = model.evaluate(x_test, y_test)

        # Save the model
        model.save('sentiment_model.h5')
        return model
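term_frequency is not shown in this excerpt. Given input_shape=(500,) above, it plausibly maps a tokenised document onto counts over a fixed 500-token vocabulary; the selected_words argument is an assumption:

def term_frequency(doc, selected_words):
    # Count occurrences of each vocabulary token in the document.
    return [doc.count(word) for word in selected_words]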
Example #27
 def get_opt():
     if opt == 'adam':
         return optimizers.Adam(lr=lr, clipnorm=1.)
     elif opt == 'rmsprop':
         return optimizers.RMSprop(lr=lr, clipnorm=1.)
     else:
         raise Exception('Only Adam and RMSProp are available here')
Example #28
def main():
    #file = r'./db/fucDatasetReg_1F_NoLinear.csv'
    #file = r'./db/fucDatasetReg_2F.csv'
    file = r'./db/fucDatasetReg_3F_1000.csv'
    x_train, x_test, y_train, y_test = getCsvDataset(file)

    lr = 1e-3
    EPOCHS = 200
    # optimizer = optimizerTf(lr=lr)
    # losses,_ = trainModel(x_train,y_train,optimizer,epochs=EPOCHS)
    # plotLoss(losses)

    opts = []
    # fast group
    opts.append((optimizers.SGD(learning_rate=lr), 'SGD'))
    opts.append((optimizers.RMSprop(learning_rate=lr), 'RMSprop'))
    opts.append((optimizers.Adam(learning_rate=lr), 'Adam'))
    opts.append((optimizers.Adamax(learning_rate=lr), 'Adamax'))
    opts.append((optimizers.Nadam(learning_rate=lr), 'Nadam'))
    # # slow group
    opts.append((optimizers.Adadelta(learning_rate=lr), 'Adadelta'))
    opts.append((optimizers.Adagrad(learning_rate=lr), 'Adagrad'))
    opts.append((optimizers.Ftrl(learning_rate=lr), 'Ftrl'))

    lossesDict = {}
    for opti, name in opts:
        losses, _ = trainModel(x_train, y_train, opti, epochs=EPOCHS)
        lossesDict[name] = losses
        #print(name, losses)

    plotLossDict(lossesDict)
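trainModel is not shown here. A minimal sketch of what it might do under these assumptions: fit a small regression net with the given optimizer and return the loss history:

import tensorflow as tf
from tensorflow.keras import Sequential, layers

def trainModel(x, y, optimizer, epochs=200):
    # Small placeholder regression network; the real architecture is unknown.
    model = Sequential([layers.Dense(32, activation='relu'),
                        layers.Dense(1)])
    model.compile(optimizer=optimizer, loss='mse')
    history = model.fit(x, y, epochs=epochs, verbose=0)
    return history.history['loss'], model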
Example #29
def run_experiment(train_generator, validation_generator, parameters):
    model = models.Sequential()
    model.add(
        layers.Conv2D(32, (3, 3), activation='relu',
                      input_shape=(150, 150, 3)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.RMSprop(lr=1e-4),
                  metrics=['acc'])

    history = model.fit(train_generator,
                        steps_per_epoch=parameters.steps_per_epoch,
                        epochs=parameters.epochs,
                        validation_data=validation_generator,
                        validation_steps=parameters.validation_steps)

    return model, history
Example #30
    def __init__(self):
        """Init method.

        We define here a simple (shallow) CNN.
        """
        self.num_train_samples = 0
        self.num_feat = 1
        self.num_labels = 1
        self.is_trained = False

        self.model = models.Sequential()
        self.model.add(
            layers.Conv2D(32, (3, 3),
                          activation='relu',
                          input_shape=(40, 40, 3)))
        self.model.add(layers.MaxPooling2D((2, 2)))
        self.model.add(layers.Conv2D(64, (3, 3), activation='relu'))
        self.model.add(layers.MaxPooling2D((2, 2)))
        self.model.add(layers.Conv2D(128, (3, 3), activation='relu'))
        self.model.add(layers.MaxPooling2D((2, 2)))
        self.model.add(layers.Flatten())
        self.model.add(layers.Dropout(0.6))
        self.model.add(layers.Dense(256, activation='relu'))
        self.model.add(layers.Dense(1, activation='sigmoid'))

        self.model.compile(loss='binary_crossentropy',
                           optimizer=optimizers.RMSprop(lr=2e-3),
                           metrics=['accuracy'])