Example #1
def baseline_model_conv_2(train_X,
                          train_y,
                          test_X,
                          test_y,
                          num_pixels=256,
                          num_classes=10,
                          learningRate=0.001,
                          reg=l1(0.0001)):

    # encode targets
    train_y_enc = to_categorical(train_y)
    test_y_enc = to_categorical(test_y)

    # define model
    model = Sequential()
    model.add(
        Conv2D(64,
               kernel_size=(3, 3),
               activation='elu',
               input_shape=(16, 16, 1),
               activity_regularizer=reg))
    model.add(Dropout(0.25))

    #model.add(Conv2D(64, (3, 3), activation='elu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(num_classes, activation='sigmoid'))
    opt = optimizers.nadam(lr=learningRate)
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
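All of these snippets use the lowercase `nadam` alias from standalone Keras 2.x, where `keras.optimizers.nadam` points at the `Nadam` class. In tf.keras (TensorFlow 2.x) the lowercase aliases and the `lr` argument were removed; a minimal equivalent sketch, assuming a `model` built as above:

# tf.keras (TF 2.x) equivalent of `optimizers.nadam(lr=0.001)`:
from tensorflow.keras.optimizers import Nadam

opt = Nadam(learning_rate=0.001)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])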
Example #2
 def build(self):
     word_inputs = kl.Input(shape=(None, MAX_WORD_LENGTH + 2),
                            dtype="int32")
     inputs = [word_inputs]
     word_outputs = self.build_word_cnn(word_inputs)
     if len(self.word_vectorizers) > 0:
         additional_word_inputs = [
             kl.Input(shape=(None, input_dim), dtype="float32")
             for input_dim, dense_dim in self.word_vectorizers
         ]
         inputs.extend(additional_word_inputs)
         additional_word_embeddings = [
             kl.Dense(dense_dim)(additional_word_inputs[i])
             for i, (_, dense_dim) in enumerate(self.word_vectorizers)
         ]
         word_outputs = kl.Concatenate()([word_outputs] +
                                         additional_word_embeddings)
     outputs, lstm_outputs = self.build_basic_network(word_outputs)
     compile_args = {
         "optimizer": ko.nadam(lr=0.002, clipnorm=5.0),
         "loss": "categorical_crossentropy",
         "metrics": ["accuracy"]
     }
     self.model_ = Model(inputs, outputs)
     self.model_.compile(**compile_args)
     if self.verbose > 0:
         self.model_.summary(print_fn=log.info)
     return self
Example #3
def evaluate_model_dense_2(train_X,
                           train_y,
                           test_X,
                           test_y,
                           num_pixels=256,
                           num_classes=10,
                           learningRate=0.001,
                           reg=l1(0.001)):

    # encode targets
    train_y_enc = to_categorical(train_y)
    test_y_enc = to_categorical(test_y)

    # TODO: replace with our models
    # define model
    model = Sequential()

    model.add(
        Dense(num_pixels,
              input_dim=num_pixels,
              kernel_initializer='normal',
              activation='elu',
              activity_regularizer=reg))
    model.add(Dense(128, kernel_initializer='normal', activation='relu'))
    model.add(Dense(64, kernel_initializer='normal', activation='relu'))
    model.add(Dense(32, kernel_initializer='normal', activation='relu'))
    model.add(Dense(16, kernel_initializer='normal', activation='relu'))
    model.add(
        Dense(num_classes, kernel_initializer='normal', activation='sigmoid'))

    opt = optimizers.nadam(lr=learningRate)
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
Example #4
def create_model(input_size, output_size, n_layers, n_neurons,
                 activation_function, learning_rate, dropout_rate, optimizer):
    model = models.Sequential()
    model.add(
        layers.Dense(n_neurons,
                     input_shape=(input_size, ),
                     name='new_androdet_dense_1'))
    for _ in range(n_layers):
        if dropout_rate != 0.0:
            model.add(layers.Dropout(dropout_rate, noise_shape=None,
                                     seed=None))
        model.add(layers.Dense(n_neurons, activation=activation_function))
    model.add(layers.Dense(output_size, activation="sigmoid"))
    #model.summary()
    if optimizer == 'rmsprop':
        opt = optimizers.rmsprop(lr=learning_rate)
    elif optimizer == 'adam':
        opt = optimizers.adam(lr=learning_rate)
    elif optimizer == 'sgd':
        opt = optimizers.sgd(lr=learning_rate)
    elif optimizer == 'adagrad':
        opt = optimizers.adagrad(lr=learning_rate)
    elif optimizer == 'adadelta':
        opt = optimizers.adadelta(lr=learning_rate)
    elif optimizer == 'adamax':
        opt = optimizers.adamax(lr=learning_rate)
    elif optimizer == 'nadam':
        opt = optimizers.nadam(lr=learning_rate)
    else:
        raise ValueError("Unknown optimizer: {}".format(optimizer))
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,
                  metrics=["mean_squared_error"])
    return model
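A name-to-constructor dictionary is a compact alternative to the if/elif chain above, and it fails loudly on unknown names instead of leaving `opt` unbound. A sketch, assuming the same old-Keras lowercase optimizer aliases:

# Name -> constructor lookup; raises KeyError on unrecognized names.
OPTIMIZER_FACTORIES = {
    'rmsprop': optimizers.rmsprop,
    'adam': optimizers.adam,
    'sgd': optimizers.sgd,
    'adagrad': optimizers.adagrad,
    'adadelta': optimizers.adadelta,
    'adamax': optimizers.adamax,
    'nadam': optimizers.nadam,
}
opt = OPTIMIZER_FACTORIES[optimizer](lr=learning_rate)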
Example #5
def createArchitecture(parameters):

    optimizer = 0
    if parameters["optimizer"] == 'rmsprop':
        optimizer = optimizers.rmsprop(lr=parameters["learning_rate"],
                                       epsilon=parameters["epsilon"])
    elif parameters["optimizer"] == 'adam':
        optimizer = optimizers.adam(lr=parameters["learning_rate"],
                                    epsilon=parameters["epsilon"])
    elif parameters["optimizer"] == 'nadam':
        optimizer = optimizers.nadam(lr=parameters["learning_rate"],
                                     epsilon=parameters["epsilon"])
    elif parameters["optimizer"] == 'sgd':
        optimizer = optimizers.sgd(lr=parameters["learning_rate"])
    #else:
    #    optimizer = parameters["optimizer"]

    if parameters["use_embedding_layer"]:
        input = Input(shape=(parameters["max_seq_len"], ))
        model = Embedding(input_dim=parameters["one_hot_vector_len"],
                          output_dim=parameters["embedding_layer_output"],
                          input_length=parameters["max_seq_len"])(input)
        if parameters["embedding_dropout"] > 0:
            model = Dropout(rate=parameters["embedding_dropout"])(model)
    else:
        input = Input(shape=(parameters["max_seq_len"],
                             parameters["one_hot_vector_len"]))
        model = input
    if parameters["bi_lstm1_units"] > 0:
        model = Bidirectional(
            CuDNNLSTM(units=parameters["bi_lstm1_units"],
                      return_sequences=True))(model)
    if parameters["bi_lstm2_units"] > 0:
        model = Bidirectional(
            CuDNNLSTM(units=parameters["bi_lstm2_units"],
                      return_sequences=True))(model)
    if parameters["bi_lstm3_units"] > 0:
        model = Bidirectional(
            CuDNNLSTM(units=parameters["bi_lstm3_units"],
                      return_sequences=True))(model)
    if parameters["use_crf_layer"]:
        crf = CRF(parameters["num_tags"], learn_mode="marginal")
        out = crf(model)  # output
        model = Model(input, out)
        model.compile(optimizer=optimizer,
                      loss=losses.crf_loss,
                      metrics=[metrics.crf_accuracy,
                               avg_proximity_metric()])
    else:
        out = TimeDistributed(
            Dense(parameters["num_tags"], activation="softmax"))(model)
        model = Model(input, out)
        model.compile(optimizer=optimizer,
                      loss="categorical_crossentropy",
                      metrics=["accuracy", avg_proximity_metric()])
    model.summary()
    return model
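A hypothetical `parameters` dict for `createArchitecture` (key names are taken from the code above; the values are illustrative assumptions):

parameters = {
    'optimizer': 'nadam',
    'learning_rate': 0.001,
    'epsilon': 1e-8,
    'use_embedding_layer': False,
    'max_seq_len': 100,
    'one_hot_vector_len': 20,
    'embedding_layer_output': 64,
    'embedding_dropout': 0.0,
    'bi_lstm1_units': 128,
    'bi_lstm2_units': 0,
    'bi_lstm3_units': 0,
    'use_crf_layer': False,
    'num_tags': 5,
}
model = createArchitecture(parameters)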
Example #6
def startLearning(Env, max_board_size=7, loadFileNumber=-1, gpuToUse=None, memoryAllocation=800000):
    # Set to use GPU explicitly
    if gpuToUse is not None:
        environ["CUDA_VISIBLE_DEVICES"] = gpuToUse
    else:
        environ["CUDA_VISIBLE_DEVICES"] = "0"

    env = Env
    nb_actions = env.action_space.n

    # Init size based on max_board_size
    if max_board_size not in [7, 11, 19]:
        raise ValueError("max_board_size must be 7, 11, or 19")

    layer0Size = 4096
    layer1Size = 4096
    layer2Size = 4096
    layer3Size = 0
    layer4Size = 0
    layer5Size = 0

    # Next, we build a very simple model. 
    model = Sequential()
    model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
    model.add(Dense(layer0Size))
    model.add(LeakyReLU(alpha=0.003))
    model.add(Dense(layer1Size))
    model.add(LeakyReLU(alpha=0.003))
    model.add(Dense(layer2Size))
    model.add(LeakyReLU(alpha=0.003))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))

    # A little diagnosis of the model (summary() prints directly)
    model.summary()

    # Finally, we configure and compile our agent.
    memory = SequentialMemory(limit=memoryAllocation, window_length=1)
    policy = BoltzmannQPolicy()
    dqn = DQNAgent(model=model, batch_size=32, nb_actions=nb_actions, memory=memory, policy=policy, enable_dueling_network=True, gamma=.97)
    dqn.compile(nadam(lr=0.01), metrics=['mae']) 


    # Here we load from a file an old agent save if specified.
    if loadFileNumber >= 0:
        loadFile = "Larger_Memeory_BOARDSIZE_" + str(max_board_size) + "_DQN_LAYERS_" + str(layer0Size) + "_" + str(layer1Size) + "_" + str(layer2Size) + "_" + str(layer3Size) + "_" + str(layer4Size) + "_" + str(layer5Size) +  "_SAVENUMBER_" + str(loadFileNumber) + ".h5f"
        dqn.load_weights(loadFile)

    saveFileNumberCounter = 0
    while True:
        dqn.fit(env, nb_steps=100010, visualize=False, verbose=1)
        saveFileNumberCounter+=1
        saveFile = "Larger_Memeory_BOARDSIZE_" + str(max_board_size) + "_DQN_LAYERS_" + str(layer0Size) + "_" + str(layer1Size) + "_" + str(layer2Size) + "_" + str(layer3Size) + "_" + str(layer4Size) + "_" + str(layer5Size) + "_SAVENUMBER_" + str(loadFileNumber + saveFileNumberCounter) + ".h5f"
        dqn.save_weights(saveFile, overwrite=True)
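If the training loop above is given an exit condition, the agent could then be evaluated with keras-rl's `test` method (a sketch, not part of the original code):

# Hypothetical evaluation run using the keras-rl API:
dqn.test(env, nb_episodes=10, visualize=False)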
Example #7
def model7():
    base_model = ResNet50(weights=None, include_top=False, input_shape=(300, 300, 3))
    base_model.load_weights('../input/keras-pretrained-models/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
    base_model.trainable = False
    model = Sequential([
        base_model,
        GlobalAveragePooling2D(),
        Dense(6, activation='softmax')
    ])
    opt = optimizers.nadam(lr=0.0001)
    model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=['accuracy'])
    return model
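Because `base_model.trainable = False` freezes the ResNet50 backbone, only the new softmax head trains. A common follow-up, sketched here as an assumption rather than part of the original, is a second phase that unfreezes the backbone and recompiles at a lower rate:

# Hypothetical fine-tuning phase after training the head:
model = model7()
# ... fit the head first, then unfreeze the ResNet50 base (layer 0) ...
model.layers[0].trainable = True
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.nadam(lr=1e-5),
              metrics=['accuracy'])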
Example #8
def learn_effec_model(num_pixels = 256, num_classes = 10, learning_rate=0.001):
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), kernel_initializer='glorot_uniform', activation='relu', input_shape=(16,16,1)))
    model.add(Conv2D(64, (3, 3), kernel_initializer='glorot_uniform', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, kernel_initializer='glorot_uniform', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, kernel_initializer='glorot_uniform', activation='sigmoid'))
    optimizer = optimizers.nadam(lr=learning_rate)
    model.compile(loss=keras.losses.categorical_crossentropy,optimizer=optimizer,metrics=['accuracy'])
    return model
Example #9
def model5():
    # v19-0.88
    base_model = InceptionV3(weights=None, include_top=False, input_shape=(300, 300, 3))
    base_model.load_weights('../input/keras-pretrained-models/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')
    base_model.trainable = False
    model = Sequential([
        base_model,
        GlobalAveragePooling2D(),
        Dropout(0.02),
        Dense(1024, activation='relu'),
        Dense(2, activation='softmax')
    ])
    opt = optimizers.nadam(lr=0.0001)
    model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=['accuracy'])
    return model
Example #10
 def build(self):
     word_inputs = kl.Input(shape=(None, MAX_WORD_LENGTH + 2),
                            dtype="int32")
     inputs = [word_inputs]
     word_outputs = self.build_word_cnn(word_inputs)
     outputs, lstm_outputs = self.build_basic_network(word_outputs)
     compile_args = {
         "optimizer": ko.nadam(lr=0.002, clipnorm=5.0),
         "loss": "categorical_crossentropy",
         "metrics": ["accuracy"]
     }
     self.model_ = Model(inputs, outputs)
     self.model_.compile(**compile_args)
     if self.verbose > 0:
         self.model_.summary()
     return self
Example #11
def create_model(layers_and_filters,
                 kernels,
                 activation,
                 input_shape,
                 dropout_rate,
                 optimizer,
                 learning_rate,
                 output_size=1):
    model = models.Sequential()
    i = 0
    for filters in layers_and_filters:
        model.add(
            layers.Conv2D(filters,
                          kernel_size=kernels[i],
                          strides=kernels[i],
                          activation=activation,
                          input_shape=input_shape))
        i += 1
        if i < len(layers_and_filters):
            model.add(layers.MaxPooling2D(pool_size=(2, 2)))
            model.add(layers.BatchNormalization())

    if dropout_rate != 0:
        model.add(layers.Dropout(dropout_rate))
    model.add(layers.Flatten())
    model.add(layers.Dense(output_size, activation='sigmoid'))

    if optimizer == 'rmsprop':
        opt = optimizers.rmsprop(lr=learning_rate)
    elif optimizer == 'adam':
        opt = optimizers.adam(lr=learning_rate)
    elif optimizer == 'sgd':
        opt = optimizers.sgd(lr=learning_rate)
    elif optimizer == 'adagrad':
        opt = optimizers.adagrad(lr=learning_rate)
    elif optimizer == 'adadelta':
        opt = optimizers.adadelta(lr=learning_rate)
    elif optimizer == 'adamax':
        opt = optimizers.adamax(lr=learning_rate)
    elif optimizer == 'nadam':
        opt = optimizers.nadam(lr=learning_rate)
    else:
        raise ValueError("Unknown optimizer: {}".format(optimizer))
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,
                  metrics=["mean_squared_error"])

    #model.summary()
    return model
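A hypothetical call (argument names from the signature above; the values are illustrative, and each `kernels[i]` feeds in as both kernel size and stride, as the loop does):

model = create_model(layers_and_filters=[32, 64],
                     kernels=[3, 3],
                     activation='relu',
                     input_shape=(64, 64, 1),
                     dropout_rate=0.25,
                     optimizer='nadam',
                     learning_rate=0.001)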
Example #12
def nadam(lr=0.002,
          beta_1=0.9,
          beta_2=0.999,
          epsilon=None,
          schedule_decay=0.004):
    """
    Adam optimizer
    :param lr: >=0, learning rate
    :param beta_1: 0 < beta_1 < 1, generally close to 1
    :param beta_2: 0 < beta_2 < 1, generally close to 1
    :param epsilon: >=0, fuzz factor. If None, defaults to K.epsilon()
    """
    return optimizers.nadam(lr=lr,
                            beta_1=beta_1,
                            beta_2=beta_2,
                            epsilon=epsilon,
                            schedule_decay=schedule_decay)
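A minimal usage sketch for the wrapper above (the surrounding model is assumed, not part of the original):

# Compile any Keras model with the wrapped optimizer:
model.compile(loss='categorical_crossentropy',
              optimizer=nadam(lr=0.001),
              metrics=['accuracy'])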
Example #13
def learn_effec_model(num_pixels=256, num_classes=10, learning_rate=0.001):
    model = Sequential()
    model.add(
        Dense(num_pixels,
              input_dim=num_pixels,
              kernel_initializer='normal',
              activation='elu'))
    model.add(Dense(128, kernel_initializer='normal', activation='elu'))
    model.add(Dense(64, kernel_initializer='normal', activation='relu'))
    model.add(Dense(32, kernel_initializer='normal', activation='relu'))
    model.add(Dense(16, kernel_initializer='normal', activation='relu'))
    model.add(
        Dense(num_classes, kernel_initializer='normal', activation='sigmoid'))
    optimizer = optimizers.nadam(lr=learning_rate)
    #optimizer = optimizers.SGD(lr=learning_rate)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model
Example #14
    def __init__(self, input_dim=0, output_dim=0, lr=0.01):
        self.input_dim = input_dim
        self.lr = lr
        # LSTM network
        self.model = Sequential()

        self.model.add(LSTM(128, input_shape=(1, input_dim), return_sequences=True, stateful=False, dropout=0.5, activation='relu'))
        self.model.add(BatchNormalization())
        self.model.add(Attention(1)) ### ATTENTION
        self.model.add(Dense(32, activation='relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(2, activation="softmax"))

        #self.model.compile(optimizer=Adam(lr=lr), loss='mse')
        opt = optimizers.nadam(lr=self.lr, beta_1=0.9, beta_2=0.999)
        self.model.compile(loss='mean_squared_error', optimizer=opt)
        #self.model.compile(optimizer=Adam(lr=lr,beta_1=0.9, beta_2=0.999, amsgrad=True), loss='mse')


        self.prob = None
Example #15
 def build(self):
     """Builds the network using Keras.
     """
     word_inputs = kl.Input(shape=(None, MAX_WORD_LENGTH+2), dtype="int32")
     inputs = [word_inputs]
     word_outputs = self._build_word_cnn(word_inputs)
     if len(self.word_vectorizers) > 0:
         additional_word_inputs = [kl.Input(shape=(None, input_dim), dtype="float32")
                                   for input_dim, dense_dim in self.word_vectorizers]
         inputs.extend(additional_word_inputs)
         additional_word_embeddings = [kl.Dense(dense_dim)(additional_word_inputs[i])
                                       for i, (_, dense_dim) in enumerate(self.word_vectorizers)]
         word_outputs = kl.Concatenate()([word_outputs] + additional_word_embeddings)
     outputs, lstm_outputs = self._build_basic_network(word_outputs)
     compile_args = {"optimizer": ko.nadam(lr=0.002, clipnorm=5.0),
                     "loss": "categorical_crossentropy", "metrics": ["accuracy"]}
     self.model_ = Model(inputs, outputs)
     self.model_.compile(**compile_args)
     if self.verbose > 0:
         self.model_.summary(print_fn=log.info)
     return self
Example #16
def evaluate_model_conv(train_X,
                        train_y,
                        test_X,
                        test_y,
                        num_pixels=256,
                        num_classes=10,
                        learningRate=0.001):

    # encode targets
    train_y_enc = to_categorical(train_y)
    test_y_enc = to_categorical(test_y)

    # define model
    model = Sequential()
    model.add(
        Conv2D(64,
               kernel_size=(3, 3),
               activation='elu',
               input_shape=(16, 16, 1)))
    #model.add(Conv2D(64, (3, 3), activation='elu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(num_classes, activation='sigmoid'))

    opt = optimizers.nadam(lr=learningRate)

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=opt,
                  metrics=['accuracy'])
    # fit model
    model.fit(train_X, train_y_enc, epochs=10, batch_size=8, verbose=2)
    #model.fit(train_X, train_y_enc, epochs=12, batch_size=12, verbose=2)

    # evaluate the model
    _, test_acc = model.evaluate(test_X, test_y_enc, verbose=0)
    print(test_acc)
    return model, test_acc
Example #17
    def create_model(self):
        # branch1 = self.__create_sub_model()
        # branch2 = self.__create_sub_model()
        # branch3 = self.__create_sub_model()
        # m4 = self.__create_sub_model()

        model = Sequential()
        model.add(Merge(self.models, mode='concat'))

        model.add(Dense(8,
                        kernel_initializer=initializers.random_normal(mean=0.01, stddev=0.05, seed=c.random_seed),
                        bias_initializer='zero',
                        kernel_regularizer=regularizers.l2(self.reg_val),
                        activity_regularizer=regularizers.l2(self.reg_val)))
        model.add(BatchNormalization())
        model.add(LeakyReLU())

        model.add(Dense(1, init='normal', activation='sigmoid'))
        model.compile(loss='binary_crossentropy',
                           optimizer=optimizers.nadam(lr=self.learning_rate),
                           metrics=['accuracy'])
        return model
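`Merge` and the `init=` argument are Keras 1 APIs that were removed in Keras 2. A rough functional-API equivalent of the head above, sketched under that assumption (`self.models`, `self.reg_val`, `self.learning_rate`, and `c.random_seed` come from the original class):

# Assumes: from keras.layers import concatenate; from keras.models import Model
merged = concatenate([m.output for m in self.models])
x = Dense(8,
          kernel_initializer=initializers.random_normal(mean=0.01, stddev=0.05, seed=c.random_seed),
          bias_initializer='zeros',
          kernel_regularizer=regularizers.l2(self.reg_val),
          activity_regularizer=regularizers.l2(self.reg_val))(merged)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
out = Dense(1, kernel_initializer='normal', activation='sigmoid')(x)
model = Model([m.input for m in self.models], out)
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.nadam(lr=self.learning_rate),
              metrics=['accuracy'])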
Example #18
def evaluate_model_dense(train_X,
                         train_y,
                         test_X,
                         test_y,
                         num_pixels=256,
                         num_classes=10,
                         learningRate=0.001):

    # encode targets
    train_y_enc = to_categorical(train_y)
    test_y_enc = to_categorical(test_y)

    # TODO: replace with our models
    # define model
    model = Sequential()

    model.add(
        Dense(num_pixels,
              input_dim=num_pixels,
              kernel_initializer='normal',
              activation='elu'))
    model.add(Dense(128, kernel_initializer='normal', activation='elu'))
    model.add(Dense(64, kernel_initializer='normal', activation='relu'))
    model.add(Dense(32, kernel_initializer='normal', activation='relu'))
    model.add(Dense(16, kernel_initializer='normal', activation='relu'))
    model.add(
        Dense(num_classes, kernel_initializer='normal', activation='sigmoid'))

    opt = optimizers.nadam(lr=learningRate)
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    # fit model
    model.fit(train_X, train_y_enc, epochs=12, batch_size=12, verbose=2)
    # evaluate the model
    _, test_acc = model.evaluate(test_X, test_y_enc, verbose=0)
    print(test_acc)
    return model, test_acc
Example #19
    def __init__(self, input_dim=0, output_dim=0, lr=0.01):
        self.input_dim = input_dim
        self.lr = lr

        # LSTM network
        self.model = Sequential()

        self.model.add(LSTM(256, input_shape=(1, input_dim),
                            return_sequences=True, stateful=False, dropout=0.5))
        self.model.add(BatchNormalization())
        self.model.add(LSTM(256, return_sequences=True, stateful=False, dropout=0.5))
        self.model.add(BatchNormalization())
        self.model.add(LSTM(256, return_sequences=False, stateful=False, dropout=0.5))
        self.model.add(BatchNormalization())
        self.model.add(Dense(output_dim))
        self.model.add(Activation('sigmoid'))
        #self.model.compile(optimizer=Adam(lr=lr), loss='mse')
        opt = optimizers.nadam(lr=self.lr, beta_1=0.9, beta_2=0.999)
        self.model.compile(loss='mean_squared_error', optimizer=opt)
        #self.model.compile(optimizer=Adam(lr=lr,beta_1=0.9, beta_2=0.999, amsgrad=True), loss='mse')


        self.prob = None
Example #20
def create_rnn_lstm():
    # Add an Input Layer
    input_layer = layers.Input((70, ))

    # Add the word embedding Layer
    embedding_layer = layers.Embedding(len(word_index) + 1,
                                       300,
                                       weights=[embedding_matrix],
                                       trainable=False)(input_layer)
    embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)

    # Add the LSTM Layer
    lstm_layer = layers.LSTM(256)(embedding_layer)

    # Add the output Layers
    output_layer1 = layers.Dense(50, activation="sigmoid")(lstm_layer)
    output_layer1 = layers.Dropout(0.5)(output_layer1)
    output_layer2 = layers.Dense(1, activation="sigmoid")(output_layer1)

    # Compile the model
    model = models.Model(inputs=input_layer, outputs=output_layer2)
    model.compile(optimizer=optimizers.nadam(), loss='binary_crossentropy')

    return model
Example #21
def fine_tune(base_model, n_class, multi_gpu_flag=False, gpus=1, method=0):

    if method == 0:
        base_model.layers.pop()
        x = base_model.layers[-1].output
        # x = base_model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(100, activation='relu')(x)
        x = BatchNormalization(trainable=True, axis=1)(x)
        x = Dropout(0.5)(x)
        x = Dense(50, activation='relu')(x)
        x = BatchNormalization(trainable=True, axis=1)(x)
        predictions = Dense(n_class, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)

        model.compile(optimizer=nadam(lr=0.00001),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        print('Model compiled!')
        if multi_gpu_flag:
            model = multi_gpu_model(model, gpus=gpus)
        return model
    else:
        return base_model
Example #22
y_test = np_utils.to_categorical(y_test - 1, 3)
#y_test = np_utils.to_categorical(y_test-1, 12)

y_train = train[:, 0]
y_train = np_utils.to_categorical(y_train - 1, 3)
#y_train = np_utils.to_categorical(y_train-1, 12)

input_shape = (x_test.shape[1], 1)

#fpath = './weight4/weights.{epoch:02d}-{loss:.2f}-{val_loss:.2f}.hdf5'
#cp_cb = ModelCheckpoint(filepath=fpath, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)

sgd1 = SGD(lr=0.005, decay=1e-6, momentum=0.9, nesterov=True)
nadam1 = nadam(lr=0.005,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=None,
               schedule_decay=0.004)
adam1 = Adam(lr=0.005)
adamax1 = Adamax(lr=0.005)
adagrad1 = Adagrad(lr=0.005)
adadelta1 = Adadelta(lr=0.005)
rmsprop1 = RMSprop(lr=0.005)

es_cb = EarlyStopping(monitor='val_loss',
                      min_delta=0,
                      patience=100,
                      verbose=0,
                      mode='auto')

#1,2, -CBS
Example #23
# vanilla SGD
optimizer_list.append(['SGD', optimizers.SGD()])
# momentum
optimizer_list.append(['Momentum', optimizers.SGD(momentum=0.9)])
# NAG
optimizer_list.append(['NAG', optimizers.SGD(momentum=0.9, nesterov=True)])
# Adagrad
optimizer_list.append(['Adagrad', optimizers.adagrad()])
# RMSProp
optimizer_list.append(['RMSProp', optimizers.rmsprop()])
# AdaDelta
optimizer_list.append(['AdaDelta', optimizers.adadelta()])
# Adam
optimizer_list.append(['Adam', optimizers.adam()])
# Nadam
optimizer_list.append(['Nadam', optimizers.nadam()])

score_list = []
opt_name_list = []
for optimizer_element in optimizer_list:
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer_element[1],
                  metrics=['accuracy'])

    # 5. Train the model
    model.fit(x_train, y_train, epochs=1500, batch_size=64)

    # 6. Evaluate the model
    scores = model.evaluate(x_test, y_test)
    score = scores[1] * 100
    opt_name = optimizer_element[0]
    score_list.append(score)
    opt_name_list.append(opt_name)
Example #24
def prepare_model(model,
                  num_classes,
                  batch_size,
                  val_batch_size,
                  max_patience,
                  optimizer,
                  save_path,
                  volume_indices,
                  data_gen_kwargs,
                  data_augmentation_kwargs=None,
                  learning_rate=0.001,
                  clipnorm=10,
                  num_outputs=1,
                  adversarial=False,
                  adv_weight=0.2,
                  save_every=0,
                  mask_to_liver=False,
                  show_model=True,
                  liver_only=False):

    if data_augmentation_kwargs is None:
        data_augmentation_kwargs = {}

    if num_outputs not in [1, 2]:
        raise ValueError("num_outputs must be 1 or 2")

    if liver_only and num_outputs != 1:
        raise ValueError("num_outputs must be 1 when liver_only is True")

    if not liver_only:
        lesion_output = 'output_0'
        liver_output = 'output_1'
    else:
        lesion_output = None
        liver_output = 'output_0'

    if adversarial and num_outputs != 2:
        raise ValueError("num_outputs must be 2 when adversarial is True")
    '''
    Data generators for training and validation sets
    '''
    gen = {}
    print(' > Preparing data generators...')
    if 'recurrent' in data_gen_kwargs:
        data_gen_kwargs['model'] = model
    gen['train'] = data_generator(volume_indices=volume_indices['train'],
                                  batch_size=batch_size,
                                  shuffle=True,
                                  loop_forever=True,
                                  transform_kwargs=data_augmentation_kwargs,
                                  **data_gen_kwargs)
    gen['valid'] = data_generator(volume_indices=volume_indices['valid'],
                                  batch_size=val_batch_size,
                                  shuffle=False,
                                  loop_forever=True,
                                  transform_kwargs=None,
                                  **data_gen_kwargs)
    gen['valid_callback'] = data_generator(
        volume_indices=volume_indices['valid'],
        batch_size=val_batch_size,
        shuffle=False,
        loop_forever=False,
        transform_kwargs=None,
        **data_gen_kwargs)
    '''
    Metrics
    '''
    metrics = {}
    if lesion_output:
        metrics[lesion_output] = []
    if num_outputs == 2 or lesion_output is None:
        metrics[liver_output] = []

    # Accuracy
    def accuracy(y_true, y_pred):
        y_true_ = K.clip(y_true - 1, 0, 1)
        if num_classes == 1:
            return K.mean(K.equal(y_true, K.round(y_pred)))
        else:
            return K.mean(
                K.equal(K.squeeze(y_true, 1), K.argmax(y_pred, axis=1)))

    if lesion_output:
        metrics[lesion_output].append(accuracy)

    # Dice averaged over slices.
    if lesion_output:
        metrics[lesion_output].append(dice_loss(2))
        metrics[lesion_output].append(dice_loss(2, masked_class=0))
    if num_outputs == 2 or lesion_output is None:
        metrics[liver_output].append(dice_loss([1, 2]))
    '''
    Callbacks
    '''
    callbacks = OrderedDict()

    ## Define early stopping callback
    #early_stopping = EarlyStopping(monitor='val_acc', mode='max',
    #patience=max_patience, verbose=0)
    #callbacks.append(early_stopping)

    # Save prediction images
    if save_every:
        save_predictions = SavePredictions(num_epochs=save_every,
                                           data_gen=gen['valid_callback'],
                                           save_path=os.path.join(
                                               save_path, "predictions"))
        callbacks['save_predictions'] = save_predictions

    # Compute dice on the full data
    if lesion_output:
        output_name = lesion_output if num_outputs == 2 else None
        dice_lesion = Dice(target_class=2, output_name=output_name)
        dice_lesion_inliver = Dice(target_class=2,
                                   mask_class=0,
                                   output_name=output_name)
        callbacks['dice_lesion'] = dice_lesion
        callbacks['dice_lesion_inliver'] = dice_lesion_inliver
        metrics[lesion_output].extend(dice_lesion.get_metrics())
        metrics[lesion_output].extend(dice_lesion_inliver.get_metrics())
    if num_outputs == 2 or lesion_output is None:
        output_name = liver_output if num_outputs == 2 else None
        dice_liver = Dice(target_class=[1, 2], output_name=output_name)
        callbacks['dice_liver'] = dice_liver
        metrics[liver_output].extend(dice_liver.get_metrics())

    # Define model saving callback
    if lesion_output is not None:
        monitor = 'val_dice_loss_2' if num_outputs==1 \
            else 'val_output_0_dice_loss_2'
        if mask_to_liver:
            monitor += '_m0'
    else:
        monitor = 'val_dice_loss_1_2' if num_outputs==1 \
            else 'val_output_0_dice_loss_1_2'
    checkpointer_best_ldice = ModelCheckpoint(filepath=os.path.join(
        save_path, "best_weights_ldice.hdf5"),
                                              verbose=1,
                                              monitor=monitor,
                                              mode='min',
                                              save_best_only=True,
                                              save_weights_only=False)
    if lesion_output is not None:
        monitor = 'val_dice_2' if num_outputs == 1 else 'val_output_0_dice_2'
        if mask_to_liver:
            monitor += '_m0'
    else:
        monitor = 'val_dice_1_2' if num_outputs==1 \
            else 'val_output_0_dice_1_2'
    checkpointer_best_dice = ModelCheckpoint(filepath=os.path.join(
        save_path, "best_weights_dice.hdf5"),
                                             verbose=1,
                                             monitor=monitor,
                                             mode='max',
                                             save_best_only=True,
                                             save_weights_only=False)
    if not mask_to_liver and lesion_output is not None:
        monitor = 'val_dice_loss_2_m0' if num_outputs==1 \
            else 'val_output_0_dice_loss_2_m0'
        checkpointer_best_mldice = ModelCheckpoint(\
                filepath=os.path.join(save_path, "best_weights_mldice.hdf5"),
                                      verbose=1,
                                      monitor=monitor,
                                      mode='min',
                                      save_best_only=True,
                                      save_weights_only=False)
        callbacks['checkpointer_best_mldice'] = checkpointer_best_mldice
    callbacks['checkpointer_best_ldice'] = checkpointer_best_ldice
    callbacks['checkpointer_best_dice'] = checkpointer_best_dice

    # Save every last epoch
    checkpointer_last = ModelCheckpoint(filepath=os.path.join(
        save_path, "weights.hdf5"),
                                        verbose=0,
                                        save_best_only=False,
                                        save_weights_only=False)
    callbacks['checkpointer_last'] = checkpointer_last

    # File logging
    logger = FileLogger(
        log_file_path=os.path.join(save_path, "training_log.txt"))
    callbacks['logger'] = logger
    '''
    Compile model
    '''
    print('\n > Compiling model...')
    if optimizer == 'RMSprop':
        optimizer = RMSprop(lr=learning_rate,
                            rho=0.9,
                            epsilon=1e-8,
                            decay=0.,
                            clipnorm=clipnorm)
    elif optimizer == 'nadam':
        optimizer = nadam(lr=learning_rate,
                          beta_1=0.9,
                          beta_2=0.999,
                          epsilon=1e-08,
                          schedule_decay=0,
                          clipnorm=clipnorm)
    elif optimizer == 'adam':
        optimizer = adam(lr=learning_rate,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0,
                         clipnorm=clipnorm)
    elif optimizer == 'sgd':
        optimizer = SGD(lr=learning_rate,
                        momentum=0.9,
                        decay=0.,
                        nesterov=True,
                        clipnorm=clipnorm)
    else:
        raise ValueError("Unknown optimizer: {}".format(optimizer))
    if not hasattr(model, 'optimizer'):

        def loss(loss_func, weight):
            def f(y_true, y_pred):
                loss = loss_func(y_true, y_pred) * weight
                return loss

            f.__name__ = loss_func.__name__
            return f

        masked_class = 0 if mask_to_liver else None
        losses = {}
        if lesion_output:
            losses[lesion_output] = loss(
                dice_loss(2, masked_class=masked_class), 1. - adv_weight)
        if num_outputs == 2 or lesion_output is None:
            losses[liver_output] = loss(dice_loss([1, 2]), 1. - adv_weight)
        if adversarial:
            losses['out_adv_0_d'] = loss(mean_squared_error, adv_weight)
            losses['out_adv_1_d'] = loss(mean_squared_error, adv_weight)
            losses['out_adv_0_g'] = loss(mean_squared_error, adv_weight)
            losses['out_adv_1_g'] = loss(mean_squared_error, adv_weight)
            losses['out_disc_0'] = loss(mean_squared_error, adv_weight)
            losses['out_disc_1'] = loss(mean_squared_error, adv_weight)
        model.compile(loss=losses, optimizer=optimizer, metrics=metrics)
    '''
    Print model summary
    '''
    if show_model:
        from keras.utils.visualize_util import plot
        #model.summary()
        plot(model, to_file=os.path.join(save_path, 'model.png'))

    return model, callbacks, gen
Example #25
def s5(cuda, results, npydata, kfold_splits, batch_size, epochs):
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda
    ogdata = np.load(npydata)
    num_classes = 3

    if os.path.isdir(results):
        print('already exists')
    else:
        os.mkdir(results)

    ### Shuffle and parse data into x and y
    np.random.shuffle(ogdata)
    X = ogdata[:, :, :, 1:]
    y = ogdata[:, 1, 1, 0]
    y = y - 1

    ###KFOLDSPLITS - insert y, get test[] and train[] for different folds
    y = y[np.newaxis].T
    y = to_categorical(y, num_classes)
    train = [[] for i in range(kfold_splits)]
    test = [[] for i in range(kfold_splits)]

    # Go through each class
    for cla in range(num_classes):
        splitlist = []
        # Grab all examples from class and shuffle
        shuffledclass = np.squeeze(np.nonzero(y[:, cla]))
        np.random.shuffle(shuffledclass)
    #   print('class ' + str(cla) + ' ' + str(shuffledclass))

        #distribute shuffled examples of class evenly into each fold
        for fold in range(kfold_splits):
            startindex=round(fold*(shuffledclass.shape[0]/kfold_splits))
            endindex =round(((fold + 1) * shuffledclass.shape[0] / kfold_splits))
            splitlist.append(shuffledclass[startindex:endindex])
    #       print(shuffledclass[startindex:endindex])

        #Concatenate examples of class cla with previous examples
        for fold in range(kfold_splits):
            test[fold] = np.append(np.asarray(splitlist[fold]), test[fold])
            train[fold] = np.append(np.setdiff1d(shuffledclass, np.asarray(splitlist[fold])),train[fold])
    #       print('fold ' + str(fold) + str(np.asarray(splitlist[fold])))

    for kf in range(kfold_splits):

        print("Running fold", kf+1, "/", kfold_splits)

        ### Get indices of train/test data for current fold
        kf_train = train[kf].astype(int)
        kf_test  = test[kf].astype(int)
        x_train, x_test = X[kf_train], X[kf_test]
        y_train, y_test = y[kf_train], y[kf_test]

        ### Check that our classes are evenly distributed in training/testing
        print ('ytrain0 ' + str(sum(y_train[:, 0])))
        print ('ytrain1 ' + str(sum(y_train[:, 1])))
        print ('ytrain2 ' + str(sum(y_train[:, 2])))
        print ('ytest0 ' + str(sum(y_test[:, 0])))
        print ('ytest1 ' + str(sum(y_test[:, 1])))
        print ('ytest2 ' + str(sum(y_test[:, 2])))

        # Save test (x,y) data
        x_test_loc = os.path.join(results, 'x_test' + str(kf) + '.npy')
        x_train_loc = os.path.join(results, 'x_train' + str(kf) + '.npy')
        y_test_loc = os.path.join(results, 'y_test' + str(kf) + '.npy')
        y_train_loc = os.path.join(results, 'y_train' + str(kf) + '.npy')

        np.save(x_test_loc, x_test)
        #np.save(x_train_loc, x_train)
        np.save(y_test_loc, y_test)
        #np.save(y_train_loc, y_train)

        os.environ["CUDA_VISIBLE_DEVICES"] = cuda
        data_augmentation = True
        model = Sequential()

        # Block 1
        model.add(Conv2D(64, 3, activation='relu', padding='same', name='block1_conv1', input_shape=x_train.shape[1:]))
        model.add(Conv2D(64, 3, activation='relu', padding='same', name='block1_conv2'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        #        model.add(Dropout(0.25))

        # Block 2
        model.add(Conv2D(128, 3, activation='relu', padding='same', name='block2_conv1'))
        model.add(Conv2D(128, 3, activation='relu', padding='same', name='block2_conv2'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        #       model.add(Dropout(0.25))

        # Block 3
        model.add(Conv2D(256, 3, activation='relu', padding='same', name='block3_conv1'))
        model.add(Conv2D(256, 3, activation='relu', padding='same', name='block3_conv2'))
        model.add(Conv2D(256, 3, activation='relu', padding='same', name='block3_conv3'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        #      model.add(Dropout(0.25))

        # Block 4
        model.add(Conv2D(512, 3, activation='relu', padding='same', name='block4_conv1'))
        model.add(Conv2D(512, 3, activation='relu', padding='same', name='block4_conv2'))
        model.add(Conv2D(512, 3, activation='relu', padding='same', name='block4_conv3'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        #     model.add(Dropout(0.25))

        model.add(Flatten())
        #        model.add(Dense(4096, activation='relu'))
        #        model.add(Dropout(0.5))
        model.add(Dense(4096, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(num_classes, activation='softmax'))

        ### Choice of optimizer
        adam = optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=1e-6)
        nadam = optimizers.nadam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.0004)
        sgd = optimizers.SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss='categorical_crossentropy', optimizer=nadam, metrics=['accuracy'])

        ### Data Augmentation
        if not data_augmentation:
            print('Not using data augmentation.')
            history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
                                validation_data=(x_test, y_test), shuffle=True)
        else:
            print('Using real-time data augmentation.')
            # This will do preprocessing and realtime data augmentation:
            datagen = ImageDataGenerator(
                featurewise_center=False,  # set input mean to 0 over the dataset
                samplewise_center=False,  # set each sample mean to 0
                featurewise_std_normalization=False,  # divide inputs by std of the dataset
                samplewise_std_normalization=False,  # divide each input by its std
                zca_whitening=False,  # apply ZCA whitening
                rotation_range=10,  # randomly rotate images in the range (degrees)
                width_shift_range=0.0,  # randomly shift images horizontally (fraction of total width)
                height_shift_range=0.0,  # randomly shift images vertically (fraction of total height)
                horizontal_flip=True,  # randomly flip images horizontally
                vertical_flip=True,  # randomly flip images vertically
                shear_range=0.1,
                zoom_range=0.1)

            ### Run the model (datagen is only defined on this branch)
            history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                                          steps_per_epoch=x_train.shape[0] // batch_size,
                                          epochs=epochs, validation_data=(x_test, y_test))

        ###Save Results

        # Dump history data into history(kf).pkl file
        with open(os.path.join(results, 'history' + str(kf) + '.pkl'), 'wb') as f:
            pickle.dump(history.history, f, pickle.HIGHEST_PROTOCOL)

        # Save Model
        model.save(os.path.join(results, 'model' + str(kf) + '.h5'))
        K.clear_session()
Example #26
	model.add(Convolution2D(nb_filters2, nb_conv2, nb_conv2, border_mode='valid'))
	model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
	model.add(Activation('tanh'))

	model.add(Convolution2D(nb_filters3, nb_conv3, nb_conv3, border_mode='valid'))
	#model.add(Dropout(0.5))

	model.add(Flatten())
	model.add(Dense(256))
	model.add(Activation('tanh'))
	model.add(Dropout(0.4))
	model.add(Dense(nb_classes))
	model.add(Activation('softmax'))

	optimizer=nadam()
	model.compile(loss='sparse_categorical_crossentropy',optimizer=optimizer, metrics=['accuracy'])

	model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,verbose=1, validation_data=(X_test, Y_test))
	score = model.evaluate(X_test, Y_test, verbose=0)

	print('Test score:', score[0])
	print('Test accuracy:', score[1])


# file name to save model
filename='homus_cnn.h5'

# save network model
model.save(filename)
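This example uses the Keras 1 API. Rough Keras 2 equivalents of the calls above, as a sketch (`nb_filters2`, `nb_conv2`, and friends come from the truncated context):

# Keras 2 renames: Convolution2D -> Conv2D, border_mode -> padding, nb_epoch -> epochs.
model.add(Conv2D(nb_filters2, (nb_conv2, nb_conv2), padding='valid'))
model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch,
          verbose=1, validation_data=(X_test, Y_test))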
Example #27
 def create_optimizer_instance(self, **d):
     return optimizers.nadam(**d)
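A hypothetical call, with keyword arguments forwarded through `**d` (values are illustrative):

# Builds a Nadam instance from a plain kwargs dict:
opt = self.create_optimizer_instance(lr=0.002, clipnorm=5.0)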
Example #28
from keras.optimizers import nadam, SGD, Adadelta
import keras.optimizers
from keras.wrappers.scikit_learn import KerasRegressor


model = Sequential()
model.add(Dense(3, activation='selu'))
model.add(Dense(5, activation='selu'))
model.add(Dense(7, activation='relu'))
model.add(Dense(11, activation='selu'))
model.add(Dense(5, activation='selu'))
#model.add(Dense(3, activation='selu'))
model.add(Dense(1, kernel_initializer='normal'))

# Candidate optimizers (nadam_mode and sgd are defined but unused;
# the model is compiled with adaDelta):
nadam_mode = nadam(lr=0.01)
sgd = SGD(lr=0.001, momentum=0.3, decay=0.2)
adaDelta = Adadelta(lr=0.01, rho=0.95, epsilon=None, decay=0.0)
model.compile(loss='mean_squared_error', optimizer=adaDelta)

history = model.fit(x=X.values, y=y.values, batch_size=16, epochs=50, verbose=1)

plt.plot(history.history['loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()

test = pd.read_csv('Income_testing.csv')
test = test.drop(['ID'], axis=1)
Example #29
	x=layers.Dense(1,kernel_initializer='normal')(x)
	model=keras.Model(inputs=inlayer, outputs=x)

	# y=layers.Dense(300,activation='tanh')(inlayer)
	# y=layers.Dense(160,activation='tanh')(y)
	# y=layers.Dense(70,activation='tanh')(y)
	# m =layers.concatenate([x, y])
	# m=layers.Dense(350,activation='tanh')(m)
	# m=layers.Dense(350,activation='tanh')(m)
	# m=layers.Dense(768,activation='tanh')(m)
	# m=layers.Dense(1,kernel_initializer='normal')(m)
	# model=keras.Model(inputs=inlayer, outputs=m)

	model.summary()

	model.compile(loss='mean_squared_error',optimizer=nadam(lr=5e-5),metrics=['mse'])
	best_model=model.fit(np.array(train_x),column_or_1d(train_y),epochs=500,batch_size=32,validation_split=0.2,callbacks=[
	EarlyStopping(monitor='val_loss',min_delta=0,patience=16,verbose=1,mode='auto',baseline=None,restore_best_weights=True)
	])
	test_y=pd.DataFrame(model.predict(test_x),index=test_x.index,columns=["SalePrice"])
	test_y=reverse(test_y)
	test_y.to_csv(f"submission_{type(model).__name__}.csv",index=True,index_label="Id")

run_sklearn=True
if run_sklearn:
	scoring='neg_mean_squared_error'
	models=[
		# DecisionTreeRegressor(),
		# KNeighborsRegressor(),
		# GradientBoostingRegressor(),
		# AdaBoostRegressor(),
Example #30
x = model.layers[7].output  # build the new head on the output of model.layers[7]
x = Flatten()(x)
x = Dense(1024, activation="relu")(x)
x = Dropout(0.5)(x)
x = Dense(384, activation="relu")(x)
x = Dropout(0.5)(x)
x = Dense(96, activation="relu")(x)
x = Dropout(0.5)(x)
predictions = Dense(30, activation="softmax")(x)

model_final = Model(inputs=model.input, outputs=predictions)

#model_final = load_model("weights_Mobile_Net.h5")
model_final.compile(loss="categorical_crossentropy", optimizer=optimizers.nadam(lr=0.00001), metrics=["accuracy"])

train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True,
                                   fill_mode="nearest",
                                   width_shift_range=0.3,
                                   height_shift_range=0.3,
                                   rotation_range=30)

test_datagen = ImageDataGenerator(rescale=1./255,
                                  horizontal_flip=True,
                                  fill_mode="nearest",
                                  zoom_range=0.3,
                                  width_shift_range=0.3,
Example #31
import numpy as np
from sklearn.model_selection import train_test_split

# pickle_in = open("cooked/data.pickle","rb")
# x_train, y_train = pickle.load(pickle_in)
# x_train = x_train.reshape(-1,120,1)
# x_train /= 3000
# y_train /= 4000
#
# x_train, x_val, y_train, y_val = x_train[:10000], x_train[10000:], y_train[:10000], y_train[10000:]

# Imports assumed by this snippet (not shown in the original):
from keras.models import Sequential
from keras.layers import Convolution1D, Reshape, LSTM, Dense
from keras.activations import elu, sigmoid
from keras.losses import mean_absolute_error
from keras.optimizers import nadam

model = Sequential()
model.add(
    Convolution1D(16,
                  kernel_size=11,
                  strides=4,
                  activation=elu,
                  input_shape=(120, 1)))  # output shape: (28 steps, 16 filters)
model.add(Reshape((16, 28)))  # regroup the 448 conv outputs as 16 steps of 28
model.add(LSTM(16, activation=sigmoid))
model.add(Dense(1))
model.compile(optimizer=nadam(), loss=mean_absolute_error)

# def train_for_epochs(n):
#     for i in range(n):
#         x_noise = np.random.normal(scale=0.05, size=x_train.shape)
#         y_noise = np.random.normal(scale=0.05, size=y_train.shape)
#         model.fit(x_train + x_noise, y_train + y_noise, epochs=3)
#         print(model.evaluate(x_val, y_val))

# model.save("models/cnn")