Example #1
def model_train(first_conv, second_conv):
    model = Sequential()
    model.add(AveragePooling2D(pool_size=(2, 2), input_shape=(300, 300, 3)))
    model.add(Convolution2D(first_conv, (3, 3), activation='relu'))
    model.add(AveragePooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(second_conv, (3, 3), activation='relu'))
    model.add(AveragePooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(20, (3, 3), activation='relu'))
    model.add(AveragePooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(5, (3, 3), activation='relu'))
    model.add(AveragePooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(32, activation='relu'))
    model.add(Dense(4, activation='relu'))
    model.build()

    model.compile(loss="mean_squared_error", optimizer="adam")
    # all_im / all_lab (training data) and es (an EarlyStopping callback)
    # are defined elsewhere in the source project.
    model.fit(all_im,
              all_lab,
              epochs=150,
              validation_split=0.1,
              shuffle=True,
              verbose=1,
              callbacks=[es])
    model.save('my_model_' + str(first_conv) + '_' + str(second_conv) + '.h5')
Example #2
def define_chosen(oldmodel, index, conv_indexes, trainable_indexes):
    newmodel = clone_model(oldmodel)
    newmodel.set_weights(oldmodel.get_weights())

    indexed_layer = conv_indexes[index - 1]

    # Rebuild a Sequential model from the chosen conv layer onwards.
    layers = newmodel.layers[indexed_layer:]
    model = Sequential(layers)
    model.build(newmodel.layers[indexed_layer - 1].output_shape)
    # model.summary()

    last_non_trainable = conv_indexes[index - 1]

    # Freeze every layer before the chosen one. The original assigned an
    # undefined `trainable` variable here; False matches the intent implied
    # by the name `last_non_trainable`.
    for i in trainable_indexes[:trainable_indexes.index(last_non_trainable)]:
        model.layers[i].trainable = False

    learning_rate = 0.1
    lr_decay = 1e-6
    sgd = optimizers.SGD(lr=learning_rate, decay=lr_decay, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    print("Setting weights")
    for i in trainable_indexes[:trainable_indexes.index(indexed_layer) + 1]:
        model.layers[i].set_weights(oldmodel.layers[i].get_weights())
        print(i)

    return model
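A hypothetical calling sketch; the index lists below are illustrative only and must match the real base model's layer layout for the function to run:

# Hypothetical usage sketch; base_model and the index lists are illustrative.
conv_indexes = [1, 3, 5]                # positions of Conv layers in base_model.layers
trainable_indexes = [0, 1, 2, 3, 4, 5]  # positions of weighted layers
sub_model = define_chosen(base_model, 2, conv_indexes, trainable_indexes)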
Example #3
def define_chosen(oldmodel, index, conv_indexes, trainable_indexes):
    newmodel = clone_model(oldmodel)
    newmodel.set_weights(oldmodel.get_weights())

    indexed_layer = conv_indexes[index - 1]

    # Rebuild a Sequential model from the chosen conv layer onwards.
    layers = newmodel.layers[indexed_layer:]
    model = Sequential(layers)
    model.build(newmodel.layers[indexed_layer - 1].output_shape)
    # model.summary()

    last_non_trainable = conv_indexes[index - 1]

    # Freeze every layer before the chosen one (the original assigned an
    # undefined `trainable` variable here; False matches the apparent intent).
    for i in trainable_indexes[:trainable_indexes.index(last_non_trainable)]:
        model.layers[i].trainable = False

    model.compile(loss='categorical_crossentropy',
                  optimizer="adam",
                  metrics=['accuracy'])
    print("Setting weights")
    for i in trainable_indexes[:trainable_indexes.index(indexed_layer) + 1]:
        model.layers[i].set_weights(oldmodel.layers[i].get_weights())

    return model
Example #4
class KerasModel:
    def __init__(self, input_size, hidden_size1, hidden_size2):
        self.model = Sequential()
        self.model.add(keras.layers.Dense(hidden_size1, use_bias=False))
        self.model.add(keras.layers.Dense(hidden_size2, use_bias=False))
        self.model.compile(keras.optimizers.Adam(0.01))
        self.model.build((1, input_size))

    def forward(self, x):
        return self.model.predict(x)
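A minimal usage sketch of the class above; the layer sizes are illustrative:

# Minimal usage sketch; sizes are illustrative.
import numpy as np
net = KerasModel(input_size=8, hidden_size1=16, hidden_size2=4)
out = net.forward(np.zeros((1, 8), dtype="float32"))  # -> shape (1, 4)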
Example #5
def __init__(self, size_list, activation="tanh"):
    """The layer sizes are one-dimensional and given as a list, from input size to output size."""
    assert len(size_list) > 1
    model = Sequential()
    for i in range(1, len(size_list) - 1):
        model.add(Dense(size_list[i], input_shape=(size_list[i - 1],)))
    model.add(Dense(size_list[-1], activation=activation))
    # build() expects the batch dimension as well
    model.build(input_shape=(None, size_list[0]))
    self.model = model
    self.size_list = size_list
    self.opt_state = []
Example #6
def define_one_but_last_training(oldmodel):
    newmodel = clone_model(oldmodel)
    newmodel.set_weights(oldmodel.get_weights())

    # Rebuild a Sequential model from the last seven layers.
    layers = newmodel.layers[-7:]
    model = Sequential(layers)
    model.build(newmodel.layers[-8].output_shape)
    # model.summary()
    # The original assigned an undefined `trainable` variable here; freezing
    # the last layer (False) matches the function name's apparent intent.
    model.layers[-1].trainable = False
    model.compile(loss='categorical_crossentropy',
                  optimizer="adam",
                  metrics=['accuracy'])

    model.layers[-1].set_weights(oldmodel.layers[-1].get_weights())
    model.layers[-5].set_weights(oldmodel.layers[-5].get_weights())
    model.layers[-7].set_weights(oldmodel.layers[-7].get_weights())

    return model
Example #7
    def _create_model(self, learning_rate):
        model = Sequential()

        model.add(Conv3D(64, (3, 3, 3)))
        model.add(LeakyReLU(alpha=0.1))
        model.add(MaxPooling3D((2, 2, 2), strides=(2, 2, 2)))

        model.add(Conv3D(128, (3, 3, 3)))
        model.add(LeakyReLU(alpha=0.1))
        model.add(MaxPooling3D((2, 2, 2), strides=(2, 2, 2)))

        model.add(Conv3D(256, (3, 3, 3)))
        model.add(LeakyReLU(alpha=0.1))
        model.add(MaxPooling3D((2, 2, 2), strides=(2, 2, 2)))

        model.add(Conv3D(512, (3, 3, 3)))
        model.add(LeakyReLU(alpha=0.1))
        model.add(MaxPooling3D((2, 2, 2), strides=(2, 2, 2)))

        model.add(Flatten())

        # Now the dense classifier
        # model.add(Dropout(0.4))
        model.add(Dense(100))
        # model.add(Dropout(0.4))
        model.add(Dense(100))
        # model.add(Dropout(0.4))
        # model.add(Dropout(0.4))
        # A 1-unit softmax is constant; sigmoid is the correct pairing with
        # binary cross-entropy.
        model.add(Dense(1, activation='sigmoid'))

        model.build(input_shape=(None, 200, 192, 192, 3))
        model.summary()  # TODO make this be called separately.

        opt = keras.optimizers.SGD(lr=learning_rate)
        model.compile(loss='binary_crossentropy',
                      optimizer=opt,
                      metrics=['accuracy', 'mean_squared_error'])

        self._model = model

        return model
    print("Initializing Model")
    with strategy.scope():
        back_layers_o = load_model(
            "/media/0/Network/VGG16/pretrained_model/fc123_model.h5")
        back_layers_o.layers[1].trainable = False
        back_layers_o.layers[3].trainable = False
        back_layers_o.load_weights(
            "/media/0/Network/VGG16/pretrained_model/fc123_weights.h5")
        back_layers = Sequential()
        for layer in back_layers_o.layers:
            back_layers.add(layer)
        sgd_optimizer = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
        back_layers.compile(optimizer=sgd_optimizer,
                            loss='categorical_crossentropy',
                            metrics=['accuracy'])
        back_layers.build([None, 7, 7, 512])
        back_layers.summary()

    print("Training start")
    for Epoch in range(1, 11):
        file_name = "/media/1/Network/VGG16/weights/fc/2/" + str(
            Packet_index + 1) + "_" + str(Epoch) + ".h5"
        if os.path.exists(file_name):
            continue
        print("[ ", Packet_index + 1, ":",
              Packet_index + Packet_number_to_erase, " ] Packet error with [ ",
              Epoch, " ] Epochs training")
        back_layers.fit(training_feature_with_error,
                        training_label,
                        batch_size=256,
                        epochs=1,
Example #9
        data = "%f|" % acc
        f.write(data)
        gc.collect()

        # Loading the retrained model
        for Epoch in range(1, 11):
            file_name = "../Back_layers_weights/" + str(
                Packet_number_to_erase) + "_packet_error_weights/" + str(
                    Packet_index + 1) + "_Packet_error_" + str(Epoch) + ".h5"
            print("load ", file_name)
            original_retrained_back_layers = load_model(file_name)
            retrained_back_layers = Sequential(
                [layer for layer in original_retrained_back_layers.layers[:2]])
            retrained_back_layers.add(original_back_layers.layers[2])
            retrained_back_layers.add(original_back_layers.layers[3])
            retrained_back_layers.build([None, 7, 7, 512])
            retrained_back_layers.compile(optimizer=sgd_optimizer,
                                          loss='categorical_crossentropy',
                                          metrics=['accuracy'])
            #retrained_back_layers.summary()
            # drop the reference so gc can free the loaded model
            original_retrained_back_layers = None
            del original_retrained_back_layers
            gc.collect()

            # Evaluate the retrained model on features with packet error
            print("retrained back_layer with [", Packet_index + 1, ":",
                  Packet_index + Packet_number_to_erase,
                  "]th Packet error trained with [", Epoch, "] Epochs")
            loss, acc = retrained_back_layers.evaluate(test_data_with_error,
                                                       label)
            data = "%f|" % acc
Example #10
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Sequential, optimizers, losses

# First specify the optimizer, loss function, and evaluation metrics the
# network uses via the compile() function:
# import the optimizer and loss-function modules
# use the Adam optimizer with a 0.01 learning rate, and a cross-entropy loss
# that already includes the Softmax
# build a 5-layer fully connected network
network = Sequential([
    layers.Dense(256, activation='relu'),
    layers.Dense(128, activation='relu'),
    layers.Dense(64, activation='relu'),
    layers.Dense(32, activation='relu'),
    layers.Dense(10)
])
network.build(input_shape=(None, 28 * 28))
network.summary()
network.compile(
    optimizer=optimizers.Adam(lr=0.01),
    loss=losses.CategoricalCrossentropy(from_logits=True),
    metrics=['accuracy']  # set the metric to accuracy
)
# 8.2.2 Model training
# Once the model is assembled, the training and validation datasets can be
# fed in through the fit() function.
# Use train_db as the training set and val_db as the validation set; train
# for 5 epochs, validating once every 2 epochs.
# The training record is returned and kept in `history`.
history = network.fit(train_db,
                      epochs=5,
                      validation_data=val_db,
                      validation_freq=2)
# Here train_db is a tf.data.Dataset object; NumPy arrays can be passed in as
# well. epochs sets the number of training epochs, while validation_data and
# validation_freq set the dataset used for validation (testing) and how often
# to validate. Running the code above trains and validates the network. fit()
# returns a record of the run, history, whose history.history attribute is a
# dictionary holding the loss, the metrics, and other items logged during
# training:
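# For instance, a sketch of inspecting that record after training (the key
# names below follow the compile() settings above):
print(history.history.keys())  # e.g. dict_keys(['loss', 'accuracy', 'val_loss', 'val_accuracy'])
print(history.history['loss'])           # per-epoch training loss
print(history.history['val_accuracy'])   # logged only on validation epochs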
Example #11
model.add(Activation('relu'))
model.add(Conv2D(16, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(32, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(2))
model.add(Activation('softmax'))

# build() needs the batch dimension and must run before summary()
model.build(input_shape=(None, 500, 500, 1))
model.summary()
model.compile(optimizer='adam',
              loss="categorical_crossentropy",
              metrics=['accuracy'])
model.fit(x, y, steps_per_epoch=2, epochs=30)

model.save("first.h5")
# recognizer.train(x_train,np.array(y_labels))
# recognizer.save("trainner.yml")
Example #12
import math
import sys
import gc

# assumed imports for the rest of this snippet (not shown in the original)
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import load_model, Sequential

tf.random.set_seed(960312)
"""
Using the features previously extracted up to pooling4,
extract the features between pooling 4 and pooling 5.
"""

# Splitting VGG
pretrained_model = load_model("../pretrained_model/vgg_model.h5")
front_layers = Sequential(
    [layer for layer in pretrained_model.layers[15:19]])  # runs up to max_pooling 4
#SGD_optimizer = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
front_layers.build((None, 14, 14, 512))
#front_layers.compile(optimizer=SGD_optimizer, loss='categorical_crossentropy',metrics=['accuracy'])
front_layers.summary()

# ########################## training start!!!
print("----training start!!!!----")
features = np.load("../extracted_feature/whole_shuffle/train_features_0.npy")
label = np.load("../extracted_feature/whole_shuffle/train_label_0.npy")

Number_of_features = int(len(features) / 3000)
features_predict = []

# the whole set doesn't fit on the GPU at once, so feed it in chunks
for i in range(0, Number_of_features + 1):
    features_predict.extend(
Example #13
value_list_training = value_list[:split]
value_list_test = value_list[split:]

image_training_array = np.asarray(image_list_training)
image_test_array = np.asarray(image_list_test)

value_training_array = np.asarray(value_list_training)
value_test_array = np.asarray(value_list_test)

# DEFINE THE MODEL

model = Sequential()
model.add(MaxPooling2D(pool_size=(4, 4), input_shape=(300, 300, 3)))
model.add(Convolution2D(6, (5, 5), activation='relu'))
model.add(Convolution2D(4, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Flatten())
model.add(Dense(16, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(4, activation='relu'))
model.build()

model.compile(loss="mean_squared_error", optimizer="adam")

# TRAIN THE MODEL

model.fit(image_training_array, value_training_array, batch_size=200, epochs=1000)

model.save('my_model_5.h5')
Example #14
# assumed imports for this snippet (not shown in the original)
import os
import pickle

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.callbacks import EarlyStopping


class AutoEncoder:
    def __init__(self,
                 date_range,
                 symbol="AAPL",
                 data_file="calibration_data"):
        self.data = None
        for day in date_range:
            path = "fundamental_{}_{}.bz2".format(symbol,
                                                  day.strftime("%Y%m%d"))
            path = os.path.join(data_file, path)
            if os.path.exists(path):
                prices = pd.read_pickle(path, compression="bz2")
                if self.data is None:
                    self.data = prices.values.T
                else:
                    self.data = np.vstack([self.data, prices.values.T])
        scaler = MinMaxScaler()
        self.data_scaled = np.array(
            [scaler.fit_transform(d.reshape(-1, 1)) for d in self.data])
        self.data_scaled = self.data_scaled[:, :, 0]
        print("The data shape is", self.data_scaled.shape)

    def build_model(self, encode_length=16, activation="relu"):
        n_in = self.data_scaled.shape[1]
        self.encode_length = encode_length

        self.model = Sequential()
        self.model.add(Dense(128, activation=activation, name="encoder_l1"))
        self.model.add(Dense(64, activation=activation, name="encoder_l2"))
        self.model.add(
            Dense(encode_length, name="encoder_output", activation=None))
        self.model.add(Dense(64, activation=activation))
        self.model.add(Dense(128, activation=activation))
        self.model.add(Dense(n_in, activation=None))

        # build() needs an explicit input shape, since no layer declares one;
        # build before compiling.
        self.model.build(input_shape=(None, n_in))
        self.model.compile(optimizer='adam', loss='mse')

        return self.model

    def _reshape_data(self, data):
        if len(data.shape) == 3:
            return data
        if len(data.shape) == 2:
            return data[:, :, np.newaxis]
        if len(data.shape) == 1:
            return data[np.newaxis, :, np.newaxis]

    def train_model(self,
                    test_size=0.1,
                    val_size=0.1,
                    batch_size=16,
                    epochs=200,
                    stop_patience=10,
                    plot_test=True,
                    plot_history=True):
        x = self.data_scaled
        if test_size != 0.:
            x_train, x_test, y_train, y_test = train_test_split(
                x, x, test_size=test_size, random_state=42)
            print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
        else:
            x_train, y_train = x, x

        early_stopping = EarlyStopping(monitor='val_loss',
                                       patience=stop_patience,
                                       mode="min",
                                       verbose=2,
                                       restore_best_weights=True)
        result = self.model.fit(x_train,
                                y_train,
                                batch_size=batch_size,
                                epochs=epochs,
                                validation_split=val_size / (1 - test_size),
                                callbacks=[early_stopping])
        if plot_test:
            y_test_predict = self.model.predict(x_test)
            print(
                "test loss:",
                np.sum((y_test_predict - y_test)**2) /
                (y_test.shape[0] * y_test.shape[1]))
            plt.plot(y_test[0])
            plt.plot(y_test_predict[0])
            plt.ylabel("Scaled Price")
            plt.xlabel("Minutes")
            plt.title("Encode length {}".format(self.encode_length))
            plt.legend(["Real", "Predict"])
            plot_name = "sample"
            plt.savefig('{}_{}.png'.format(plot_name, self.encode_length))
            plt.show()
        if plot_history:
            self.loss_plot(result.history)

        return result

    def loss_plot(self, history, plot_name='Loss'):
        loss = np.asarray(history['loss'])
        val_loss = np.asarray(history['val_loss'])
        plt.style.use('seaborn')
        plt.figure(figsize=(12, 9), dpi=100)
        plt.grid(True)
        plt.plot(loss)
        plt.plot(val_loss)
        plt.legend(['loss', 'val_loss'])
        plt.title("Encode length {}".format(self.encode_length))
        plt.xlabel("Epochs")
        plt.ylabel("MSE")
        plt.savefig('{}_{}.png'.format(plot_name, self.encode_length))
        plt.show()

    def save_feature(self, plot_feature=False):
        feature_name = "AutoEncoderFeature_{}.npy".format(self.encode_length)
        encoder = Model(inputs=self.model.input,
                        outputs=self.model.get_layer('encoder_output').output)
        feature = encoder.predict(self.data_scaled)
        np.save("feature/" + feature_name, feature)

        if plot_feature:
            if self.encode_length == 8:
                fig, ax = plt.subplots(ncols=4, nrows=2, figsize=(12, 9))
                axes = ax.flatten()
                for i in range(feature.shape[1]):
                    sns.distplot(feature[:, i], ax=axes[i])
                plt.show()
                return

            for i in range(feature.shape[1]):
                sns.distplot(feature[:, i])
                plt.show()
            return

    def save_model(self):
        self.model.save("model/AutoEncoder_{}.h5".format(self.encode_length))

    def save_encoder_ws(self):
        w1, b1 = self.model.get_layer('encoder_l1').get_weights()
        w2, b2 = self.model.get_layer('encoder_l2').get_weights()
        w3, b3 = self.model.get_layer('encoder_output').get_weights()
        with open("model/AutoEncoder_w_{}.h5".format(self.encode_length),
                  "wb") as f:
            pickle.dump([w1, b1, w2, b2, w3, b3], f)

    def encode(self, x):
        encoder = Model(inputs=self.model.input,
                        outputs=self.model.get_layer('encoder_output').output)
        return encoder.predict(x)
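A hypothetical end-to-end run of the class above; the date range, encode length, and epoch count are illustrative, not from the source project:

# Hypothetical usage sketch; arguments are illustrative.
ae = AutoEncoder(pd.date_range("2019-06-03", "2019-06-28"), symbol="AAPL")
ae.build_model(encode_length=16)
ae.train_model(epochs=50)
ae.save_model()
ae.save_feature()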
Example #15
File: t2.py  Project: swq90/python
# build dataset objects from the split training data
X_train, X_test, y_train, y_test = train_test_split(normed_train_data,
                                                    y,
                                                    test_size=TEST_SIZE,
                                                    random_state=42)

X_train = X_train.astype('float32')
X_test = X_test.astype('float32')

y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# build an n-layer fully connected network
network = Sequential([layers.Dense(10, activation='relu'), layers.Dense(2)])
network.build(input_shape=(None, 40))
network.summary()

# set the metric to accuracy, learning rate 0.01, softmax cross-entropy as the loss
network.compile(optimizer=optimizers.Adam(lr=0.01),
                loss=losses.CategoricalCrossentropy(from_logits=True),
                metrics=['accuracy'])

summary_writer = tf.summary.create_file_writer(os.getcwd())

# Use train_db as the training set and val_db as the validation set, train 200
# epochs, validate every 5 epochs; the training record is returned in history.
# (The call below actually trains on the NumPy arrays for 10 epochs.)
history = network.fit(X_train,
                      y_train,
                      batch_size=32,
                      epochs=10,
                      validation_data=(X_test, y_test))