示例#1
0
# Plot the aligned test-set predictions produced earlier in the notebook.
plt.plot(testPredictPlot)
plt.show()

# In[ ]:

plt.plot(testPredictPlot)
plt.show()

# In[ ]:

# Render the loaded model's architecture to PNG and inline SVG.
from keras.utils import plot_model
plot_model(loaded_model, to_file='model.png')
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot

SVG(model_to_dot(loaded_model).create(prog='dot', format='svg'))
plot_model(loaded_model, to_file='model_plot.png')

# In[ ]:

SVG(model_to_dot(loaded_model).create(prog='dot', format='svg'))

# In[ ]:

# Compare ground truth against predictions; the plot title is the MSE.
plt.plot(y_test)
plt.plot(p)
plt.legend(['testY', 'p'], loc='upper right')
plt.title(mean_squared_error(y_test, p))
# plt.savefig(datetime.datetime.now())
plt.show()
print(mean_squared_error(y_test, p))
def plot_keras_model(model, show_shapes=True, show_layer_names=True):
    """Render *model*'s layer graph as an inline SVG for notebook display."""
    from IPython.display import SVG
    from keras.utils.vis_utils import model_to_dot
    graph = model_to_dot(model,
                         show_shapes=show_shapes,
                         show_layer_names=show_layer_names)
    return SVG(graph.create(prog='dot', format='svg'))
示例#3
0
def show_model(model):
    """Return an SVG diagram of *model* with layer shapes shown and names hidden."""
    graph = model_to_dot(model, show_shapes=True, show_layer_names=False)
    svg_bytes = graph.create(prog='dot', format='svg')
    return SVG(svg_bytes)
示例#4
0
def visualize_keras_model(model):
    """Return an inline SVG rendering of *model*'s architecture graph."""
    rendered = model_to_dot(model).create(prog='dot', format='svg')
    return SVG(rendered)
示例#5
0
# Fit the scalers on the raw inputs/targets, then transform both to scale.
print(scaler_x.fit(x))
xscale = scaler_x.transform(x)
print(scaler_y.fit(y))
yscale = scaler_y.transform(y)
X_train, X_test, Y_train, Y_test = train_test_split(xscale, yscale)

# Build and train the model (GK2020() is defined elsewhere in the project).
GK2020_Ver1 = GK2020(x.shape[1:])
GK2020_Ver1.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
GK2020_Ver1.fit(X_train, Y_train, epochs=500, batch_size=50, verbose=1)

# Model Evaluation
# NOTE(review): evaluates on the full scaled dataset rather than the held-out
# test split — confirm this is intentional.
preds = GK2020_Ver1.evaluate(x=xscale, y=yscale)
print()
print("Loss=", str(preds[0]))
print("Test Accuracy=", str(preds[1]))

# Model Information
GK2020_Ver1.summary()
plot_model(GK2020_Ver1, to_file='GK2020_Ver1.png')
SVG(model_to_dot(GK2020_Ver1).create(prog='dot', format='svg'))

# Model Prediction for graph generation
# Predictions are inverse-transformed back to the original target scale.
ynew = GK2020_Ver1.predict(xscale)
y_pred = scaler_y.inverse_transform(ynew)
for i in range(0, len(y_pred)):
    print(y_pred[i, 0])

# # Save the model!
# GK2020_Ver1.save_weights('results/Models_Trained/GK2020_Ver1_weights (400 epochs, 2tanh + 1linear).h5')
# GK2020_Ver1.save('results/Models_Trained/GK2020_Ver1 (400 epochs, 2tanh + 1linear).h5')
示例#6
0
def plotm(model):
    """Display *model*'s architecture (with layer shapes) inline as SVG."""
    dot_graph = model_to_dot(model, show_shapes=True)
    svg_bytes = dot_graph.create(prog='dot', format='svg')
    display(SVG(svg_bytes))
示例#7
0
!pip install pydot==1.2.3

import pydot
from keras.utils.vis_utils import plot_model

pydot.__version__

#check out the issue, the code is not working
plot_model(classifier, to_file='model_plot.png', show_shapes=True, show_layer_names=True)

#check out the issue, the code is not working
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot

SVG(model_to_dot(classifier).create(prog='dot', format='svg'))

classifier.layers

for layer in classifier.layers:
    print layer.input_shape #how the input data is coming to hidden/dense layers

"""You can get to know that: there is no activation function present "in" the first layer neurons at all. the very first layer is your raw data so no activation function. 

The next layer (i.e. the 1st hidden layer) applies the activation function as well as all subsequent layers.
"""

for layer in classifier.layers:
    print layer.output_shape

"""# Classification using Iris Dataset"""
示例#8
0
def get_model(X, y):
    """Build, train, and evaluate a binary (2-class softmax) LSTM text classifier.

    X -- integer-encoded, padded sequences (2-D array; X.shape[1] is the
         fixed sequence length).
    y -- one-hot labels with 2 columns (sarcasm / non-sarcasm).
    Returns the trained Keras model.

    NOTE(review): the last `validation_size` rows of the test split are used
    for per-class accuracy; the remainder is used for evaluate().
    """
    ####################################
    # Model

    X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.33,
                                                        random_state=42)
    print(X_train.shape, Y_train.shape)
    print(X_test.shape, Y_test.shape)

    # (sic: "features") — vocabulary size for the embedding layer.
    max_fatures = 2000
    embed_dim = 128
    lstm_out = 196

    model = Sequential()
    model.add(Embedding(max_fatures, embed_dim, input_length=X.shape[1]))
    model.add(SpatialDropout1D(0.4))
    model.add(LSTM(lstm_out, dropout=0.2, recurrent_dropout=0.2))
    model.add(Dense(2, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    # Inline architecture rendering (return value intentionally unused here).
    SVG(model_to_dot(model).create(prog='dot', format='svg'))

    # Training
    batch_size = 32
    history = model.fit(X_train,
                        Y_train,
                        epochs=25,
                        batch_size=batch_size,
                        verbose=2)

    # Testing the model
    validation_size = 1500

    X_validate = X_test[-validation_size:]
    Y_validate = Y_test[-validation_size:]
    X_test = X_test[:-validation_size]
    Y_test = Y_test[:-validation_size]
    score, acc = model.evaluate(X_test,
                                Y_test,
                                verbose=2,
                                batch_size=batch_size)
    print("score: %.2f" % (score))
    print("acc: %.2f" % (acc))

    # Per-class accuracy: class 0 = negative (non-sarcasm), class 1 = positive.
    pos_cnt, neg_cnt, pos_correct, neg_correct = 0, 0, 0, 0
    for x in range(len(X_validate)):

        result = model.predict(X_validate[x].reshape(1, X_test.shape[1]),
                               batch_size=1,
                               verbose=2)[0]

        if np.argmax(result) == np.argmax(Y_validate[x]):
            if np.argmax(Y_validate[x]) == 0:
                neg_correct += 1
            else:
                pos_correct += 1

        if np.argmax(Y_validate[x]) == 0:
            neg_cnt += 1
        else:
            pos_cnt += 1

    # NOTE(review): divides by pos_cnt/neg_cnt — raises ZeroDivisionError if
    # the validation slice contains only one class.
    print("Sarcasm_acc", pos_correct / pos_cnt * 100, "%")
    print("Non-Sarcasm_acc", neg_correct / neg_cnt * 100, "%")
    return model
示例#9
0
# Third conv layer of the MDP (image) branch, then flatten to a dense feature.
mdp_third_conv2d = Conv2D(64, (3, 3), activation='relu',
                          strides=(1, 1))(mdp_second_conv2d)
mdp_flatten = Flatten()(mdp_third_conv2d)
mdp_first_dense = Dense(512, activation='relu')(mdp_flatten)

# Second input branch: DFA state embedding.
dfa_input = Input(shape=dfa_emb_shape, name='DFA_EMB')
#selector_input = Input(shape=dfa_emb_shape, name='StateSelector')
#elem_mult = multiply([dfa_input, selector_input], trainable=False, name='Elem-wise')
#dfa_flatten = Flatten()(dfa_input)

# Fuse image features with the DFA embedding; linear head over the actions.
merged = concatenate([mdp_first_dense, dfa_input], axis=1)
output = Dense(nb_actions, activation='linear')(merged)

model = Model(inputs=[mdp_input, dfa_input], outputs=output)
print_summary(model)
# Dump the architecture graph as a Graphviz .dot file.
model_dot = model_to_dot(model, show_shapes=True, show_layer_names=True)
print(model_dot, file=open('model_arch.dot', 'w+'))

# Regexer
# Converts a regular expression into a DFA via a local web service.
URL = 'http://localhost:8080'
service = 'regex2dfa2dot'
letter = 'q'
regex = '[0-3]*(23){2}[0-3]*'
regexer = Regexer(URL, service)

dfa_triple = regexer.to_dfa(regex)

# Finally, we configure and compile our agent. You can use every
# built-in Keras optimizer and even the metrics!

memory = SequentialMemory(limit=1000000, window_length=WINDOW_LENGTH)
示例#10
0
# One-hot encode the 10-class labels for categorical cross-entropy.
onehot_train = keras.utils.to_categorical(y_train, num_classes=10)
onehot_test = keras.utils.to_categorical(y_test, num_classes=10)

# Small AlexNet-style CNN over 32x32x3 images.
model.add(Conv2D(64, (5, 5), input_shape=(32, 32, 3)))
model.add(Activation('relu'))
model.add(MaxPool2D([3, 3], 2))
model.add(Conv2D(64, (5, 5), activation='relu'))
model.add(MaxPool2D([3, 3], 2))
model.add(Flatten())
model.add(Dense(384, activation='relu'))
model.add(Dense(192, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()
from keras.utils.vis_utils import plot_model

plot_model(model, to_file="keras_alexnet.png")

from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot

SVG(model_to_dot(model).create(prog="dot", format='svg'))

from keras import optimizers

# Plain SGD with momentum; then train and evaluate on the one-hot labels.
sgd = optimizers.SGD(lr=0.01, momentum=0.9)
model.compile(optimizer=sgd,
              loss="categorical_crossentropy",
              metrics=['accuracy'])
model.fit(x=X_train, y=onehot_train, epochs=50, batch_size=20)
model.evaluate(X_test, onehot_test, batch_size=128)
示例#11
0
# Dropout on each of the five digit-position branches.
x1 = Dropout(0.5)(x1)
x2 = Dropout(0.5)(x2)
x3 = Dropout(0.5)(x3)
x4 = Dropout(0.5)(x4)
x5 = Dropout(0.5)(x5)

# One softmax classification head per digit position.
x1 = Dense(num_classes, activation='softmax', name='x1')(x1)
x2 = Dense(num_classes, activation='softmax', name='x2')(x2)
x3 = Dense(num_classes, activation='softmax', name='x3')(x3)
x4 = Dense(num_classes, activation='softmax', name='x4')(x4)
x5 = Dense(num_classes, activation='softmax', name='x5')(x5)

# Multi-output model: five digit heads plus `numout` (defined elsewhere).
model = Model(inputs=a, outputs=[x1, x2, x3, x4, x5, numout])

display(SVG(model_to_dot(model).create(prog='dot', format='svg')))
# load weights
# model.load_weights("weights.forksvhmbest.hdf5")

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

# In[42]:

# checkpoint
# Persist only the best (lowest monitored loss) weights during training.
filepath = "weights.forksvhmbest.hdf5"
checkpointer = ModelCheckpoint(filepath, verbose=1, save_best_only=True)
callbacks_list = [checkpointer]

# In[43]:
示例#12
0
# Train the ship-detection model and persist it to disk.
shipModel.fit(x = X_train, y = y_train, epochs = 30, batch_size = 32)
shipModel.save('/Users/vlad/Projects/ships-in-satellite-imagery/shipModel.h5')
#shipModel = load_model('/Users/vlad/Projects/ships-in-satellite-imagery/shipModel.h5')


#Test/evaluate the model
preds = shipModel.evaluate(x = X_test, y = y_test)
print()
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))

    
shipModel.summary()

# Save the architecture as PNG and render it inline as SVG.
plot_model(shipModel, to_file='ShipModel.png')
SVG(model_to_dot(shipModel).create(prog='dot', format='svg'))




#CREATE A DICTIONARY OF TYPES OF IMAGES
class_dict = {0:'Ship: no', 1:'Ship: yes'}

#CLASSIFY IMAGES BY SHIP OR NO SHIP
def classify_random_image():
    """Open a randomly chosen image from the global `filename` list of paths.

    NOTE(review): this snippet appears truncated — `imageio` is imported but
    never used, and the opened image is neither classified nor returned.
    """
    from PIL import Image
    import random
    import imageio
    # Each `filename` entry looks indexable; [0] presumably holds the path —
    # TODO confirm against where `filename` is built.
    image_path = filename[random.randrange(0,len(filename),1)][0]
    image = Image.open(image_path)
示例#13
0
# Build and train the smile-detection model (model() is defined elsewhere).
smile_detection = model(X_train.shape[1:])
smile_detection.compile(optimizer="Adam",
                        loss="binary_crossentropy",
                        metrics=["accuracy"])

smile_detection.fit(x=X_train, y=Y_train, epochs=50, batch_size=50)

predict = smile_detection.evaluate(x=X_test, y=Y_test)
print()
print("Loss = " + str(predict[0]))
print("Test Accuracy = " + str(predict[1]))

# Run the trained model on eight local test images "1.jpg" .. "8.jpg".
image_path = [str(i) + ".jpg" for i in range(1, 9)]

for it in image_path:
    img = image.load_img(it, target_size=(64, 64))
    imshow(img)

    # Shape the image into a (1, 64, 64, 3) batch and normalize it.
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    print(smile_detection.predict(x))

smile_detection.summary()
#this is such a cool function.

plot_model(smile_detection, to_file='HappyModel.png')
SVG(model_to_dot(smile_detection).create(prog='dot', format='svg'))
示例#14
0
from keras import models, layers
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model

# Instantiate the neural network
network = models.Sequential()

# Add a fully-connected layer with a ReLU activation function
network.add(layers.Dense(units=16, activation="relu", input_shape=(10, )))

# Add a fully-connected layer with a ReLU activation function
network.add(layers.Dense(units=16, activation="relu"))

# Add a fully-connected layer with a sigmoid activation function
network.add(layers.Dense(units=1, activation="sigmoid"))

# Visualize the network architecture
SVG(model_to_dot(network, show_shapes=True).create(prog="dot", format="svg"))

# Save the visualized network architecture diagram to a file
plot_model(network, show_shapes=True, to_file="network_normal.png")

# Below, a simpler rendering of the same network (no shapes shown)
SVG(model_to_dot(network, show_shapes=False).create(prog="dot", format="svg"))
plot_model(network, show_shapes=False, to_file="network_simplicity.png")
示例#15
0
# Run inference with the trained ResNet on the prepared input `x`.
print(model.predict(x))


# You can also print a summary of your model by running the following code.

# In[ ]:

model.summary()


# Finally, run the code below to visualize your ResNet50. You can also download a .png picture of your model by going to "File -> Open...-> model.png".

# In[ ]:

# Save the architecture to PNG and render it inline as SVG.
plot_model(model, to_file='model.png')
SVG(model_to_dot(model).create(prog='dot', format='svg'))


# <font color='blue'>
# **What you should remember:**
# - Very deep "plain" networks don't work in practice because they are hard to train due to vanishing gradients.  
# - The skip-connections help to address the Vanishing Gradient problem. They also make it easy for a ResNet block to learn an identity function. 
# - There are two main type of blocks: The identity block and the convolutional block. 
# - Very deep Residual Networks are built by stacking these blocks together.

# ### References 
# 
# This notebook presents the ResNet algorithm due to He et al. (2015). The implementation here also took significant inspiration and follows the structure given in the github repository of Francois Chollet: 
# 
# - Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun - [Deep Residual Learning for Image Recognition (2015)](https://arxiv.org/abs/1512.03385)
# - Francois Chollet's github repository: https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
示例#16
0
    # Compile model
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model


# %%
num_labels = y.shape[1]  # Total number of output labels
num_inputs = X.shape[1]  # Total number of input variables

model_inst = buildModel(num_inputs, num_labels)

# Render the freshly built model's architecture inline.
SVG(
    model_to_dot(model_inst, show_shapes=True,
                 show_layer_names=True).create(prog='dot', format='svg'))

# Stop training when validation loss fails to improve for 2 epochs.
early_stop = EarlyStopping(monitor='val_loss',
                           min_delta=0,
                           patience=2,
                           verbose=0,
                           mode='auto')

# 80% training set, 20% validation set

x_val = X[4348:]
X = X[:4348]

y_val = y[4348:]
y = y[:4348]
示例#17
0
def plot_model_graph(model):
    """Save *model*'s architecture to 'model.png' and return it as inline SVG.

    Fixed: the original built the SVG object and discarded it, so calling the
    function from a notebook displayed nothing. Returning the SVG lets Jupyter
    render it; callers that ignored the old None return are unaffected.
    """
    plot_model(model, to_file='model.png')
    return SVG(model_to_dot(model).create(prog='dot', format='svg'))
示例#18
0
File: test.py  Project: wenyifeng1123/GUI
    def on_wyInputData_clicked(self):
        """Handle the 'choose input data' button.

        Loads a user-selected HDF5 test set, forwards it through every layer
        of the already-loaded model to collect per-layer activations, pulls
        each layer's weights out of the TF graph, and populates the GUI
        widgets that visualize them.

        Side effects (attributes written): inputData_name, inputData,
        twoInput, modelDimension, subset_selection(_2), modelInput(2),
        layer_index_name, act, layers_by_depth, weights, LayerWeights,
        totalSS — plus hiding/clearing and repopulating several widgets.
        """
        self.inputData_name = QFileDialog.getOpenFileName(
            self, 'Choose the file', '.', 'H5 files(*.h5)')[0]
        if len(self.inputData_name) == 0:
            pass
        else:
            if len(self.openfile_name) != 0:
                # Reset the visualization widgets before loading new data.
                self.horizontalSliderPatch.hide()
                self.horizontalSliderSlice.hide()
                self.labelPatch.hide()
                self.labelSlice.hide()
                self.lcdNumberSlice.hide()
                self.lcdNumberPatch.hide()
                self.matplotlibwidget_static.mpl.fig.clf()

                self.inputData = h5py.File(self.inputData_name, 'r')
                # the number of the input
                # A second input branch exists when the file carries *_p2 keys.
                for i in self.inputData:
                    if i == 'X_test_p2' or i == 'y_test_p2':
                        self.twoInput = True
                        break

                if self.inputData['X_test'].ndim == 4:
                    self.modelDimension = '2D'
                    # NOTE(review): fixed patch range 2052:2160 — confirm it
                    # matches the dataset layout.
                    X_test = self.inputData['X_test'][:, 2052:2160, :, :]
                    X_test = np.transpose(np.array(X_test), (1, 0, 2, 3))
                    self.subset_selection = X_test

                    if self.twoInput:
                        X_test_p2 = self.inputData[
                            'X_test_p2'][:, 2052:2160, :, :]
                        X_test_p2 = np.transpose(np.array(X_test_p2),
                                                 (1, 0, 2, 3))
                        self.subset_selection_2 = X_test_p2

                elif self.inputData['X_test'].ndim == 5:
                    self.modelDimension = '3D'
                    X_test = self.inputData['X_test'][:, 0:20, :, :, :]
                    X_test = np.transpose(np.array(X_test), (1, 0, 2, 3, 4))
                    self.subset_selection = X_test

                    if self.twoInput:
                        X_test_p2 = self.inputData['X_test_p2'][:,
                                                                0:20, :, :, :]
                        X_test_p2 = np.transpose(np.array(X_test_p2),
                                                 (1, 0, 2, 3, 4))
                        self.subset_selection_2 = X_test_p2

                else:
                    print('the dimension of X_test should be 4 or 5')

                if self.twoInput:
                    self.radioButton_3.show()
                    self.radioButton_4.show()

                plot_model(self.model, 'model.png')
                if self.twoInput:
                    self.modelInput = self.model.input[0]
                    self.modelInput2 = self.model.input[1]
                else:
                    self.modelInput = self.model.input

                # Map each layer name to its index in model.layers.
                self.layer_index_name = {}
                for i, layer in enumerate(self.model.layers):
                    self.layer_index_name[layer.name] = i

                # Input-layer activations: just forward the raw test data.
                for i, layer in enumerate(self.model.input_layers):

                    get_activations = K.function(
                        [layer.input, K.learning_phase()], [
                            layer.output,
                        ])

                    if i == 0:
                        self.act[layer.name] = get_activations(
                            [self.subset_selection, 0])[0]
                    elif i == 1:
                        self.act[layer.name] = get_activations(
                            [self.subset_selection_2, 0])[0]
                    else:
                        print('no output of the input layer is created')

                # Propagate activations layer by layer; layers with 2-5
                # inputs get a K.function over all of their input tensors.
                for i, layer in enumerate(self.model.layers):
                    # input_len=layer.input.len()
                    if hasattr(layer.input, "__len__"):
                        if len(layer.input) == 2:
                            inputLayerNameList = []
                            for ind_li, layerInput in enumerate(layer.input):
                                inputLayerNameList.append(
                                    self.simpleName(layerInput.name))

                            get_activations = K.function([
                                layer.input[0], layer.input[1],
                                K.learning_phase()
                            ], [
                                layer.output,
                            ])
                            self.act[layer.name] = get_activations([
                                self.act[inputLayerNameList[0]],
                                self.act[inputLayerNameList[1]], 0
                            ])[0]

                        elif len(layer.input) == 3:
                            inputLayerNameList = []
                            for ind_li, layerInput in enumerate(layer.input):
                                inputLayerNameList.append(
                                    self.simpleName(layerInput.name))

                            get_activations = K.function([
                                layer.input[0], layer.input[1], layer.input[2],
                                K.learning_phase()
                            ], [
                                layer.output,
                            ])
                            self.act[layer.name] = get_activations([
                                self.act[inputLayerNameList[0]],
                                self.act[inputLayerNameList[1]],
                                self.act[inputLayerNameList[2]], 0
                            ])[0]

                        elif len(layer.input) == 4:
                            inputLayerNameList = []
                            for ind_li, layerInput in enumerate(layer.input):
                                inputLayerNameList.append(
                                    self.simpleName(layerInput.name))

                            get_activations = K.function([
                                layer.input[0], layer.input[1], layer.input[2],
                                layer.input[3],
                                K.learning_phase()
                            ], [
                                layer.output,
                            ])
                            self.act[layer.name] = get_activations([
                                self.act[inputLayerNameList[0]],
                                self.act[inputLayerNameList[1]],
                                self.act[inputLayerNameList[2]],
                                self.act[inputLayerNameList[3]], 0
                            ])[0]

                        elif len(layer.input) == 5:
                            inputLayerNameList = []
                            for ind_li, layerInput in enumerate(layer.input):
                                inputLayerNameList.append(
                                    self.simpleName(layerInput.name))

                            get_activations = K.function([
                                layer.input[0], layer.input[1], layer.input[2],
                                layer.input[3], layer.input[4],
                                K.learning_phase()
                            ], [
                                layer.output,
                            ])
                            self.act[layer.name] = get_activations([
                                self.act[inputLayerNameList[0]],
                                self.act[inputLayerNameList[1]],
                                self.act[inputLayerNameList[2]],
                                self.act[inputLayerNameList[3]],
                                self.act[inputLayerNameList[4]], 0
                            ])[0]

                        else:
                            print('the number of input is more than 5')

                    else:
                        get_activations = K.function(
                            [layer.input, K.learning_phase()], [
                                layer.output,
                            ])
                        inputLayerName = self.simpleName(layer.input.name)
                        self.act[layer.name] = get_activations(
                            [self.act[inputLayerName], 0])[0]

                dot = model_to_dot(self.model,
                                   show_shapes=False,
                                   show_layer_names=True,
                                   rankdir='TB')
                if hasattr(self.model, "layers_by_depth"):
                    self.layers_by_depth = self.model.layers_by_depth
                elif hasattr(self.model.model, "layers_by_depth"):
                    self.layers_by_depth = self.model.model.layers_by_depth
                else:
                    print(
                        'the model or model.model should contain parameter layers_by_depth'
                    )

                maxCol = 0

                for i in range(len(self.layers_by_depth)):

                    for ind, layer in enumerate(
                            self.layers_by_depth[i]
                    ):  # the layers in No i layer in the model
                        if maxCol < ind:
                            # Fixed: was `maxCow = ind` (typo), so maxCol
                            # never tracked the widest depth level.
                            maxCol = ind

                        if len(layer.weights) == 0:
                            w = 0
                        else:

                            # Evaluate the first weight tensor of the layer.
                            w = layer.weights[0]
                            init = tf.global_variables_initializer()
                            with tf.Session() as sess_i:
                                sess_i.run(init)
                                # print(sess_i.run(w))
                                w = sess_i.run(w)

                        self.weights[layer.name] = w

                # Reorder conv kernels so filters come first for display.
                if self.modelDimension == '3D':
                    for i in self.weights:
                        # a=self.weights[i]
                        # b=a.ndim
                        if hasattr(self.weights[i], "ndim"):
                            if self.weights[i].ndim == 5:
                                self.LayerWeights[i] = np.transpose(
                                    self.weights[i], (4, 3, 2, 0, 1))
                        else:
                            self.LayerWeights[i] = self.weights[i]
                elif self.modelDimension == '2D':
                    for i in self.weights:
                        if hasattr(self.weights[i], "ndim"):

                            if self.weights[i].ndim == 4:
                                self.LayerWeights[i] = np.transpose(
                                    self.weights[i], (3, 2, 0, 1))
                        else:
                            self.LayerWeights[i] = self.weights[i]
                else:
                    print('the dimesnion of the weights should be 2D or 3D')

                self.show_layer_name()

                self.totalSS = len(self.subset_selection)

                # show the activations' name in the List
                slm = QStringListModel()
                slm.setStringList(self.qList)
                self.listView.setModel(slm)

            else:
                self.showChooseFileDialog()
# coding: utf-8
# Example of a CNN using Conv2D

# CNN Model 4 - two strides
# Import Keras and the other required libraries
from keras.models import Sequential, Model
from keras.layers import Conv2D
from keras.utils import np_utils

# Imports needed to display the model as SVG
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot

# On Windows, add the Graphviz binaries to PATH
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'

# Build a convolutional neural network model with stride 2
model = Sequential()
model.add(
    Conv2D(filters=3,
           kernel_size=(3, 3),
           input_shape=(6, 6, 1),
           strides=2,
           name='Conv2D_1'))

# Display the model in SVG format
SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))
示例#20
0
def plot_keras_model(model, show_shapes=True, show_layer_names=True):
    """Return an inline SVG rendering of *model*'s layer graph."""
    dot_graph = model_to_dot(model,
                             show_shapes=show_shapes,
                             show_layer_names=show_layer_names)
    return SVG(dot_graph.create(prog='dot', format='svg'))

# In[68]:

# Build a dense network over 500 averaged word-vector features.
w2v_dnn = construct_deepnn_architecture(num_input_features=500)

# ### Visualize sample deep architecture

# In[21]:

from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot

SVG(
    model_to_dot(w2v_dnn,
                 show_shapes=True,
                 show_layer_names=False,
                 rankdir='TB').create(prog='dot', format='svg'))

# ### Model Training, Prediction and Performance Evaluation

# In[69]:

# Train with a 10% validation split on the averaged word-vector features.
batch_size = 100
w2v_dnn.fit(avg_wv_train_features,
            y_train,
            epochs=5,
            batch_size=batch_size,
            shuffle=True,
            validation_split=0.1,
            verbose=1)
  def incresA(x,scale,name=None):
    pad = 'same'
    branch0 = conv2d(x,32,1,1,pad,True,name=name+'b0')
    branch1 = conv2d(x,32,1,1,pad,True,name=name+'b1_1')
    branch1 = conv2d(branch1,32,3,1,pad,True,name=name+'b1_2')
    branch2 = conv2d(x,32,1,1,pad,True,name=name+'b2_1')
    branch2 = conv2d(branch2,48,3,1,pad,True,name=name+'b2_2')
    branch2 = conv2d(branch2,64,3,1,pad,True,name=name+'b2_3')
    branches = [branch0,branch1,branch2]
    mixed = Concatenate(axis=3, name=name + '_concat')(branches)
    filt_exp_1x1 = conv2d(mixed,384,1,1,pad,False,name=name+'filt_exp_1x1')
    final_lay = Lambda(lambda inputs, scale: inputs[0] + inputs[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale},
                      name=name+'act_scaling')([x, filt_exp_1x1])
    return final_lay
#%%
## Inception ResNet block B
    def incresB(x,scale,name=None):
    pad = 'same'
    branch0 = conv2d(x,192,1,1,pad,True,name=name+'b0')
    branch1 = conv2d(x,128,1,1,pad,True,name=name+'b1_1')
    branch1 = conv2d(branch1,160,[1,7],1,pad,True,name=name+'b1_2')
    branch1 = conv2d(branch1,192,[7,1],1,pad,True,name=name+'b1_3')
    branches = [branch0,branch1]
    mixed = Concatenate(axis=3, name=name + '_mixed')(branches)
    filt_exp_1x1 = conv2d(mixed,1152,1,1,pad,False,name=name+'filt_exp_1x1')
    final_lay = Lambda(lambda inputs, scale: inputs[0] + inputs[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale},
                      name=name+'act_scaling')([x, filt_exp_1x1])
    return final_lay  
#%%
############### Incerption ResNet c block 
    def incresC(x,scale,name=None):
    pad = 'same'
    branch0 = conv2d(x,192,1,1,pad,True,name=name+'b0')
    branch1 = conv2d(x,192,1,1,pad,True,name=name+'b1_1')
    branch1 = conv2d(branch1,224,[1,3],1,pad,True,name=name+'b1_2')
    branch1 = conv2d(branch1,256,[3,1],1,pad,True,name=name+'b1_3')
    branches = [branch0,branch1]
    mixed = Concatenate(axis=3, name=name + '_mixed')(branches)
    filt_exp_1x1 = conv2d(mixed,2048,1,1,pad,False,name=name+'fin1x1')
    final_lay = Lambda(lambda inputs, scale: inputs[0] + inputs[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale},
                      name=name+'act_saling')([x, filt_exp_1x1])
    return final_lay
#%%
############################# Stem block
    
############################# Stem block

# conv2d(...) helper signature used throughout this file:
# conv2d(tensor, filters, kernel_size, stride, padding, activation_flag, name=...)
img_input = Input(shape=(32,32,3))

x = conv2d(img_input,32,3,2,'valid',True,name='conv1')
x = conv2d(x,32,3,1,'valid',True,name='conv2')
# Fixed: the original call omitted the stride argument, shifting 'valid' into
# the stride slot; stride 1 matches the surrounding stem convolutions.
x = conv2d(x,64,3,1,'valid',True,name='conv3')

x_11 = MaxPooling2D(3,strides=1,padding='valid',name='stem_br_11'+'_maxpool_1')(x)
# Fixed: the original call omitted the input tensor `x` as the first argument.
x_12 = conv2d(x,64,3,1,'valid',True,name='stem_br_12')

x = Concatenate(axis=3, name = 'stem_concat_1')([x_11,x_12])

x_21 = conv2d(x,64,1,1,'same',True,name='stem_br_211')
x_21 = conv2d(x_21,64,[1,7],1,'same',True,name='stem_br_212')
x_21 = conv2d(x_21,64,[7,1],1,'same',True,name='stem_br_213')
x_21 = conv2d(x_21,96,3,1,'valid',True,name='stem_br_214')

x_22 = conv2d(x,64,1,1,'same',True,name='stem_br_221')
x_22 = conv2d(x_22,96,3,1,'valid',True,name='stem_br_222')

x = Concatenate(axis=3, name = 'stem_concat_2')([x_21,x_22])

x_31 = conv2d(x,192,3,1,'valid',True,name='stem_br_31')
x_32 = MaxPooling2D(3,strides=1,padding='valid',name='stem_br_32'+'_maxpool_2')(x)
x = Concatenate(axis=3, name = 'stem_concat_3')([x_31,x_32])

#%%
#################### Inception-ResNet Network
#Inception-ResNet-A modules
x = incresA(x,0.15,name='incresA_1')
x = incresA(x,0.15,name='incresA_2')
x = incresA(x,0.15,name='incresA_3')
x = incresA(x,0.15,name='incresA_4')
#%%
#35 × 35 to 17 × 17 reduction module.
x_red_11 = MaxPooling2D(3,strides=2,padding='valid',name='red_maxpool_1')(x)

x_red_12 = conv2d(x,384,3,2,'valid',True,name='x_red1_c1')

x_red_13 = conv2d(x,256,1,1,'same',True,name='x_red1_c2_1')
x_red_13 = conv2d(x_red_13,256,3,1,'same',True,name='x_red1_c2_2')
x_red_13 = conv2d(x_red_13,384,3,2,'valid',True,name='x_red1_c2_3')

x = Concatenate(axis=3, name='red_concat_1')([x_red_11,x_red_12,x_red_13])
#%%
#Inception-ResNet-B modules
x = incresB(x,0.1,name='incresB_1')
x = incresB(x,0.1,name='incresB_2')
x = incresB(x,0.1,name='incresB_3')
x = incresB(x,0.1,name='incresB_4')
x = incresB(x,0.1,name='incresB_5')
x = incresB(x,0.1,name='incresB_6')
x = incresB(x,0.1,name='incresB_7')
#%%
#17 × 17 to 8 × 8 reduction module.
x_red_21 = MaxPooling2D(3,strides=2,padding='valid',name='red_maxpool_2')(x)

x_red_22 = conv2d(x,256,1,1,'same',True,name='x_red2_c11')
x_red_22 = conv2d(x_red_22,384,3,2,'valid',True,name='x_red2_c12')

x_red_23 = conv2d(x,256,1,1,'same',True,name='x_red2_c21')
x_red_23 = conv2d(x_red_23,256,3,2,'valid',True,name='x_red2_c22')

x_red_24 = conv2d(x,256,1,1,'same',True,name='x_red2_c31')
x_red_24 = conv2d(x_red_24,256,3,1,'same',True,name='x_red2_c32')
x_red_24 = conv2d(x_red_24,256,3,2,'valid',True,name='x_red2_c33')

x = Concatenate(axis=3, name='red_concat_2')([x_red_21,x_red_22,x_red_23,x_red_24])
#%%
#Inception-ResNet-C modules
x = incresC(x,0.2,name='incresC_1')
x = incresC(x,0.2,name='incresC_2')
x = incresC(x,0.2,name='incresC_3')
#%%
#TOP
x = GlobalAveragePooling2D(data_format='channels_last')(x)
x = Dropout(0.6)(x)
x = Dense(num_classes, activation='softmax')(x)

#%%
#####Building the model
# Fixed: the original used a typographic quote (’inception_resnet_v2'),
# which is a SyntaxError.
model = Model(img_input,x,name='inception_resnet_v2')


#####Model Summary
model.summary()

#####Save Model as '.png'

from keras.utils.vis_utils import model_to_dot
from tensorflow.keras.utils import plot_model
from IPython.display import SVG
# Fixed: typographic quotes (’dot’/’svg’) were a SyntaxError.
SVG(model_to_dot(model).create(prog='dot', format='svg'))
def viz_model_architecture(model):
    """Render *model*'s layer graph as inline SVG in a Jupyter notebook."""
    svg_source = model_to_dot(model).create(prog='dot', format='svg')
    display(SVG(svg_source))
    def plot_results(self, train, validation, params, model_visualization,
                     loss, acc, auc, min_acc):
        """Plot per-run training/validation curves from the logged CSV files.

        Args:
            train: if truthy, plot training-set curves.
            validation: if truthy, plot validation-set curves.
            params: if truthy, print each run's uid and join its parameter
                CSV into a summary table that is displayed at the end.
            model_visualization: if truthy, render the architecture of the
                first qualifying run as SVG.  NOTE(review): this path
                returns from inside the loop, so all remaining runs are
                skipped -- confirm the early return is intentional.
            loss, acc, auc: which metrics to plot.
            min_acc: only runs whose best val_acc exceeds this are shown.

        Returns:
            An IPython SVG object when model_visualization triggers,
            otherwise the matplotlib.pyplot module after saving the figure.
        """
        # Sorted per-run CSVLogger logs and per-run parameter CSVs.
        csv_name_sorted = sorted(glob.glob(self.csv_logs_folder + '/*.log'))
        csv_train_sorted = sorted(glob.glob(self.train_logs_folder + '/*.csv'))
        # Use the second log's uid to bootstrap the parameter-table index.
        uid_sample = os.path.split(csv_name_sorted[1])[1].split('.')[0]
        final = pd.DataFrame(index=pd.read_csv(list(
            filter(lambda x: uid_sample in x, csv_train_sorted))[0],
                                               header=None)[0].tolist())
        final.index.name = None
        plt.figure(figsize=(8, 6))
        for csv_file in csv_name_sorted:
            # Skip empty log files.
            if os.path.getsize(csv_file) > 0:
                data = pd.read_csv(csv_file)
                uid = os.path.split(csv_file)[1].split('.')[0]

                # Only show runs whose best validation accuracy clears the bar.
                if np.amax(data['val_acc']) > min_acc:
                    if validation:
                        if loss:
                            plt.plot(data['epoch'],
                                     data['val_loss'],
                                     label=uid + ' Val Loss')
                        if acc:
                            plt.plot(data['epoch'],
                                     data['val_acc'],
                                     label=uid + ' Val Acc')
                        if auc:
                            plt.plot(data['epoch'],
                                     data['val_auc'],
                                     label=uid + ' Val AUC')
                    if train:
                        if loss:
                            plt.plot(data['epoch'],
                                     data['loss'],
                                     label=uid + ' Train Loss')
                        if acc:
                            plt.plot(data['epoch'],
                                     data['acc'],
                                     label=uid + ' Train Acc')

                    if params:
                        print(uid)
                        # Parameter CSV has no header: column 0 is the
                        # parameter name, column 1 its value.
                        train_csv = pd.read_csv(list(
                            filter(lambda x: uid in x, csv_train_sorted))[0],
                                                header=None)
                        train_csv.columns = ['parameters', uid[8:25]]
                        train_csv.set_index('parameters', inplace=True)
                        final = final.join(train_csv)
                        print("*" * 100)
                        print("*" * 100)
                    if model_visualization:
                        # Rebuild the model from its saved JSON architecture.
                        model_log = glob.glob(self.model_logs_folder + '/' +
                                              uid + '*.json')[0]
                        with open(model_log) as model_file:
                            json_string = json.load(model_file)
                        model = model_from_json(json_string)
                        print(uid)
                        #                     print(model.summary())
                        print("*" * 100)
                        dot = model_to_dot(model).create(prog='dot',
                                                         format='svg')
                        # Early return: stops after the first visualized run.
                        return SVG(dot)
        plt.ylabel('Accuracy')
        plt.ylim([0, 1])
        plt.xlabel('Epoch')
        plt.legend()
        # plt.close()
        plt.savefig(self.logs_folder + '/out.pdf', transparent=True)
        if params:
            display(final.drop(['data_ID', 'data_id'], axis=0))
        return plt
示例#25
0
    modelDimension.append('3D')
    modelDimension = h3.create_dataset('modelDimension', data=modelDimension)
else:
    print("the dimesnion should be 2D or 3D")
#save the features
# Record every layer's activations on X_test into the HDF5 'activations' group.
activation = {}
act = h3.create_group('activations')
for i, layer in enumerate(model.layers):
    # K.function evaluates the layer's output; the trailing 0 passed below
    # selects inference mode for K.learning_phase().
    get_activations = K.function([model.input, K.learning_phase()], [
        layer.output,
    ])
    activation[layer.name] = get_activations([X_test, 0])[0]
    a = act.create_dataset(layer.name, data=activation[layer.name])

# Graphviz representation of the model (top-to-bottom layout).
dot = model_to_dot(model,
                   show_shapes=False,
                   show_layer_names=True,
                   rankdir='TB')
# NOTE(review): layers_by_depth is a legacy/internal Keras attribute; newer
# versions may not expose it -- confirm against the pinned Keras version.
layers_by_depth = model.layers_by_depth

layer_by_depth = h3.create_group('layer_by_depth')
weights = h3.create_group('weights')

# Grid bookkeeping for the depth-by-width layer layout written below.
maxCol = 0
maxRow = len(layers_by_depth)
## Save the structure,weights the layers' names in .h5 file
for i in range(len(layers_by_depth)):
    i_layer = layer_by_depth.create_group(
        str(i))  #the No i layer  in the model
    for ind, layer in enumerate(
            layers_by_depth[i]):  # the layers in No i layer in the model
        if maxCol < ind:
def preProcessing():
    """Train and evaluate an LSTM sarcasm classifier on datasets/train.csv.

    Loads the tweets, normalises the text, tokenises it, trains a small
    Embedding/SpatialDropout/LSTM network, prints test-set score/accuracy,
    writes accuracy/loss curves to PNG files, and prints per-class accuracy
    over a held-out validation slice.
    """
    dftrain = pd.read_csv('datasets/train.csv')
    # Normalise: lowercase, strip non-alphanumerics, drop the 'rt' marker.
    dftrain['tweets'] = dftrain['tweets'].str.lower()
    # BUG FIX: the original pattern '[^a-zA-z0-9\s]' used a lowercase 'z' in
    # the second range, which accidentally preserved [ \ ] ^ _ ` characters.
    dftrain['tweets'] = dftrain['tweets'].apply(
        lambda x: re.sub(r'[^a-zA-Z0-9\s]', '', x))
    # BUG FIX: mutating rows yielded by iterrows() never writes back to the
    # DataFrame (rows are copies), so the original 'rt' removal was a no-op.
    # Do the replacement on the column instead.
    dftrain['tweets'] = dftrain['tweets'].str.replace('rt', ' ', regex=False)

    # Keep only the 2000 most frequent tokens.
    max_features = 2000
    tokenizer = Tokenizer(num_words=max_features, split=' ')
    tokenizer.fit_on_texts(dftrain['tweets'].values)
    Xtrain = tokenizer.texts_to_sequences(dftrain['tweets'].values)
    Xtrain = pad_sequences(Xtrain)

    # One-hot labels; fixed random_state for a reproducible split.
    Y = pd.get_dummies(dftrain['label']).values
    X_train, X_test, Y_train, Y_test = train_test_split(Xtrain, Y, test_size=0.25, random_state=42)
    print(X_train.shape, Y_train.shape)
    print(X_test.shape, Y_test.shape)

    embed_dim = 128
    lstm_out = 196
    model = Sequential()
    model.add(Embedding(max_features, embed_dim, input_length=Xtrain.shape[1]))
    model.add(SpatialDropout1D(0.4))
    model.add(LSTM(lstm_out, dropout=0.2, recurrent_dropout=0.2))
    model.add(Dense(2, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    # Inline architecture rendering (requires pydot/graphviz).
    SVG(model_to_dot(model).create(prog='dot', format='svg'))

    batch_size = 32
    history = model.fit(X_train, Y_train, epochs=5, batch_size=batch_size, verbose=2)

    # Hold the last `validation_size` test rows out for per-class accuracy.
    validation_size = 1500
    X_validate = X_test[-validation_size:]
    Y_validate = Y_test[-validation_size:]
    X_test = X_test[:-validation_size]
    Y_test = Y_test[:-validation_size]
    score, acc = model.evaluate(X_test, Y_test, verbose=2, batch_size=batch_size)
    print("score: %.2f" % (score))
    print("acc: %.2f" % (acc))

    # Accuracy curve.  BUG FIX: savefig must come BEFORE show() -- show()
    # flushes the figure, so the original savefig-after-show wrote blank PNGs.
    plt.plot(history.history['accuracy'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig('model_accuracy.png')
    plt.show()
    # Loss curve (same savefig-before-show fix).
    plt.plot(history.history['loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig('model_loss.png')
    plt.show()

    # Per-class accuracy over the validation slice (class 0 = non-sarcasm).
    pos_cnt, neg_cnt, pos_correct, neg_correct = 0, 0, 0, 0
    for i in range(len(X_validate)):
        result = model.predict(X_validate[i].reshape(1, X_test.shape[1]), batch_size=1, verbose=2)[0]

        if np.argmax(result) == np.argmax(Y_validate[i]):
            if np.argmax(Y_validate[i]) == 0:
                neg_correct += 1
            else:
                pos_correct += 1

        if np.argmax(Y_validate[i]) == 0:
            neg_cnt += 1
        else:
            pos_cnt += 1

    print("Sarcasm_acc", pos_correct/pos_cnt*100, "%")
    print("Non-Sarcasm_acc", neg_correct/neg_cnt*100, "%")
示例#27
0
# Inspect layer shapes and parameter counts before visualizing.
model.summary()

#%%

# Viz model
# NOTE(review): hard-coded absolute output path below; adjust per machine.

from keras.utils.vis_utils import plot_model
plot_model(model, to_file='/Users/alvaro/Downloads/model.png')

#%%

# Render the architecture inline as SVG (uses the Graphviz 'dot' program).
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot

SVG(model_to_dot(model).create(prog='dot', format='svg'))

#%%
#
# Compile and train
#
# TODO: change optimizer from RMSPROP to SGD as per paper?
model.compile(optimizer='rmsprop',
              loss = 'categorical_crossentropy',
              metrics = ['acc']
              )
history = model.fit(x_train, y_train,
                    epochs= 20,
                    batch_size = 128,
                    validation_split = 0.2
示例#28
0
File: utils.py  Project: fsadannn/snn
 def _repr_svg_(self):
     """Return an SVG rendering of ``self.model`` for Jupyter rich display."""
     dot_graph = model_to_dot(self.model)
     return dot_graph.create_svg().decode('utf8')
示例#29
0
# Decoder tail of the ADAGE-style autoencoder: ReLU on the second encoding,
# then a sigmoid reconstruction back to `num_features` dimensions.
activation = Activation('relu')(encoded_rnaseq_2)
decoded_rnaseq = Dense(num_features, activation='sigmoid')(activation)

autoencoder = Model(input_rnaseq, decoded_rnaseq)

# In[10]:

autoencoder.summary()

# In[11]:

# Visualize the connections of the custom VAE model
# (saved to figures/adage_architecture.png, then rendered inline as SVG)
output_model_file = os.path.join('figures', 'adage_architecture.png')
plot_model(autoencoder, to_file=output_model_file)

SVG(model_to_dot(autoencoder).create(prog='dot', format='svg'))

# In[12]:

# Separate out the encoder and decoder model
encoder = Model(input_rnaseq, encoded_rnaseq_2)

# The standalone decoder reuses the autoencoder's final (trained) layer.
encoded_input = Input(shape=(encoding_dim, ))
decoder_layer = autoencoder.layers[-1]
decoder = Model(encoded_input, decoder_layer(encoded_input))

# In[13]:

# Compile the autoencoder to prepare for training
# NOTE(review): `lr` is the legacy Keras argument name; newer versions use
# `learning_rate` -- confirm against the pinned Keras version.
adadelta = optimizers.Adadelta(lr=learning_rate)
autoencoder.compile(optimizer=adadelta, loss='mse')
    def mgcNetArch(self, **kwargs):
        """Build the requested CNN architecture variant.

        Stores the constructed network on ``self.model`` and an SVG rendering
        of its layer graph (with shapes) on ``self.plot_model``.

        Keyword Arguments:
            outLayer: 'gloAvg' or 'mlp' at last layer, either use global
                averaging or multi layer perceptron
            l2_val: l2 regularization, default 0.00
            net_architr: cnn_max, cnn_stride, net_in_net, resnet, resblock
                or skipconnect
            block_typex / block_repeatx: residual-block options, used only
                by the 'resnet' variant

        Returns:
            self (enables method chaining).
        """
        defaults = {
            "input_img_rows": self.input_img_rows,
            "input_img_cols": self.input_img_cols,
            "channels": self.channels,
            "nb_classes": self.nb_classes,
            "outLayer": 'gloAvg',
            "l2_val": 0.00,
            "net_architr": 'cnn_max',
            "block_typex": 'basic',
            "block_repeatx": [1, 1]
        }

        # Fill in any option the caller did not supply.
        for key, value in defaults.items():
            kwargs.setdefault(key, value)

        arch = kwargs['net_architr']
        out_layer = kwargs['outLayer']
        l2_value = kwargs['l2_val']
        block_type = kwargs['block_typex']
        block_repeat = kwargs['block_repeatx']

        # Input-geometry/class-count options shared by every builder.
        shape_opts = {
            "input_img_rows": kwargs['input_img_rows'],
            "input_img_cols": kwargs['input_img_cols'],
            "channels": kwargs['channels'],
            "nb_classes": kwargs['nb_classes']
        }

        print(arch)

        # Dispatch to the matching project builder function.
        if arch == 'cnn_max':
            model = mgcNetArchMax(outLayer=out_layer, l2_val=l2_value,
                                  **shape_opts)
        elif arch == 'cnn_stride':
            model = mgcNetArchStride2(outLayer=out_layer, l2_val=l2_value,
                                      **shape_opts)
        elif arch == 'net_in_net':
            model = mgcNetArchNin(outLayer=out_layer, l2_val=l2_value,
                                  **shape_opts)
        elif arch == 'resnet':
            model = mgcResnet(block_type=block_type,
                              block_repeat=block_repeat,
                              **shape_opts)
        elif arch == 'resblock':
            model = mgcNetArchRes(outLayer=out_layer, l2_val=l2_value,
                                  **shape_opts)
        elif arch == 'skipconnect':
            model = mgcNetArchSkip(outLayer=out_layer, l2_val=l2_value,
                                   **shape_opts)

        self.model = model
        self.plot_model = SVG(
            model_to_dot(model, show_shapes=True).create(prog='dot',
                                                         format='svg'))

        return self
示例#31
0
### START CODE HERE ###
img_path = 'images/happy.jpg'
### END CODE HERE ###
# Load the test image at the model's expected 64x64 input size.
img = image.load_img(img_path, target_size=(64, 64))
imshow(img)

# Convert to array, add a batch dimension, apply the model's preprocessing.
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

print(happyModel.predict(x))

# ## 5 - Other useful functions in Keras (Optional)
#
# Two other basic features of Keras that you'll find useful are:
# - `model.summary()`: prints the details of your layers in a table with the sizes of its inputs/outputs
# - `plot_model()`: plots your graph in a nice layout. You can even save it as ".png" using SVG() if you'd like to share it on social media ;). It is saved in "File" then "Open..." in the upper bar of the notebook.
#
# Run the following code.

# In[21]:

happyModel.summary()

# In[12]:

# Save the architecture diagram to PNG and render it inline as SVG.
plot_model(happyModel, to_file='HappyModel.png')
SVG(model_to_dot(happyModel).create(prog='dot', format='svg'))

# In[ ]:
    return dnn_model


# In[68]:

# Build a DNN over 500-dimensional averaged word2vec features
# (construct_deepnn_architecture is defined earlier in the file).
w2v_dnn = construct_deepnn_architecture(num_input_features=500)


# ### Visualize sample deep architecture

# In[21]:

from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot

SVG(model_to_dot(w2v_dnn, show_shapes=True, show_layer_names=False, 
                 rankdir='TB').create(prog='dot', format='svg'))


# ### Model Training, Prediction and Performance Evaluation

# In[69]:

batch_size = 100
w2v_dnn.fit(avg_wv_train_features, y_train, epochs=5, batch_size=batch_size, 
            shuffle=True, validation_split=0.1, verbose=1)


# In[70]:

# NOTE(review): Sequential.predict_classes was removed in newer TF/Keras;
# replacement is np.argmax(model.predict(...), axis=-1) -- confirm version.
y_pred = w2v_dnn.predict_classes(avg_wv_test_features)
# Map predicted class indices back to the original string labels.
predictions = le.inverse_transform(y_pred)