Example no. 1
# imports assumed for a self-contained snippet (tf.keras; adjust if using standalone Keras)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dropout, BatchNormalization, Dense


def model():
    # LSTM model built from Keras layers: mean squared error loss, Adam optimizer
    mod = Sequential()
    mod.add(
        LSTM(units=64,
             return_sequences=True,
             input_shape=(X_train.shape[1], 9)))
    mod.add(Dropout(0.2))
    mod.add(BatchNormalization())
    mod.add(LSTM(units=64, return_sequences=True))
    mod.add(Dropout(0.1))
    mod.add(BatchNormalization())

    mod.add(LSTM(units=64))
    mod.add(Dropout(0.1))
    mod.add(BatchNormalization())
    mod.add(Dense(units=16, activation='tanh'))
    mod.add(BatchNormalization())
    mod.add(Dense(units=4, activation='tanh'))
    mod.compile(loss='mean_squared_error',
                optimizer='adam',
                metrics=['accuracy', 'mean_squared_error'])
    mod.summary()

    return mod
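
A minimal usage sketch for the function above. The data shapes and names here are illustrative assumptions (the original reads a global X_train that is not shown):

import numpy as np

# illustrative data: 500 sequences, 30 timesteps, 9 features; 4 targets each
X_train = np.random.rand(500, 30, 9)
y_train = np.random.rand(500, 4)

mod = model()  # model() reads X_train.shape[1] for the sequence length
mod.fit(X_train, y_train, epochs=5, batch_size=32, validation_split=0.1)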
Example no. 2
import tensorflow as tf  # assumed import for a self-contained snippet


def simple_model(pretrained_weights=None, input_size=(256, 256, 1)):
    inputs = tf.keras.Input(input_size)
    conv1 = tf.keras.layers.Conv2D(64,
                                   3,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal')(
                                       inputs)  # 256
    conv1 = tf.keras.layers.Conv2D(32,
                                   3,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal')(conv1)
    conv1 = tf.keras.layers.Conv2D(32,
                                   3,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal')(conv1)
    conv1 = tf.keras.layers.Conv2D(16,
                                   3,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal')(conv1)
    conv1 = tf.keras.layers.Conv2D(8,
                                   3,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal')(conv1)
    conv1 = tf.keras.layers.Conv2D(1,
                                   3,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal')(conv1)

    model = tf.keras.models.Model(inputs=inputs, outputs=conv1)

    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
                  loss="mean_absolute_error",
                  metrics=['accuracy'],
                  run_eagerly=True)

    model.summary()

    if pretrained_weights:
        model.load_weights(pretrained_weights)

    return model
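
A quick smoke test of the model above (a sketch; the input is random data):

import numpy as np

m = simple_model()
x = np.random.rand(1, 256, 256, 1).astype('float32')  # one dummy image
print(m.predict(x).shape)  # (1, 256, 256, 1): same-size, single-channel output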
Example no. 3
# imports assumed for a self-contained snippet (tf.keras)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Input, Conv2D, BatchNormalization,
                                     Activation, MaxPooling2D, Flatten,
                                     Dense, Dropout)


def make_common_model():
    model = Sequential([
        Input(shape=(300, 300, 3), name='input_layer'),

        # param count = n_filters * (kernel_h * kernel_w * in_channels + 1);
        # for this 1x1 conv over 3 input channels: 64 * (1*1*3 + 1) = 256
        Conv2D(64, kernel_size=(1, 1)),
        BatchNormalization(),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(32, kernel_size=(3, 3)),
        BatchNormalization(),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(64, kernel_size=(1, 1)),
        BatchNormalization(),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(32, kernel_size=(3, 3)),
        BatchNormalization(),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(64, kernel_size=(1, 1)),
        BatchNormalization(),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(32, kernel_size=(3, 3)),
        BatchNormalization(),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        Dense(24, activation='relu'),
        Dropout(0.5),
        Dense(3, activation='softmax', name='output_layer')
    ])

    model.summary()

    return model
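
The parameter arithmetic in the comment above can be checked directly, since Keras reports per-layer counts (a small sketch using the function above):

model = make_common_model()
first_conv = model.layers[0]  # the leading 1x1 Conv2D
print(first_conv.count_params())  # 64 * (1*1*3 + 1) = 256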
Example no. 4
def resnet_18(input_shape=(224,224,3), nclass=1000):
    """
    build resnet-18 model using keras with TensorFlow backend.
    :param input_shape: input shape of network, default as (224,224,3)
    :param nclass: numbers of class(output shape of network), default as 1000
    :return: resnet-18 model
    """
    input_ = Input(shape=input_shape)
 
    conv1 = conv2d_bn(input_, 64, kernel_size=(7, 7), strides=(2, 2))
    pool1 = MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(conv1)
 
    conv2 = residual_block(64, 2, is_first_layer=True)(pool1)
    conv3 = residual_block(128, 2, is_first_layer=True)(conv2)
    conv4 = residual_block(256, 2, is_first_layer=True)(conv3)
    conv5 = residual_block(512, 2, is_first_layer=True)(conv4)
 
    pool2 = GlobalAvgPool2D()(conv5)
    output_ = Dense(nclass, activation='softmax')(pool2)
 
    model = Model(inputs=input_, outputs=output_)
    model.summary()
 
    return model
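
The snippet assumes two helpers, conv2d_bn and residual_block, that are not shown. Below is a minimal sketch of plausible implementations (an assumption, not the original code; the imports also cover the layers used by resnet_18 above). One caveat: the snippet passes is_first_layer=True for every stage, whereas a standard ResNet-18 skips the entry downsampling only in the first stage, so under these semantics the later stages would never halve their spatial size.

from tensorflow.keras.layers import (Input, Conv2D, BatchNormalization,
                                     Activation, MaxPool2D, GlobalAvgPool2D,
                                     Dense, add)
from tensorflow.keras.models import Model


def conv2d_bn(x, filters, kernel_size, strides=(1, 1), padding='same'):
    # Conv2D -> BatchNorm -> ReLU
    x = Conv2D(filters, kernel_size, strides=strides, padding=padding)(x)
    x = BatchNormalization()(x)
    return Activation('relu')(x)


def residual_block(filters, n_units, is_first_layer=False):
    # a stack of n_units basic residual units; downsamples on entry
    # unless is_first_layer is set
    def f(x):
        for i in range(n_units):
            strides = (2, 2) if i == 0 and not is_first_layer else (1, 1)
            shortcut = x
            y = conv2d_bn(x, filters, (3, 3), strides=strides)
            y = Conv2D(filters, (3, 3), padding='same')(y)
            y = BatchNormalization()(y)
            # 1x1 projection when spatial size or channel count changes
            if strides != (1, 1) or x.shape[-1] != filters:
                shortcut = Conv2D(filters, (1, 1), strides=strides)(x)
            x = Activation('relu')(add([y, shortcut]))
        return x
    return f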
Example no. 5
                                _verb_only=args.verb_only,
                                out_verbs=nb_verbs,
                                out_nouns=nb_nouns)
        elif args.type == 'AttentionTemporalCNN':
            model = Attention.AttentionTemporalCNN(name=args.name,
                                                   verb_only=args.verb_only,
                                                   out_verbs=nb_verbs,
                                                   out_nouns=nb_nouns)
        else:
            raise NameError(
                'Model type must be one of: ConvLSTM | LSTM | ConvNet1D | AttentionTemporalCNN'
            )

    # build the model and print its summary
    model.build(input_shape=(None, None, 256, 256, 3))
    model.summary(line_length=150)

    # load categories
    if args.use_categories:
        with open('data/EPIC_100_verbs_categories.yaml',
                  'r') as fv, open('data/EPIC_100_nouns_categories.yaml',
                                   'r') as fn:
            verbs_categories = yaml.load(fv, Loader=yaml.FullLoader)
            nouns_categories = yaml.load(fn, Loader=yaml.FullLoader)

    # utilities
    with strategy.scope():
        if args.optimizer == 'sgd':
            optimizer = tf.keras.optimizers.SGD(learning_rate=args.lr,
                                                momentum=0.9,
                                                clipnorm=args.clip_norm)
Example no. 6
# @Date:   2019-12-26T10:27:56+01:00
# @Last modified time: 2020-02-20T20:15:32+01:00

# import sys

from keras.utils import plot_model
import model

import os
import tensorflow as tf
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.3  #0.41
session = tf.Session(config=config)

numC = 17
patch_shape = (32, 32, 10)

model = model.sen2LCZ_drop(patch_shape,
                           num_classes=numC,
                           bn=1,
                           depth=17,
                           dropRate=0.2,
                           fusion=1)
model.summary()
plot_model(model,
           to_file='./modelFig/' + 'sen2LCZ_drop.png',
           show_shapes=True)
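
The ConfigProto/Session pattern above is TensorFlow 1.x. Under TensorFlow 2.x (>= 2.4), a rough equivalent of capping per-process GPU memory looks like this (a sketch; the 4096 MB limit is illustrative, standing in for the 0.3 fraction above):

import tensorflow as tf

gpus = tf.config.list_physical_devices('GPU')
if gpus:
    tf.config.set_logical_device_configuration(
        gpus[0],
        [tf.config.LogicalDeviceConfiguration(memory_limit=4096)])  # in MB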
Example no. 7
# imports assumed for a self-contained snippet; the original mixes
# fully-qualified tf.keras.layers calls with bare layer names
import tensorflow as tf
from tensorflow.keras.layers import (Conv2D, MaxPooling2D, Dropout,
                                     UpSampling2D, concatenate)


def full_model(pretrained_weights=None, input_size=(256, 256, 1)):

    inputs = tf.keras.Input(input_size)
    conv1 = tf.keras.layers.Conv2D(64,
                                   3,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal')(
                                       inputs)  # 256
    conv1 = tf.keras.layers.Conv2D(64,
                                   3,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool1)  # 128
    conv2 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool2)  # 64
    conv3 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool3)  # 32
    conv4 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    conv5 = Conv2D(1024,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool4)  # 16
    conv5 = Conv2D(1024,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv5)
    drop5 = Dropout(0.5)(conv5)

    up6 = Conv2D(512,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(drop5))
    merge6 = concatenate([drop4, up6], axis=3)
    conv6 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv6)

    up7 = Conv2D(256,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(conv6))
    merge7 = concatenate([conv3, up7], axis=3)
    conv7 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv7)

    up8 = Conv2D(128,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(conv7))
    merge8 = concatenate([conv2, up8], axis=3)
    conv8 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv8)

    up9 = Conv2D(64,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(conv8))
    merge9 = concatenate([conv1, up9], axis=3)
    conv9 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge9)
    conv9 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv9)
    conv9 = Conv2D(2,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv9)
    # conv10 = Conv2D(1, 1, activation = 'relu')(conv9)
    # conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv10)
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)

    model = tf.keras.models.Model(inputs=inputs, outputs=conv10)

    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
                  loss="mean_absolute_error",
                  metrics=['accuracy'],
                  run_eagerly=True)

    model.summary()

    if pretrained_weights:
        model.load_weights(pretrained_weights)

    return model
Example no. 8
    hf = h5py.File(file, 'r')

    # split: first 16k train, next 4k validation, rest test; add a channel axis
    x_tr = hf['x'][:16000][:, :, np.newaxis]
    x_v = hf['x'][16000:20000][:, :, np.newaxis]
    x_t = hf['x'][20000:][:, :, np.newaxis]

    Y = hf['y']
    # one-hot encode the integer labels into 5 classes: y[i, Y[i]] = 1
    y = np.zeros((Y.size, 5))
    y[np.arange(Y.size), Y] = 1.0
    y_tr = y[:16000]
    y_v = y[16000:20000]
    y_t = y[20000:]


    model = model.model()
    print(model.summary())

    model, hist = train(model,x_tr,y_tr,x_v,y_v)

    fig, loss_ax = plt.subplots()

    loss_ax.plot(hist.history['loss'], 'y', label='train loss')
    loss_ax.plot(hist.history['val_loss'], 'r', label='val loss')
    loss_ax.set_xlabel('epoch')
    loss_ax.set_ylabel('loss')
    loss_ax.legend(loc='upper right')

    plt.savefig('loss-epoch.png')

    fig, acc_ax = plt.subplots()
Example no. 9
# imports assumed for a self-contained snippet (tf.keras)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Input, ZeroPadding2D, Conv2D,
                                     BatchNormalization, Activation,
                                     MaxPooling2D, GlobalAveragePooling2D,
                                     Dense)


def make_resnet_model():
    # note: despite the name, this Sequential stack has no residual
    # (skip) connections

    model = Sequential()

    model.add(Input(shape=(300, 300, 3), name='input_layer'))
    model.add(ZeroPadding2D(padding=(3, 3)))

    model.add(Conv2D(32, (10, 10), strides=2, kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(MaxPooling2D((2, 2), strides=1, padding='same'))

    model.add(
        Conv2D(32, (1, 1),
               strides=1,
               padding='valid',
               kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(
        Conv2D(32, (3, 3),
               strides=1,
               padding='same',
               kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    # model.add(MaxPooling2D((2, 2), strides=1, padding='same'))

    model.add(
        Conv2D(32, (1, 1),
               strides=2,
               padding='valid',
               kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(
        Conv2D(32, (3, 3),
               strides=1,
               padding='same',
               kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(
        Conv2D(32, (3, 3),
               strides=1,
               padding='valid',
               kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    # model.add(MaxPooling2D((2, 2), strides=1, padding='same'))
    # model.add(Conv2D(8, (1, 1), strides=1, padding='same', activation='relu', kernel_initializer='he_normal'))

    # model.add(Flatten())
    # model.add(Dense(8, activation='relu'))

    # model.add(Dropout(0.5))
    model.add(GlobalAveragePooling2D())

    model.add(Dense(3, activation='softmax', name='output_layer'))

    model.summary()

    return model
Example no. 10
    with torch.no_grad():
        for idx, (label, text, offsets) in enumerate(data_loader):
            predicted_label = model(text, offsets)
            loss = criterion(predicted_label, label)
            total_acc += (predicted_label.argmax(1) == label).sum().item()
            total_count += label.size(0)
    return total_acc / total_count


if __name__ == '__main__':
    tokenizer, vocab = get_tokenizer_vocab()
    text_pipeline, label_pipeline = get_pipeline(tokenizer, vocab)
    vocab_size, emsize, num_class = get_model_params(vocab)
    model = TextClassificationModel(vocab_size, emsize, num_class).to(device)

    summary(model)
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=LR)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1)
    total_accu = None

    train_iter, test_iter = AG_NEWS(root='../dataset')
    test_dataset = list(test_iter)
    split_train_, split_valid_ = get_train_valid_split(train_iter)

    train_data_loader = DataLoader(split_train_, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch)
    valid_data_loader = DataLoader(split_valid_, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch)
    test_data_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch)

    for epoch in range(1, EPOCHS + 1):
        epoch_start_time = time.time()
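
The DataLoaders above rely on a collate_batch helper that is not shown. A plausible sketch, following the pattern of the PyTorch text-classification tutorial this snippet mirrors (EmbeddingBag-style offsets; text_pipeline, label_pipeline, and device come from the setup above):

import torch

def collate_batch(batch):
    label_list, text_list, offsets = [], [], [0]
    for (_label, _text) in batch:
        label_list.append(label_pipeline(_label))
        processed = torch.tensor(text_pipeline(_text), dtype=torch.int64)
        text_list.append(processed)
        offsets.append(processed.size(0))
    label_list = torch.tensor(label_list, dtype=torch.int64)
    # cumulative start index of each sequence inside the flat text tensor
    offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)
    text_list = torch.cat(text_list)
    return label_list.to(device), text_list.to(device), offsets.to(device)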
Example no. 11
    print('X_test shape:', X_test.shape)
    train_label = to_categorical(y_train, 2)
    test_label = to_categorical(y_test, 2)
    print('Build model...')

    ##############################################  Model prepare  #########################################################
    # alternative build, kept for reference (only the last assignment
    # takes effect):
    # model = model.build_LSTM_CNN(MAX_SEQUENCE_LENGTH, nb_words,
    #                              word_embedding_matrix, NB_FILTER)
    model = model.build_CNN_LSTM(MAX_SEQUENCE_LENGTH, nb_words,
                                 word_embedding_matrix, NB_FILTER)

    model.compile(
        loss='categorical_crossentropy',
        optimizer='adagrad',  # rmsprop
        metrics=['accuracy'])
    model.summary()  # print a model overview
    #############################################  Train Model  ############################################################

    # this callback saves the model to filepath after every epoch
    checkpoint = ModelCheckpoint(filepath=best_model_tmp,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')

    t0 = time.time()
    history = model.fit(
        X_train,
        train_label,
        batch_size=BATCH_SIZE,
        validation_data=(X_test, test_label),
Example no. 12
	# build the model
	# model = model.buildCNN(MAX_SEQUENCE_LENGTH, nb_words,
	# 					   word_embedding_matrix, FILTER_LENGTH, NB_FILTER)
	# model = model.buildLstmCnn(MAX_SEQUENCE_LENGTH, nb_words,
	# 					   word_embedding_matrix, FILTER_LENGTH, NB_FILTER)
	# model = model.buildCnnLSTM(MAX_SEQUENCE_LENGTH, nb_words,
	# 						   word_embedding_matrix, NB_FILTER)
	# model = model.buildLstmPool(nb_words, word_embedding_matrix ,MAX_SEQUENCE_LENGTH)
	model = model.LSTM3(nb_words, word_embedding_matrix, MAX_SEQUENCE_LENGTH)
	# model = model.BiLSTM(nb_words, word_embedding_matrix, MAX_SEQUENCE_LENGTH)
	# model = model.BiLstmPool(nb_words, word_embedding_matrix, MAX_SEQUENCE_LENGTH, POOL_LENGTH)

	model.compile(loss='categorical_crossentropy', optimizer='adagrad',  # adam
				  metrics=['accuracy'])
	model.summary()  # print a model overview
	callbacks = [ModelCheckpoint(MODEL_WEIGHTS_FILE,
								 monitor='val_acc', save_best_only=True)]

	t0 = time.time()
	history = model.fit(X_train, train_label,
						batch_size=BATCH_SIZE,
						verbose=1,
						validation_split=VALIDATION_SPLIT, # (X_test, test_label)
						callbacks=callbacks,
						epochs=NB_EPOCHS)
	t1 = time.time()
	print("Minutes elapsed: %f" % ((t1 - t0) / 60.))

	# save the model and its weights to the given path
	model.save(model_path)
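
A model saved this way can later be restored in one call, architecture and weights included (model_path as above):

from keras.models import load_model

restored = load_model(model_path)  # rebuilds the architecture and loads weights
restored.summary()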