Code example #1
File: train_frcnn.py  Project: TrojanXu/keras-frcnn
else:
	input_shape_img = (None, None, 3)

img_input = Input(shape=input_shape_img)
roi_input = Input(shape=(None, 4))

# define the base network (resnet here, can be VGG, Inception, etc)
shared_layers = nn.nn_base(img_input, trainable=True)

# define the RPN, built on the base layers
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn = nn.rpn(shared_layers, num_anchors)

classifier = nn.classifier(shared_layers, roi_input, C.num_rois, nb_classes=len(classes_count), trainable=True)

model_rpn = Model(img_input, rpn[:2])
model_classifier = Model([img_input, roi_input], classifier)

# this is a model that holds both the RPN and the classifier, used to load/save weights for the models
model_all = Model([img_input, roi_input], rpn[:2] + classifier)

try:
	print('loading weights from {}'.format(C.base_net_weights))
	model_rpn.load_weights(C.base_net_weights, by_name=True)
	model_classifier.load_weights(C.base_net_weights, by_name=True)
except Exception:
	print('Could not load pretrained model weights. Weights can be found in the keras application folder: '
	      'https://github.com/fchollet/keras/tree/master/keras/applications')

optimizer = Adam(lr=1e-5)
optimizer_classifier = Adam(lr=1e-5)
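The excerpt ends at the optimizers; in the keras-frcnn project the script goes on to compile all three models against the project's custom RPN and classifier losses. Roughly (quoted from the project's `losses` module from memory, so treat the exact loss wiring as an approximation):

model_rpn.compile(optimizer=optimizer,
                  loss=[losses.rpn_loss_cls(num_anchors), losses.rpn_loss_regr(num_anchors)])
model_classifier.compile(optimizer=optimizer_classifier,
                         loss=[losses.class_loss_cls, losses.class_loss_regr(len(classes_count) - 1)])
# model_all only exists to save/load the combined weights
model_all.compile(optimizer='sgd', loss='mae')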
Code example #2
import cv2
import numpy as np
from keras.applications.inception_v3 import InceptionV3
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model
from keras.optimizers import SGD

base_model = InceptionV3(weights='imagenet',
                         include_top=False,
                         input_shape=(224, 224, 3))

# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dense(1024, activation='relu')(x)
# and a logistic layer -- here we have 6 classes
predictions = Dense(6, activation='softmax')(x)

# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)
model.load_weights('./inet.h5')
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])


def Occlusion_exp(image_path, occluding_size, occluding_pixel,
                  occluding_stride):
    image = cv2.imread(image_path)
    im = cv2.resize(image, (224, 224)).astype('uint8')
    print(im.shape)
    im = np.expand_dims(im, axis=0)
    print(im.shape)
    out = model.predict(im)
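    # --- Hypothetical continuation: the original snippet is truncated here. ---
    # The classic occlusion experiment slides a patch across the image and records
    # how the probability of the originally predicted class drops; the heatmap
    # bookkeeping below is a sketch, not the project's exact code.
    base_class = np.argmax(out[0])
    output_size = (224 - occluding_size) // occluding_stride + 1
    heatmap = np.zeros((output_size, output_size))
    for r in range(output_size):
        for c in range(output_size):
            occluded = im.copy()
            r0, c0 = r * occluding_stride, c * occluding_stride
            occluded[0, r0:r0 + occluding_size, c0:c0 + occluding_size, :] = occluding_pixel
            heatmap[r, c] = model.predict(occluded)[0][base_class]
    return heatmap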
Code example #3
File: test_sim.py  Project: codeaudit/fabric
def main():
    #o_path = "/Users/ra-mit/development/fabric/uns/sim/"
    o_path = "/data/eval/wiki/"

    from utils import prepare_sqa_data
    #data = prepare_sqa_data.get_sqa(filter_stopwords=True)

    if not fb and not wiki:

        spos = prepare_sqa_data.get_spo_from_rel(filter_stopwords=True)

        uns_spos, loc_dic = prepare_sqa_data.get_spo_from_uns()

        spos = spos + uns_spos

    if wiki:
        # structured_path = "/Users/ra-mit/data/fabric/dbpedia/triples_structured/all.csv"
        structured_path = "/data/smalldatasets/wiki/all.csv"
        # unstructured_path = "/Users/ra-mit/data/fabric/dbpedia/triples_unstructured/"
        unstructured_path = "/data/smalldatasets/wiki/triples_unstructured/"
        spos = []
        df = pd.read_csv(structured_path, encoding='latin1')
        ss = list(df.iloc[:, 0])
        ps = df.iloc[:, 1]
        os = df.iloc[:, 2]
        for s, p, o in zip(ss, ps, os):
            spos.append((s, p, o))
        print("Total structured spos: " + str(len(spos)))

        # uns_files = csv_access.list_files_in_directory(unstructured_path)
        uns_spos, loc_dic = prepare_sqa_data.get_spo_from_uns(path=unstructured_path)
        # uns_spos = []
        # for f in uns_files:
        #     df = pd.read_csv(f, encoding='latin1')
        #     ss = list(df.iloc[:, 0])
        #     ps = df.iloc[:, 1]
        #     os = df.iloc[:, 2]
        #     for s, p, o in zip(ss, ps, os):
        #         uns_spos.append((s, p, o))

        print("Total unstructured spos: " + str(len(uns_spos)))

        spos += uns_spos
        print("Total: " + str(len(spos)))

    true_pairs = []
    S = []
    P = []
    O = []
    # positive pairs
    for s, p, o in spos:
        s = str(s)
        p = str(p)
        o = str(o)
        true_pairs.append((s, p, 0))
        true_pairs.append((s, o, 0))
        true_pairs.append((p, o, 0))
        S.append(s)
        P.append(p)
        O.append(o)

    if not fb and not wiki:
        with open(o_path + "true_pairs.pkl", "wb") as f:
            pickle.dump(true_pairs, f)

    print("True pairs: " + str(len(true_pairs)))

    # set to avoid negative samples that collide with positive ones
    pos = set()
    for e1, e2, label in true_pairs:
        pos.add(e1 + e2)

    print("Unique true pairs: " + str(len(pos)))

    # negative pairs
    random_permutation = np.random.permutation(len(S))
    S = np.asarray(S)
    S = S[random_permutation]
    random_permutation = np.random.permutation(len(O))
    O = np.asarray(O)
    O = O[random_permutation]

    false_pairs = []
    for s, p, o in zip(list(S), P, list(O)):
        if s + p in pos or s + o in pos or p + o in pos:
            continue  # collides with a positive pair, so skip it
        false_pairs.append((s, p, 1))
        false_pairs.append((s, o, 1))
        false_pairs.append((p, o, 1))

    print("Negative pairs 1: " + str(len(false_pairs)))

    random_permutation = np.random.permutation(len(S))
    S = np.asarray(S)
    S = S[random_permutation]
    random_permutation = np.random.permutation(len(O))
    O = np.asarray(O)
    O = O[random_permutation]

    false_pairs2 = []
    for s, p, o in zip(list(S), P, list(O)):
        if s + p in pos or s + o in pos or p + o in pos:
            continue  # collides with a positive pair, so skip it
        false_pairs2.append((s, p, 1))
        false_pairs2.append((s, o, 1))
        false_pairs2.append((p, o, 1))

    print("Negative pairs 2: " + str(len(false_pairs2)))

    all_data = true_pairs + false_pairs + false_pairs2

    sparsity_code_size = 48

    if fb:
        sparsity_code_size = 4 # 1 word per clause
        o_path = "/data/eval/fb/"
        all_data, true_pairs = process_fb.extract_data()
        
        # start counting vals
        #_test = all_data[:2000]  # test
        #total = 0
        #for s, p, label in _test:
        #    total += label
        #print("total: " + str(total/len(all_data)))
        # end counting vals
      
        random_permutation = np.random.permutation(len(all_data))
        all_data = np.asarray(all_data)
        all_data = all_data[random_permutation]
        with open(o_path + "true_pairs.pkl", "wb") as f:
            pickle.dump(true_pairs, f)
        #all_data = all_data[:2000]  # test
        #total = 0
        #for s, p, label in all_data:
        #    total += label
        #print("total: " + str(total/len(all_data)))

    if wiki:
        sparsity_code_size = 48
        o_path = "/data/eval/wiki/"
        random_permutation = np.random.permutation(len(all_data))
        all_data = np.asarray(all_data)
        all_data = all_data[random_permutation]
        with open(o_path + "true_pairs.pkl", "wb") as f:
            pickle.dump(true_pairs, f)

    vocab = dict()

    if not fb:
        idx_vectorizer = IndexVectorizer(vocab_index=vocab, sparsity_code_size=sparsity_code_size, tokenizer_sep=" ")
    else:
        idx_vectorizer = FlatIndexVectorizer(vocab_index=vocab, sparsity_code_size=sparsity_code_size)
    vectorizer = tp.CustomVectorizer(idx_vectorizer)

    st = time.time()
    print("start vectorizing...")
    # vectorization happens here
    X1 = []
    X2 = []
    Y = []
    for e1, e2, label in all_data:
        ve1 = vectorizer.get_vector_for_tuple(e1)
        ve1 = ve1.toarray()[0]
        ve2 = vectorizer.get_vector_for_tuple(e2)
        ve2 = ve2.toarray()[0]
        X1.append(ve1)
        X2.append(ve2)
        Y.append(label)

    X1 = np.asarray(X1)
    X2 = np.asarray(X2)
    Y = np.asarray(Y)

    et = time.time()
    print("finish vectorizing...")
    print("took: " + str(et-st))

    vocab, inv_vocab = vectorizer.get_vocab_dictionaries()

    print("vocab size: " + str(len(vocab)))

    # def model1():
    input_dim = sparsity_code_size * 32

    # declare network
    i1 = Input(shape=(input_dim,), name="i1")
    i2 = Input(shape=(input_dim,), name="i2")

    base = Sequential()
    base.add(Dense(1024, input_shape=(input_dim,), activation='relu'))
    #base.add(Dense(2056, input_shape=(input_dim,), activation='relu'))
    #base.add(Dense(512, input_shape=(input_dim,), activation='relu'))
    #base.add(Dense(2056, activation='relu'))
    #base.add(Dense(768, activation='relu'))
    base.add(Dense(512, activation='relu'))
    #base.add(Dense(1024, activation='relu'))
    base.add(Dense(256, activation='relu'))
    base.add(Dense(128, activation='relu'))
    #base.add(Dense(64, activation='relu'))

    emb_1 = base(i1)
    emb_2 = base(i2)

    def euclidean_distance(vects):
        x, y = vects
        return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))

    def eucl_dist_output_shape(shapes):
        shape1, shape2 = shapes
        return shape1[0], 1

    def contrastive_loss(y_true, y_pred):
        margin = 1
        # Y=0 means similar and Y=1 means dissimilar. Think of it as distance
        return K.mean((1 - y_true) * K.square(y_pred) + y_true * K.square(K.maximum(margin - y_pred, 0)))

    distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([emb_1, emb_2])

    fullmodel = Model(inputs=[i1, i2], outputs=distance)

    opt = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)

    callbacks = []
    callback_best_model = keras.callbacks.ModelCheckpoint(o_path + "epoch-{epoch}.h5",
                                                          monitor='val_loss',
                                                          save_best_only=False)
    callbacks.append(callback_best_model)

    fullmodel.compile(optimizer=opt, loss=contrastive_loss, metrics=['accuracy'])

    fullmodel.summary()

    def size(model):  # Compute number of params in a model (the actual number of floats)
        return sum([np.prod(K.get_value(w).shape) for w in model.trainable_weights])

    print("trainable params: " + str(size(fullmodel)))

    fullmodel.fit([X1, X2], Y, epochs=300, shuffle=True, batch_size=80, callbacks=callbacks)

    encoder = Model(inputs=i1, outputs=emb_1)

    fullmodel.save(o_path + "/sim.h5")
    encoder.save(o_path + "/sim_encoder.h5")

    with open(o_path + "tf_dictionary.pkl", "wb") as f:
        pickle.dump(vocab, f)
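Once training finishes, the saved encoder can embed unseen strings for similarity queries. A minimal sketch reusing the `vectorizer` built above (the example strings are placeholders; under the contrastive loss, small distance means similar):

from keras.models import load_model

encoder = load_model(o_path + "sim_encoder.h5")
v1 = vectorizer.get_vector_for_tuple("barack obama").toarray()
v2 = vectorizer.get_vector_for_tuple("president").toarray()
e1 = encoder.predict(v1)
e2 = encoder.predict(v2)
dist = np.sqrt(np.sum((e1 - e2) ** 2))
print("distance:", dist)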
Code example #4
embedding_layer = Embedding(len(word_index),
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=True)

sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH, ), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(128, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(35)(x)  # global max pooling
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
preds = Dense(2, activation='softmax')(x)

model = Model(sequence_input, preds)
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['acc'])

print(model.summary())

# happy learning!
history = model.fit(x_train,
                    y_train,
                    validation_data=(x_val, y_val),
                    epochs=50,
                    batch_size=512)
Code example #5
def get_sentence_attention(word_model, word_length, sent_length, n_classes):
    #x = Permute((2,1))(si_vects)
    nclasses = 1
    input = Input(shape=(sent_length, word_length), dtype='int32')
    print(' input to sentence attn network', word_model)
    preds = []
    attentions_pred = []
    #print(output.summary())
    si_vects = TimeDistributed(word_model)(input)
    print('Shape after si_vects', si_vects.shape)
    #u_it = TimeDistributed(TimeDistributed(BatchNormalization()))(si_vects)
    u_it = TimeDistributed(TimeDistributed(Dense(256,
                                                 activation='tanh')))(si_vects)
    print('Shape after word vector', u_it.shape)
    #u_it = TimeDistributed(TimeDistributed(BatchNormalization()))(u_it)

    #h_it = TimeDistributed(Reshape((100,word_length)))(si_vects)
    #print('Shape after reshape word vector',h_it.shape)

    attn_final_word = [
        TimeDistributed(ATTNWORD(1))(u_it) for i in range(nclasses)
    ]
    #a_it = Reshape(( word_length, 1))(a_it)
    #h_it = Reshape((word_length, 512))(h_it)
    print('ATTN Shape', attn_final_word[0].shape)
    attn_final_word = [
        Multiply()([si_vects, attn_final_word[i]]) for i in range(nclasses)
    ]  #Multiply()([h_it,a_it])
    print('Multi word Shape', attn_final_word[0].shape)
    attn_final_word = [
        Reshape((sent_length, 256, word_length))(attn_final_word[i])
        for i in range(nclasses)
    ]
    print('Shape of the att1 is {}'.format(attn_final_word[0].shape))
    attn_final_word = [
        Lambda(lambda x: K.sum(x, axis=3))(attn_final_word[i])
        for i in range(nclasses)
    ]
    print('Shape of the lambda word is {}'.format(attn_final_word[0].shape))
    ind_t = 0
    attn_sents_for_all_classes = []
    #attn_final_word[0] = SpatialDropout1D(0.2)(attn_final_word[0])
    x = Bidirectional(CuDNNGRU(128, return_sequences=True))(attn_final_word[0])
    x = SpatialDropout1D(0.2)(x)
    x = BatchNormalization()(x)
    print("Shape of X-X is {}".format(x.shape))
    x1 = Conv1D(128, 2, activation='relu')(x)
    x1_mp = GlobalMaxPooling1D()(x1)
    #x1_av = AveragePooling1D(1)(x1)
    x2 = Conv1D(128, 3, activation='relu')(x)
    x2_mp = GlobalMaxPooling1D()(x2)
    #x2_av = AveragePooling1D(1)(x2)
    x3 = Conv1D(128, 4, activation='relu')(x)
    #x3_mp = MaxPooling1D(1)(x3)
    x3_av = GlobalAveragePooling1D()(x3)
    #x = Concatenate()([Flatten()(x1_mp), Flatten()(x1_av),Flatten()(x2_mp), Flatten()(x2_av),Flatten()(x3_mp), Flatten()(x3_av)])
    #x = Concatenate()([Flatten()(x1_mp), Flatten()(x2_mp), Flatten()(x3_av)])
    x = Concatenate()([x1_mp, x2_mp, x3_av])
    x = BatchNormalization()(x)

    #x = Dense(256, activation='relu')(x)
    #x = Dropout(0.25)(x)
    #x = Dense(128, activation='relu')(x)
    #x = Dropout(0.25)(x)
    x = Dense(64, activation='relu')(x)
    #x = Dropout(0.25)(x)
    p = Dense(n_classes, activation='sigmoid')(x)

    model = Model(input, p)

    return model
Code example #6
File: net.py  Project: hancan0/torch_ecg
def main():
    input_layer = Input((2560, 1))
    output_layer = build_model(input_layer=input_layer, block="resnext", start_neurons=16,
                               DropoutRatio=0.5, filter_size=32, nClasses=2)
    model = Model(input_layer, output_layer)
    print(model.summary())
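    # Hypothetical finishing step (not in the source file): compile and fit the
    # two-class model. Optimizer/loss choices and the data arrays are assumptions.
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    model.fit(x_train, y_train, epochs=10, batch_size=32)  # x_train/y_train assumed available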
Code example #7
File: emotionRecTrain.py  Project: mrkarma/Musotion
def main():
    # used to get the session/graph data from keras
    K.set_learning_phase(0)
    # get the data in a Pandas dataframe
    raw_data = pd.read_csv(FLAGS.csv_file)

    # convert to one hot vectors
    emotion_array = process_emotion(raw_data[['emotion']])
    # convert to a 48x48 float matrix
    pixel_array = process_pixels(raw_data[['pixels']])

    # split for test/train
    y_train, y_test = split_for_test(emotion_array)
    x_train_matrix, x_test_matrix = split_for_test(pixel_array)

    n_train = int(len(x_train_matrix))
    n_test = int(len(x_test_matrix))

    x_train_input = duplicate_input_layer(x_train_matrix, n_train)
    x_test_input = duplicate_input_layer(x_test_matrix, n_test)

    # vgg 16. include_top=False so the output is the 512 and use the learned weights
    vgg16 = VGG16(include_top=False, input_shape=(48, 48, 3), pooling='avg', weights='imagenet')

    # get vgg16 outputs
    x_train_feature_map = get_vgg16_output(vgg16, x_train_matrix, n_train)
    x_test_feature_map = get_vgg16_output(vgg16, x_test_matrix, n_test)

    # build and train model
    top_layer_model = Sequential()
    top_layer_model.add(Dense(256, input_shape=(512,), activation='relu'))
    top_layer_model.add(Dense(256, input_shape=(256,), activation='relu'))
    top_layer_model.add(Dropout(0.5))
    top_layer_model.add(Dense(128, input_shape=(256,)))
    top_layer_model.add(Dense(NUM_CLASSES, activation='softmax'))

    adamax = Adamax()

    top_layer_model.compile(loss='categorical_crossentropy',
                            optimizer=adamax, metrics=['accuracy'])

    # train
    top_layer_model.fit(x_train_feature_map, y_train,
                        validation_data=(x_train_feature_map, y_train),
                        epochs=FLAGS.n_epochs, batch_size=FLAGS.batch_size)
    # Evaluate
    score = top_layer_model.evaluate(x_test_feature_map,
                                     y_test, batch_size=FLAGS.batch_size)

    print("After top_layer_model training (test set): {}".format(score))

    # Merge two models and create the final_model_final_final
    inputs = Input(shape=(48, 48, 3))
    vg_output = vgg16(inputs)
    print("vg_output: {}".format(vg_output.shape))
    # TODO: the 'pooling' argument of the VGG16 model is important for this to work;
    # otherwise you would have to squash the output from (?, 1, 1, 512) to (?, 512)
    model_predictions = top_layer_model(vg_output)
    final_model = Model(inputs=inputs, outputs=model_predictions)
    final_model.compile(loss='categorical_crossentropy',
                        optimizer=adamax, metrics=['accuracy'])
    final_model_score = final_model.evaluate(x_train_input,
                                             y_train, batch_size=FLAGS.batch_size)
    print("Sanity check - final_model (train score): {}".format(final_model_score))

    final_model_score = final_model.evaluate(x_test_input,
                                             y_test, batch_size=FLAGS.batch_size)
    print("Sanity check - final_model (test score): {}".format(final_model_score))
    # config = final_model.get_config()
    # weights = final_model.get_weights()

    # probably don't need to create a new model
    # model_to_save = Model.from_config(config)
    # model_to_save.set_weights(weights)
    model_to_save = final_model

    print("Model input name: {}".format(model_to_save.input))
    print("Model output name: {}".format(model_to_save.output))

    # Save Model
    builder = saved_model_builder.SavedModelBuilder(FLAGS.export_path)
    signature = predict_signature_def(inputs={'images': model_to_save.input},
                                      outputs={'scores': model_to_save.output})
    with K.get_session() as sess:
        builder.add_meta_graph_and_variables(sess=sess,
                                             tags=[tag_constants.SERVING],
                                             signature_def_map={'predict': signature})
        builder.save()
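After `builder.save()`, the exported SavedModel can be reloaded outside Keras for serving. A quick TensorFlow 1.x round-trip check (hypothetical, not part of the original script):

import tensorflow as tf
from tensorflow.python.saved_model import loader, tag_constants

with tf.Session(graph=tf.Graph()) as sess:
    loader.load(sess, [tag_constants.SERVING], FLAGS.export_path)
    # the 'predict' signature exposes inputs={'images'} and outputs={'scores'}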
Code example #8
               padding='same',
               activation='relu',
               dilation_rate=2)(y)
    y = Dropout(dropout)(y)
    y = MaxPooling2D()(y)
    filters *= 2

# merge left and right branches outputs
y = concatenate([x, y])
# feature maps to vector in preparation to connecting to Dense layer
y = Flatten()(y)
y = Dropout(dropout)(y)
outputs = Dense(num_labels, activation='softmax')(y)

# build the model in functional API
model = Model([left_inputs, right_inputs], outputs)
# verify the model using graph
# verify the model using layer text description
model.summary()

# classifier loss, Adam optimizer, classifier accuracy
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# train the model with input images and labels
model.fit([x_train, x_train],
          y_train, 
          validation_data=([x_test, x_test], y_test),
          epochs=20,
          batch_size=batch_size)
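Evaluation mirrors the two-branch input, feeding the same images to both branches (a short sketch using the variables defined above):

score = model.evaluate([x_test, x_test], y_test, batch_size=batch_size)
print("Test accuracy: %.1f%%" % (100.0 * score[1]))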
Code example #9
    def get_model(self, num_classes, activation='sigmoid'):
        max_len = opt.max_len
        max_ngram_len = opt.ngram_max_len
        voca_size = opt.unigram_hash_size + 1

        with tf.device('/gpu:0'):

            def LAYER(input1, input2, max_len=max_len):
                Avg = Dropout(rate=0.5)(input1)
                Avg = BatchNormalization()(Avg)
                Avg = GlobalAveragePooling1D()(Avg)

                mat = Reshape((max_len, 1))(input2)
                Dot = dot([input1, mat], axes=1)
                Dot = Flatten()(Dot)
                Dot = Dropout(rate=0.5)(Dot)
                Dot = BatchNormalization()(Dot)

                return Avg, Dot

            embd = Embedding(voca_size, opt.embd_size, name='uni_embd')
            ####################################
            uni = Input((max_len, ), name="t_uni")
            uni_embd = embd(uni)  # token
            w_uni = Input((max_len, ), name="w_uni")
            ####################################
            shape = Input((max_len, ), name="shape")
            shape_embd = embd(shape)
            w_shape = Input((max_len, ), name="w_shape")
            ####################################
            noun = Input((max_len, ), name="noun")
            noun_embd = embd(noun)
            w_noun = Input((max_len, ), name="w_noun")
            ####################################
            bmm = Input((max_len, ), name="bmm")
            bmm_embd = embd(bmm)
            w_bmm = Input((max_len, ), name="w_bmm")
            ####################################
            ngram = Input((max_ngram_len, ), name="ngram")
            ngram_embd = embd(ngram)
            w_ngram = Input((max_ngram_len, ), name="w_ngram")
            ####################################
            jamo3 = Input((max_len, ), name="jamo3")
            jamo_embd3 = embd(jamo3)
            w_jamo3 = Input((max_len, ), name="w_jamo3")
            ####################################
            jamo2 = Input((max_len, ), name="jamo2")
            jamo_embd2 = embd(jamo2)
            w_jamo2 = Input((max_len, ), name="w_jamo2")
            ####################################
            jamo1 = Input((max_len, ), name="jamo1")
            jamo_embd1 = embd(jamo1)
            w_jamo1 = Input((max_len, ), name="w_jamo1")
            ####################################
            img = Input((2048, ), name="image")

            uni_avg, uni_dot = LAYER(uni_embd, w_uni, max_len=max_len)
            shape_avg, shape_dot = LAYER(shape_embd, w_shape, max_len=max_len)
            noun_avg, noun_dot = LAYER(noun_embd, w_noun, max_len=max_len)
            ngram_avg, ngram_dot = LAYER(ngram_embd,
                                         w_ngram,
                                         max_len=max_ngram_len)
            jamo_avg3, jamo_dot3 = LAYER(jamo_embd3, w_jamo3, max_len=max_len)
            jamo_avg2, jamo_dot2 = LAYER(jamo_embd2, w_jamo2, max_len=max_len)
            jamo_avg1, jamo_dot1 = LAYER(jamo_embd1, w_jamo1, max_len=max_len)
            bmm_avg, bmm_dot = LAYER(bmm_embd, w_bmm, max_len=max_len)

            result = Concatenate()([
                uni_avg, uni_dot, shape_avg, shape_dot, noun_avg, noun_dot,
                ngram_avg, ngram_dot, jamo_dot3, jamo_dot2, jamo_dot1, bmm_dot,
                img
            ])

            result = Dropout(rate=0.5)(result)
            result = BatchNormalization()(result)
            result = Activation('relu')(result)
            outputs = Dense(num_classes, activation=activation)(result)
            ####################################
            model = Model(inputs=[
                uni, w_uni, shape, w_shape, noun, w_noun, bmm, w_bmm, ngram,
                w_ngram, jamo3, w_jamo3, jamo2, w_jamo2, jamo1, w_jamo1, img
            ],
                          outputs=outputs)
            optm = keras.optimizers.Adam(0.0002)

            model.compile(loss='categorical_crossentropy',
                          optimizer=optm,
                          metrics=[top1_acc])
            model.summary(print_fn=lambda x: self.logger.info(x))

        return model
Code example #10
def third_phase(trained=False, third_phase_train_reps=third_phase_train_reps):
    global resnet_model, new_resnet_model, optimizer
    tensorboard = TensorBoard(log_dir=third_phase_folder + 'tb_logs', batch_size=batch_size)
    
    if not trained:
        resnet_model = load_model(data_folder + '1st_phase_resnet_model.h5')
    else:
        resnet_model = load_model(data_folder + '3rd_phase_resnet_model.h5')

#     # add regularizers to the convolutional layers
#     trainable_layers_ratio = 1 / 2.0
#     trainable_layers_index = int(len(resnet_model.layers) * (1 - trainable_layers_ratio))
#     for layer in resnet_model.layers[:trainable_layers_index]:
#         layer.trainable = False
#     for layer in resnet_model.layers[trainable_layers_index:]:
#         layer.trainable = True

    for layer in resnet_model.layers:
        layer.trainable = True
        if isinstance(layer, keras.layers.convolutional.Conv2D):
            layer.kernel_regularizer = regularizers.l2(0.001)
            layer.activity_regularizer = regularizers.l1(0.001)

    # add dropout and regularizer to the penultimate Dense layer
    predictions = resnet_model.layers[-1]
    dropout = Dropout(0.2)
    fc = resnet_model.layers[-2]
    fc.kernel_regularizer = regularizers.l2(0.001)
    fc.activity_regularizer = regularizers.l1(0.001)

    x = dropout(fc.output)
    predictors = predictions(x)
    new_resnet_model = Model(inputs=resnet_model.input, outputs=predictors)

    optimizer = Adam(lr=0.1234)
    start_lr = 0.0001
    end_lr = 0.00001
    step_lr = (end_lr - start_lr) / (third_phase_train_reps - 1)
    new_resnet_model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])

    if not os.path.exists(third_phase_folder):
        os.makedirs(third_phase_folder)
        
    for i in range(third_phase_train_reps):
        lr = start_lr + step_lr * i
        K.set_value(new_resnet_model.optimizer.lr, lr)
        print(i, 'out of ', third_phase_train_reps, '\nlearning rate ', K.eval(new_resnet_model.optimizer.lr))
        history = new_resnet_model.fit_generator(train_img_class_gen,
                                               steps_per_epoch=steps_per_small_epoch,
                                               epochs=small_epochs, verbose=2,
                                               validation_data=val_img_class_gen, validation_steps=val_steps_per_small_epoch,
                                               workers=4, callbacks=[tensorboard])
#         history = new_resnet_model.fit_generator(train_img_class_gen,
#                                                    steps_per_epoch=steps_per_small_epoch,
#                                                    epochs=small_epochs, verbose=2,
#                                                    validation_data=val_img_class_gen, validation_steps=val_steps_per_small_epoch,
#                                                    workers=4, callbacks=[LosswiseKerasCallback(tag='keras xcpetion model')])
        print("iteration",i)
        if i % saves_per_epoch == 0:
            print('{} epoch completed'.format(int(i / saves_per_epoch)))

        if i>=5:
            ts = calendar.timegm(time.gmtime())
            new_resnet_model.save(third_phase_folder + str(ts) + '_resnet_model.h5')
            save_obj(history.history, str(ts) + '_xcpetion_history.h5', folder=third_phase_folder)

    new_resnet_model.save(data_folder + '3rd_phase_resnet_model.h5')
Code example #11
    def _build(self,
               input_shape,
               num_outputs,
               block_fn,
               repetitions,
               with_detector=None, 
               activation=True,
               Dropout=Dropout):
        """Builds a custom ResNet like architecture.

        Args:
            input_shape: The input shape in the form (nb_channels, nb_rows, nb_cols)
            num_outputs: The number of outputs at final softmax layer
            block_fn: The block function to use. This is either `basic_block` or `bottleneck`.
                The original paper used basic_block for layers < 50
            repetitions: Number of repetitions of various block units.
                At each block unit, the number of filters are doubled and the input size is halved

        Returns:
            The keras `Model`.
        """
        self._handle_dim_ordering()
        if len(input_shape) != 3:
            raise Exception("Input shape should be a tuple (nb_channels, nb_rows, nb_cols)")

        # Permute dimension order if necessary
        if K.image_dim_ordering() == 'tf':
            input_shape = (input_shape[1], input_shape[2], input_shape[0])

        # Load function from str if needed.
        block_fn = self._get_block(block_fn)
        tmp = []

        input = Input(shape=input_shape)
        tmp.append(input)
        conv1 = self._conv_bn_relu(filters=16, kernel_size=(3, 3))(input)
        tmp.append(conv1)

        block = conv1
        filters = 16
        for i, r in enumerate(repetitions):
            with tf.variable_scope("block"+str(i)):
                block = self._residual_block(block_fn, filters=filters, repetitions=r, is_first_layer=(i == 0))(block)
            tmp.append(block)
            filters *= 2

        # Last activation
        block = self._bn_relu(block)
        block = Dropout(0.5)(block)

        # Classifier block
        block_shape = K.int_shape(block)
        pool2 = AveragePooling2D(pool_size=(block_shape[ROW_AXIS], block_shape[COL_AXIS]),
                                 strides=(1, 1))(block)
        flatten1 = Flatten()(pool2)
        dense = Dense(units=num_outputs, kernel_initializer="he_normal",
                      activation="softmax" if activation else 'linear', name='classifier')(flatten1)

        outs = [dense]

        model = Model(inputs=input, outputs=outs)
        return model
Code example #12
File: turbo_rnn.py  Project: kailunxu/ML4COM
def load_model(interleave_array,
               dec_iter_num=6,
               block_len=1000,
               network_saved_path='default',
               num_layer=2,
               learning_rate=0.001,
               num_hidden_unit=200,
               rnn_type='lstm',
               rnn_direction='bd',
               last_layer_sigmoid=True,
               loss='mean_squared_error',
               **kwargs):

    if network_saved_path == 'default':
        network_saved_path = './model_zoo/awgn_model_end2end/yihan_clean_ttbl_0.870905022927_snr_3.h5'

    #rnn_type    = 'lstm'    #'gru', 'lstm'
    print('[RNN Model] using model type', rnn_type)
    print('[RNN Model] using model path', network_saved_path)
    ######################################
    # Encode Turbo Code
    ######################################
    batch_size = 32

    print('[RNN Model] Block length', block_len)
    print('[RNN Model] Evaluate Batch size', batch_size)
    print('[RNN Model] Number of decoding layers', dec_iter_num)

    def errors(y_true, y_pred):
        myOtherTensor = K.not_equal(y_true, K.round(y_pred))
        return K.mean(tf.cast(myOtherTensor, tf.float32))

    ####################################################
    # Define Model
    ####################################################
    if rnn_direction == 'bd':
        if rnn_type == 'lstm':
            f1 = Bidirectional(
                LSTM(name='bidirectional_1',
                     units=num_hidden_unit,
                     activation='tanh',
                     return_sequences=True,
                     dropout=1.0))
            f2 = BatchNormalization(name='batch_normalization_1')
            f3 = Bidirectional(
                LSTM(name='bidirectional_2',
                     units=num_hidden_unit,
                     activation='tanh',
                     return_sequences=True,
                     dropout=1.0))
            f4 = BatchNormalization(name='batch_normalization_2')
        elif rnn_type == 'gru':
            f1 = Bidirectional(
                GRU(name='bidirectional_1',
                    units=num_hidden_unit,
                    activation='tanh',
                    return_sequences=True,
                    dropout=1.0))
            f2 = BatchNormalization(name='batch_normalization_1')
            f3 = Bidirectional(
                GRU(name='bidirectional_2',
                    units=num_hidden_unit,
                    activation='tanh',
                    return_sequences=True,
                    dropout=1.0))
            f4 = BatchNormalization(name='batch_normalization_2')
        else:  #SimpleRNN
            f1 = Bidirectional(
                SimpleRNN(name='bidirectional_1',
                          units=num_hidden_unit,
                          activation='tanh',
                          return_sequences=True,
                          dropout=1.0))
            f2 = BatchNormalization(name='batch_normalization_1')
            f3 = Bidirectional(
                SimpleRNN(name='bidirectional_2',
                          units=num_hidden_unit,
                          activation='tanh',
                          return_sequences=True,
                          dropout=1.0))
            f4 = BatchNormalization(name='batch_normalization_2')

    elif rnn_direction == 'sd':
        if rnn_type == 'lstm':
            f1 = LSTM(name='lstm_1',
                      units=num_hidden_unit,
                      activation='tanh',
                      return_sequences=True,
                      dropout=1.0)
            f2 = BatchNormalization(name='batch_normalization_1')
            f3 = LSTM(name='lstm_2',
                      units=num_hidden_unit,
                      activation='tanh',
                      return_sequences=True,
                      dropout=1.0)
            f4 = BatchNormalization(name='batch_normalization_2')
        elif rnn_type == 'gru':
            f1 = GRU(name='gru_1',
                     units=num_hidden_unit,
                     activation='tanh',
                     return_sequences=True,
                     dropout=1.0)
            f2 = BatchNormalization(name='batch_normalization_1')
            f3 = GRU(name='gru_2',
                     units=num_hidden_unit,
                     activation='tanh',
                     return_sequences=True,
                     dropout=1.0)
            f4 = BatchNormalization(name='batch_normalization_2')
        else:  #SimpleRNN
            f1 = SimpleRNN(name='simple_rnn_1',
                           units=num_hidden_unit,
                           activation='tanh',
                           return_sequences=True,
                           dropout=1.0)
            f2 = BatchNormalization(name='batch_normalization_1')
            f3 = SimpleRNN(name='simple_rnn_2',
                           units=num_hidden_unit,
                           activation='tanh',
                           return_sequences=True,
                           dropout=1.0)
            f4 = BatchNormalization(name='batch_normalization_2')
    else:
        print('[RNN Model] RNN direction not supported, exit')
        import sys
        sys.exit()

    f5 = TimeDistributed(Dense(1), name='time_distributed_1')

    if last_layer_sigmoid:
        f6 = TimeDistributed(Dense(1, activation='sigmoid'),
                             name='time_distributed_sigmoid')
    else:
        f6 = TimeDistributed(Dense(1), name='time_distributed_sigmoid')

    inputs = Input(shape=(block_len, 5))
    #interleave_array = interleaver.p_array

    def split_data_0(x):
        x1 = x[:, :, 0:3]
        return x1

    def split_data_1(x):
        x1 = x[:, :, 0:2]
        return x1

    def split_data_2(x):
        xx = x[:, :, 3:5]
        return xx

    def takeLL(x):
        #x1_out = x[:,:,0]
        return tf.reshape(x[:, :, 0] - x[:, :, 3],
                          [tf.shape(x[:, :, 0])[0], block_len, 1])
        #return tf.reshape(x[:,:,0],[tf.shape(x)[0],block_len,1])
        #return x

    def concat(x):
        return K.concatenate(x)

    takell = Lambda(takeLL)
    lambda_concat = Lambda(concat)
    DeInt = DeInterleave(interleave_array=interleave_array)
    DoInt = Interleave(interleave_array=interleave_array)

    def subtr(x2):
        # x2_out = f5(f4(f3(f2(f1(x2)))))
        # return x2_out
        if num_layer == 2:
            x2_out = f5(f4(f3(f2(f1(x2)))))
        elif num_layer == 1:
            x2_out = f5(f2(f1(x2)))
        else:
            print('other layer not supported!')
            return
        x2_temp = lambda_concat([x2_out, x2])
        x2 = takell(x2_temp)
        return x2

    # def subtr_sigmoid(x2):
    #     x2_out = f6(f4(f3(f2(f1(x2)))))
    #     x2_temp = Lambda(concat)([x2_out, x2])
    #     x2 = Lambda(takeLL)(x2_temp)
    #     return x2

    x_input_1 = Lambda(split_data_1,
                       name='split_data_normal')(inputs)  # sys, par1
    x_input_2 = Lambda(split_data_2,
                       name='split_data_interleave')(inputs)  # sys_i, par2

    x1 = Lambda(split_data_0,
                name='three')(inputs)  # sys, par1, 0 (initial likelihood)
    x1 = subtr(x1)  #x1 = f5(f4(f3(f2(f1(x1)))))
    x1 = DoInt(x1)

    x2 = lambda_concat([x_input_2, x1])
    x2 = subtr(x2)  #x2 = f5(f4(f3(f2(f1(x2)))))
    x2 = DeInt(x2)

    for dec_iter in range(dec_iter_num - 2):
        x3 = lambda_concat([x_input_1, x2])
        x3 = subtr(x3)  #x3 = f5(f4(f3(f2(f1(x3)))))
        x3 = DoInt(x3)

        x4 = lambda_concat([x_input_2, x3])
        x4 = subtr(x4)
        x4 = DeInt(x4)
        x2 = x4

    x3 = lambda_concat([x_input_1, x2])
    x3 = subtr(x3)  #x3 = f5(f4(f3(f2(f1(x3)))))
    x3 = DoInt(x3)

    x4 = lambda_concat([x_input_2, x3])

    if num_layer == 2:
        x4 = f6(f4(f3(f2(f1(x4)))))
    elif num_layer == 1:
        x4 = f6(f2(f1(x4)))

    x4 = DeInt(x4)

    predictions = x4

    model = Model(inputs=inputs, outputs=predictions)
    optimizer = keras.optimizers.Adam(lr=learning_rate, clipnorm=1.0)
    model.compile(optimizer=optimizer, loss=loss, metrics=[errors])

    try:
        model.load_weights(network_saved_path, by_name=True)
    except Exception:
        print('[RNN Model][Warning] loading weights failed!')

    #print(model.summary())

    layer_from = model.get_layer('time_distributed_1')
    weights = layer_from.get_weights()
    layer_to = model.get_layer('time_distributed_sigmoid')
    layer_to.set_weights(weights)

    return model
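A hypothetical call, for orientation only: the permutation must match the interleaver used by the turbo encoder, and the default weight path only resolves inside the project tree, so the `load_weights` call above may simply warn and continue.

import numpy as np

model = load_model(interleave_array=np.random.permutation(100),
                   block_len=100,
                   dec_iter_num=6,
                   rnn_type='gru',
                   rnn_direction='bd')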
Code example #13
 def creat_generator(self):
     # layer 0
     d0 = Input(shape=self.image_shape)
     # layer 1
     d1 = Conv2D(filters=64, kernel_size=4, strides=2, padding='same')(d0)
     d1 = LeakyReLU(alpha=0.2)(d1)
     # layer 2
     d2 = Conv2D(filters=128, kernel_size=4, strides=2, padding='same')(d1)
     d2 = LeakyReLU(alpha=0.2)(d2)
     d2 = BatchNormalization(momentum=0.8)(d2)
     # layer 3
     d3 = Conv2D(filters=256, kernel_size=4, strides=2, padding='same')(d2)
     d3 = LeakyReLU(alpha=0.2)(d3)
     d3 = BatchNormalization(momentum=0.8)(d3)
     # layer 4
     d4 = Conv2D(filters=512, kernel_size=4, strides=2, padding='same')(d3)
     d4 = LeakyReLU(alpha=0.2)(d4)
     d4 = BatchNormalization(momentum=0.8)(d4)
     # layer 5
     d5 = Conv2D(filters=512, kernel_size=4, strides=2, padding='same')(d4)
     d5 = LeakyReLU(alpha=0.2)(d5)
     d5 = BatchNormalization(momentum=0.8)(d5)
     # layer 6
     d6 = Conv2D(filters=512, kernel_size=4, strides=2, padding='same')(d5)
     d6 = LeakyReLU(alpha=0.2)(d6)
     d6 = BatchNormalization(momentum=0.8)(d6)
     # layer 7
     d7 = Conv2D(filters=512, kernel_size=4, strides=2, padding='same')(d6)
     d7 = LeakyReLU(alpha=0.2)(d7)
     d7 = BatchNormalization(momentum=0.8)(d7)
     # layer 6
     u6 = UpSampling2D(size=2)(d7)
     u6 = Conv2D(filters=512, kernel_size=4, strides=1, padding='same', activation='relu')(u6)
     u6 = BatchNormalization(momentum=0.8)(u6)
     u6 = Concatenate()([u6, d6])
     # layer 5
     u5 = UpSampling2D(size=2)(u6)
     u5 = Conv2D(filters=512, kernel_size=4, strides=1, padding='same', activation='relu')(u5)
     u5 = BatchNormalization(momentum=0.8)(u5)
     u5 = Concatenate()([u5, d5])
     # layer 4
     u4 = UpSampling2D(size=2)(u5)
     u4 = Conv2D(filters=512, kernel_size=4, strides=1, padding='same', activation='relu')(u4)
     u4 = BatchNormalization(momentum=0.8)(u4)
     u4 = Concatenate()([u4, d4])
     # layer 3
     u3 = UpSampling2D(size=2)(u4)
     u3 = Conv2D(filters=256, kernel_size=4, strides=1, padding='same', activation='relu')(u3)
     u3 = BatchNormalization(momentum=0.8)(u3)
     u3 = Concatenate()([u3, d3])
     # layer 2
     u2 = UpSampling2D(size=2)(u3)
     u2 = Conv2D(filters=128, kernel_size=4, strides=1, padding='same', activation='relu')(u2)
     u2 = BatchNormalization(momentum=0.8)(u2)
     u2 = Concatenate()([u2, d2])
     # layer 1
     u1 = UpSampling2D(size=2)(u2)
     u1 = Conv2D(filters=64, kernel_size=4, strides=1, padding='same', activation='relu')(u1)
     u1 = BatchNormalization(momentum=0.8)(u1)
     u1 = Concatenate()([u1, d1])
     # layer 0
     u0 = UpSampling2D(size=2)(u1)
     u0 = Conv2D(self.nC, kernel_size=4, strides=1, padding='same', activation='tanh')(u0)
     return Model(d0, u0)
Code example #14
    def __init__(self, n_cols, number_layers=6, node_size=100,
                 prob_dropout=0.1, sparsity_const=10e-5, activation='relu', different_size=None,
                 sampling=None, beta=1, nodes_range='auto'):
        """
        :param n_cols: Number of columns of the dataset
        :param number_layers: Number of total layers in the network (without considering the output node)
        :param node_size: Number of nodes per layer
        :param prob_dropout: proportion to dropout
        :param sparsity_const: Restrict some nodes and not all (as PCA), using regularization strategy
        :param activation: Activation function
        :param different_size: Different sizes in the nodes between root and auxiliars
        """
        self.n_cols = n_cols
        self.activation = activation
        self.prob_dropout = prob_dropout
        self.number_layers = number_layers
        self.node_size = node_size
        self.sparsity_const = sparsity_const
        self.sampling = sampling
        self.beta = beta
        self.nodes_range = nodes_range

        input_layer = Input(shape=(n_cols,))
        if nodes_range == 'auto':
            nodes_range = range(n_cols - node_size*2, node_size - 1, -node_size)
        else:
            nodes_range = self.nodes_range
        print(nodes_range)
        # RESIDUAL LAYER
        if sparsity_const is not None:
            residual = layers.Dense(node_size, activation=self.activation,
                                    name='residual_layer_' + str(node_size),
                                    activity_regularizer=regularizers.l1(sparsity_const))(input_layer)
        else:
            residual = layers.Dense(node_size, activation=self.activation, name='root_layer_' + str(node_size))(input_layer)

        y = residual
        print('residual', y)

        # ROOT LAYERS
        if different_size is None:
            for nodes in nodes_range:
                print(nodes)
                if sparsity_const is not None:
                    y = layers.Dense(node_size, activation=self.activation,
                                     activity_regularizer=regularizers.l1(sparsity_const))(y)
                else:
                    y = layers.Dense(node_size, activation=self.activation)(y)
                if self.prob_dropout is not None:
                    y = layers.Dropout(self.prob_dropout)(y)
                print(y)
        else:
            for nodes in nodes_range:
                if sparsity_const is not None:
                    y = residual
                    y = layers.Dense(nodes, activation=self.activation, name='root_layer_' + str(nodes),
                                     activity_regularizer=regularizers.l1(sparsity_const))(y)
                else:
                    y = layers.Dense(nodes + different_size, activation=self.activation, name='root_layer_' + str(nodes))(y)
                if self.prob_dropout is not None:
                    y = layers.Dropout(self.prob_dropout)(y)

            residual = layers.Dense(node_size + different_size)(residual)

        y = layers.add([y, residual])
        output_tensor = layers.Dense(2, activation='softmax')(y)

        self.model = Model(input_layer, output_tensor)
        print(self.model.summary())
Code example #15
# mean and variance of latent variables
z_mu = Dense(latent_dim, name='latent_mu')(x)   
z_sigma = Dense(latent_dim, name='latent_sigma')(x)  


# Reparameterization trick (Refer to relevant research regarding why this is required)
def sample_z(args):
  z_mu, z_sigma = args
  eps = K.random_normal(shape=(K.shape(z_mu)[0], K.int_shape(z_mu)[1]))
  return z_mu + K.exp(z_sigma / 2) * eps

# Sample vector from latent distribution
z = Lambda(sample_z, output_shape=(latent_dim, ), name='z')([z_mu, z_sigma])

# Print summary of encoder model for testing.
encoder = Model(input_seq, [z_mu, z_sigma, z], name='encoder')
print(encoder.summary())


# Decoder
# These layers and sizes are completely arbitrary and could be experimented with
decoder_input = Input(shape=(latent_dim, ), name='decoder_input')
x = Dense(temp_shape[1]*temp_shape[2]*temp_shape[3], activation='relu')(decoder_input)
x = Reshape((temp_shape[1], temp_shape[2], temp_shape[3]))(x)
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
x = Dense(1, activation='sigmoid', name='decoder_output')(x)

decoder = Model(decoder_input, x, name='decoder')
decoder.summary()
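The snippet stops after the decoder; to train, encoder and decoder still have to be joined into a VAE with a reconstruction-plus-KL objective. A minimal sketch under the usual convention that `z_sigma` holds the log-variance (the binary cross-entropy reconstruction term is an assumption about the data range):

z_decoded = decoder(z)
vae = Model(input_seq, z_decoded, name='vae')

def vae_loss(y_true, y_pred):
    # reconstruction term plus KL divergence of q(z|x) from the unit Gaussian prior
    recon = K.mean(K.binary_crossentropy(K.flatten(y_true), K.flatten(y_pred)))
    kl = -0.5 * K.mean(1 + z_sigma - K.square(z_mu) - K.exp(z_sigma))
    return recon + kl

vae.compile(optimizer='adam', loss=vae_loss)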
Code example #16
def SEInceptionResNetV2(include_top=True,
                        weights=None,
                        input_tensor=None,
                        input_shape=None,
                        pooling=None,
                        classes=3):
    """Instantiates the SE-Inception-ResNet v2 architecture.
    Optionally loads weights pre-trained on ImageNet.
    Note that when using TensorFlow, for best performance you should
    set `"image_data_format": "channels_last"` in your Keras config
    at `~/.keras/keras.json`.
    The model and the weights are compatible with both TensorFlow and Theano
    backends (but not CNTK). The data format convention used by the model is
    the one specified in your Keras config file.
    Note that the default input image size for this model is 299x299, instead
    of 224x224 as in the VGG16 and ResNet models. Also, the input preprocessing
    function is different (i.e., do not use `imagenet_utils.preprocess_input()`
    with this model. Use `preprocess_input()` defined in this module instead).
    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization)
            or `'imagenet'` (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is `False` (otherwise the input shape
            has to be `(299, 299, 3)` (with `'channels_last'` data format)
            or `(3, 299, 299)` (with `'channels_first'` data format).
            It should have exactly 3 inputs channels,
            and width and height should be no smaller than 139.
            E.g. `(150, 150, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the last convolutional layer.
            - `'avg'` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `'max'` means that global max pooling will be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is `True`, and
            if no `weights` argument is specified.
    # Returns
        A Keras `Model` instance.
    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
        RuntimeError: If attempting to run this model with an unsupported backend.
    """
    if K.backend() in {'cntk'}:
        raise RuntimeError(K.backend() + ' backend is currently unsupported for this model.')

    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')

    # Input shape is fixed for this variant (single-channel 256x256 images)
    input_shape = (256, 256, 1)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # Stem block: 35 x 35 x 192
    x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid')
    x = conv2d_bn(x, 32, 3, padding='valid')
    x = conv2d_bn(x, 64, 3)
    x = MaxPooling2D(3, strides=2)(x)
    x = conv2d_bn(x, 80, 1, padding='valid')
    x = conv2d_bn(x, 192, 3, padding='valid')
    x = MaxPooling2D(3, strides=2)(x)

    # Mixed 5b (Inception-A block): 35 x 35 x 320
    branch_0 = conv2d_bn(x, 96, 1)
    branch_1 = conv2d_bn(x, 48, 1)
    branch_1 = conv2d_bn(branch_1, 64, 5)
    branch_2 = conv2d_bn(x, 64, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_pool = AveragePooling2D(3, strides=1, padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
    x = Concatenate(axis=channel_axis, name='mixed_5b')(branches)

    # squeeze and excite block
    x = squeeze_excite_block(x)

    # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
    for block_idx in range(1, 11):
        x = inception_resnet_block(x,
                                   scale=0.17,
                                   block_type='block35',
                                   block_idx=block_idx)

    # Mixed 6a (Reduction-A block): 17 x 17 x 1088
    branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid')
    branch_1 = conv2d_bn(x, 256, 1)
    branch_1 = conv2d_bn(branch_1, 256, 3)
    branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid')
    branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_pool]
    x = Concatenate(axis=channel_axis, name='mixed_6a')(branches)

    # squeeze and excite block
    x = squeeze_excite_block(x)

    # 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
    for block_idx in range(1, 21):
        x = inception_resnet_block(x,
                                   scale=0.1,
                                   block_type='block17',
                                   block_idx=block_idx)

    # Mixed 7a (Reduction-B block): 8 x 8 x 2080
    branch_0 = conv2d_bn(x, 256, 1)
    branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid')
    branch_1 = conv2d_bn(x, 256, 1)
    branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid')
    branch_2 = conv2d_bn(x, 256, 1)
    branch_2 = conv2d_bn(branch_2, 288, 3)
    branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid')
    branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    x = Concatenate(axis=channel_axis, name='mixed_7a')(branches)

    # squeeze and excite block
    x = squeeze_excite_block(x)

    # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
    for block_idx in range(1, 10):
        x = inception_resnet_block(x,
                                   scale=0.2,
                                   block_type='block8',
                                   block_idx=block_idx)
    x = inception_resnet_block(x,
                               scale=1.,
                               activation=None,
                               block_type='block8',
                               block_idx=10)

    # squeeze and excite block
    x = squeeze_excite_block(x)

    # Final convolution block: 8 x 8 x 1536
    x = conv2d_bn(x, 1536, 1, name='conv_7b')

    if include_top:
        # Classification block
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model
    model = Model(inputs, x, name='se_inception_resnet_v2')

    return model
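A quick instantiation check (the helpers `conv2d_bn`, `squeeze_excite_block`, and `inception_resnet_block` must be in scope, as they are in the source module):

model = SEInceptionResNetV2(include_top=True, weights=None, classes=3)
model.summary()  # the input shape is fixed to (256, 256, 1) inside the function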
Code example #17
# plt.show()

input_img = Input(shape=(28, 28, 1))

x = Conv2D(32, (3, 3), activation='relu', padding='same')(input_img)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2, 2), padding='same')(x)

x = Conv2D(32, (3, 3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['acc'])

autoencoder.summary()

filepath = 'compress/denoising.h5'
cp = ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)
tb = TensorBoard(log_dir='compress/', histogram_freq=0, write_graph=False)

autoencoder.fit(x_train_noisy, x_train,
                epochs=100,
                batch_size=128,
                shuffle=True,
                validation_data=(x_test_noisy, x_test),
                callbacks=[tb, cp])
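Once trained, denoising is just a forward pass over the noisy test set (a short sketch using the arrays defined above):

decoded_imgs = autoencoder.predict(x_test_noisy)
print(decoded_imgs.shape)  # (n_test, 28, 28, 1) reconstructions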
Code example #18
# Create train test split of 70% to 30%
train_cv_X, test_X, train_cv_y, test_y = train_test_split(X_, y_, test_size=0.3, random_state=42)

# Enable Tensorboard to monitor training progress and observe model architecture
tBoard = TensorBoard(log_dir='./TensorBoard', histogram_freq=0, write_graph=True, write_images=True)

input_ = Input((sampleWindow, nSensors))

lstm = LSTM(300)(input_)

dense1 = Dense(800, activation='relu')(lstm)
dense1 = Dense(400, activation='relu')(dense1)

output_ = Dense(7, activation='softmax')(dense1)

clf = Model(inputs=input_, outputs=output_)

print(clf.summary())
clf.compile('adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Define callbacks for reducing learning rate and early stopping
reduce_lr = ReduceLROnPlateau(patience=2, factor=0.2, min_lr=1.6e-6, monitor='val_loss')
stopEarly = EarlyStopping(patience=10, monitor='val_loss')

# Fit model to training data
clf.fit(train_cv_X, train_cv_y, epochs=50, batch_size=32, callbacks=[reduce_lr, stopEarly], validation_data=(test_X, test_y))

# Save model to current directory
clf.save('activityDetector.h5f')

# Load model from current directory
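# (hypothetical completion -- the original snippet is truncated here)
from keras.models import load_model
clf = load_model('activityDetector.h5f')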
Code example #19
left = Dropout(0.3)(left)

#Now do the same for right side as well
right_input = Input(shape=(Xtrain_right.shape[1],),dtype='int32')

right = Embedding(input_dim=vocab_size, output_dim=word_embed_size, 
                   input_length=seq_maxlen, weights=[embedding_weight_matrix], 
                   trainable = False) (right_input)
right = LSTM(100, return_sequences=False)(right)
right = Dropout(0.3)(right)

x = concatenate([left, right])
x = Dense(10, activation='relu')(x)
output = Dense(1)(x) #Tells the similarity

model = Model(inputs=[left_input, right_input], outputs=output)
#print(model.summary())

model.compile(optimizer="adam", loss="mean_squared_error")

model.fit([Xtrain_left, Xtrain_right], ytrain, batch_size=batch_size,
          epochs=epochs, validation_split=0.2)

#plot_loss(history)

#predict the sentence similarity on test data
sentence_left_test, sentence_right_test, scores_test = load_data(os.path.join(test_dir, 'test.txt'))

sentence_left_test1 = list(map(cleanSentence, sentence_left_test))
sentence_right_test1 = list(map(cleanSentence, sentence_right_test))
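
To actually score the cleaned test pairs, the sentences still have to be indexed and padded the same way as the training data; a sketch assuming the tokenizer and seq_maxlen used earlier in the script (names assumed, not shown in this excerpt):

from keras.preprocessing.sequence import pad_sequences

Xtest_left = pad_sequences(tokenizer.texts_to_sequences(sentence_left_test1), maxlen=seq_maxlen)
Xtest_right = pad_sequences(tokenizer.texts_to_sequences(sentence_right_test1), maxlen=seq_maxlen)
predicted_scores = model.predict([Xtest_left, Xtest_right]).flatten()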
Code example #20
File: googLeNet.py  Project: lismd/CNN-CV-algorithms
def define_model(weight_path=None):
    input = Input(shape=(224, 224, 3))

    conv1_7x7_s2 = Conv2D(filters=64,
                          kernel_size=(7, 7),
                          strides=(2, 2),
                          padding='same',
                          activation='relu',
                          kernel_regularizer=l2(0.01))(input)

    maxpool1_3x3_s2 = MaxPooling2D(pool_size=(3, 3),
                                   strides=(2, 2),
                                   padding='same')(conv1_7x7_s2)

    conv2_3x3_reduce = Conv2D(filters=64,
                              kernel_size=(1, 1),
                              padding='same',
                              activation='relu',
                              kernel_regularizer=l2(0.01))(maxpool1_3x3_s2)

    conv2_3x3 = Conv2D(filters=192,
                       kernel_size=(3, 3),
                       padding='same',
                       activation='relu',
                       kernel_regularizer=l2(0.01))(conv2_3x3_reduce)

    maxpool2_3x3_s2 = MaxPooling2D(pool_size=(3, 3),
                                   strides=(2, 2),
                                   padding='same')(conv2_3x3)

    inception_3a = inception_model(input=maxpool2_3x3_s2,
                                   filters_1x1=64,
                                   filters_3x3_reduce=96,
                                   filters_3x3=128,
                                   filters_5x5_reduce=16,
                                   filters_5x5=32,
                                   filters_pool_proj=32)

    inception_3b = inception_model(input=inception_3a,
                                   filters_1x1=128,
                                   filters_3x3_reduce=128,
                                   filters_3x3=192,
                                   filters_5x5_reduce=32,
                                   filters_5x5=96,
                                   filters_pool_proj=64)

    maxpool3_3x3_s2 = MaxPooling2D(pool_size=(3, 3),
                                   strides=(2, 2),
                                   padding='same')(inception_3b)

    inception_4a = inception_model(input=maxpool3_3x3_s2,
                                   filters_1x1=192,
                                   filters_3x3_reduce=96,
                                   filters_3x3=208,
                                   filters_5x5_reduce=16,
                                   filters_5x5=48,
                                   filters_pool_proj=64)

    inception_4b = inception_model(input=inception_4a,
                                   filters_1x1=160,
                                   filters_3x3_reduce=112,
                                   filters_3x3=224,
                                   filters_5x5_reduce=24,
                                   filters_5x5=64,
                                   filters_pool_proj=64)

    inception_4c = inception_model(input=inception_4b,
                                   filters_1x1=128,
                                   filters_3x3_reduce=128,
                                   filters_3x3=256,
                                   filters_5x5_reduce=24,
                                   filters_5x5=64,
                                   filters_pool_proj=64)

    inception_4d = inception_model(input=inception_4c,
                                   filters_1x1=112,
                                   filters_3x3_reduce=144,
                                   filters_3x3=288,
                                   filters_5x5_reduce=32,
                                   filters_5x5=64,
                                   filters_pool_proj=64)

    inception_4e = inception_model(input=inception_4d,
                                   filters_1x1=256,
                                   filters_3x3_reduce=160,
                                   filters_3x3=320,
                                   filters_5x5_reduce=32,
                                   filters_5x5=128,
                                   filters_pool_proj=128)

    maxpool4_3x3_s2 = MaxPooling2D(pool_size=(3, 3),
                                   strides=(2, 2),
                                   padding='same')(inception_4e)

    inception_5a = inception_model(input=maxpool4_3x3_s2,
                                   filters_1x1=256,
                                   filters_3x3_reduce=160,
                                   filters_3x3=320,
                                   filters_5x5_reduce=32,
                                   filters_5x5=128,
                                   filters_pool_proj=128)

    inception_5b = inception_model(input=inception_5a,
                                   filters_1x1=384,
                                   filters_3x3_reduce=192,
                                   filters_3x3=384,
                                   filters_5x5_reduce=48,
                                   filters_5x5=128,
                                   filters_pool_proj=128)

    averagepool1_7x7_s1 = AveragePooling2D(pool_size=(7, 7),
                                           strides=(7, 7),
                                           padding='same')(inception_5b)

    drop1 = Dropout(rate=0.4)(averagepool1_7x7_s1)

    flat = keras.layers.Flatten()(drop1)
    linear = Dense(units=1000,
                   activation='softmax',
                   kernel_regularizer=l2(0.01))(flat)
    last = linear

    model = Model(inputs=input, outputs=last)
    if weight_path:
        model.load_weights(weight_path)
    model.summary()

    return model
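
define_model calls an inception_model helper that this excerpt omits; a minimal sketch consistent with the call sites above (a standard GoogLeNet inception block, assumed rather than the project's actual code):

from keras.layers import Conv2D, MaxPooling2D, concatenate

def inception_model(input, filters_1x1, filters_3x3_reduce, filters_3x3,
                    filters_5x5_reduce, filters_5x5, filters_pool_proj):
    # four parallel branches, concatenated along the channel axis
    branch_1x1 = Conv2D(filters_1x1, (1, 1), padding='same', activation='relu')(input)

    branch_3x3 = Conv2D(filters_3x3_reduce, (1, 1), padding='same', activation='relu')(input)
    branch_3x3 = Conv2D(filters_3x3, (3, 3), padding='same', activation='relu')(branch_3x3)

    branch_5x5 = Conv2D(filters_5x5_reduce, (1, 1), padding='same', activation='relu')(input)
    branch_5x5 = Conv2D(filters_5x5, (5, 5), padding='same', activation='relu')(branch_5x5)

    branch_pool = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(input)
    branch_pool = Conv2D(filters_pool_proj, (1, 1), padding='same', activation='relu')(branch_pool)

    return concatenate([branch_1x1, branch_3x3, branch_5x5, branch_pool], axis=3)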
Code example #21
File: cali.py  Project: Hong-Xiang/XLearning
def model_define_():
    is_cata = False
    reps = []
    ip = Input(shape=(10, 10, 2))
    ipc = Input(shape=(1,))
    h = Conv2D(32, 3, activation='elu')(ip)
    h = MaxPool2D()(h)
    reps.append(Flatten(name='rep0')(h))

    h = Conv2D(128, 3, activation='elu')(h)
    h = MaxPool2D()(h)
    h = Dropout(0.5)(h)
    # h = Conv2D(256, 3, activation='elu')(h)
    # h = Dropout(0.5)(h)
    # h = Conv2D(512, 3, activation='elu')(h)
    reps.append(Flatten(name='rep1')(h))

    h = Conv2D(8, 3, activation='elu', padding='same')(ip)
    h = MaxPool2D()(h)
    h = Conv2D(32, 3, activation='elu', padding='same')(h)
    h = MaxPool2D()(h)
    h = Conv2D(128, 3, activation='elu', padding='same')(h)
    h = MaxPool2D()(h)
    h = Conv2D(512, 1, activation='elu', padding='same')(h)
    h = Dropout(0.5)(h)
    h = Flatten(name='rep2')(h)
    reps.append(h)

    h = Conv2D(8, 3, activation='elu')(ip)
    h = Conv2D(16, 3, activation='elu')(h)
    h = Conv2D(32, 3, activation='elu')(h)
    h = Conv2D(64, 3, activation='elu')(h)
    h = Conv2D(64, 1, activation='elu')(h)
    h = Dropout(0.5)(h)
    reps.append(Flatten(name='rep3')(h))

    h = Conv2D(8, 5, activation='elu', padding='same')(ip)
    h = MaxPool2D()(h)
    h = Conv2D(32, 5, activation='elu', padding='same')(h)
    h = MaxPool2D()(h)
    h = Conv2D(64, 1, activation='elu')(h)
    h = Dropout(0.5)(h)
    reps.append(Flatten(name='rep4')(h))

    h = Conv2D(32, 5, activation='elu')(ip)
    h = Conv2D(64, 5, activation='elu')(h)
    h = Conv2D(64, 1, activation='elu')(h)
    h = Dropout(0.5)(h)
    reps.append(Flatten(name='rep5')(h))

    h = Flatten()(ip)
    reps.append(h)
    for i in range(2):
        h = Dense(128, activation='elu')(h)
    h = Dropout(0.5)(h)
    reps.append(Dense(128, activation='elu', name='rep6')(h))

    h = Conv2D(8, 5, activation='elu', padding='same')(ip)
    h = MaxPool2D(pool_size=(1, 2))(h)
    h = Conv2D(16, 5, activation='elu', padding='same')(h)
    h = MaxPool2D(pool_size=(1, 2))(h)
    h = Conv2D(32, 1, activation='elu')(h)
    h = Dropout(0.5)(h)
    reps.append(Flatten(name='rep7')(h))

    reps.append(ipc)
    h = concatenate(reps)
    h = Dense(1024, activation='elu')(h)
    h = Dropout(0.5)(h)
    h = Dense(1024, activation='elu')(h)
    h = Dropout(0.5)(h)
    out = Dense(1)(h)
    out = add([out, ipc])
    m = Model([ip, ipc], out)
    opt = Adam(lr=1e-3)
    m.compile(loss='mse', optimizer=opt)
    m.summary()
    return m
Code example #22
# critic model
action_input = Input(shape=(nb_actions,), name='action_input')
observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
flattened_observation = Flatten()(observation_input)
x = concatenate([action_input, flattened_observation])  # keras.layers.concatenate; replaces the removed Keras 1 merge(..., mode='concat')

# Hidden layers
for _ in range(NUM_HIDDEN_LAYERS):
    x = (Dense(LAYER_SIZE))(x)
    x = Activation('relu')(x)

# Output Layer
x = Dense(1)(x)
x = Activation('linear')(x)
critic = Model(inputs=[action_input, observation_input], outputs=x)
print(critic.summary())

# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=2*NUM_STEPS, window_length=1)
# random_process = OrnsteinUhlenbeckProcess(size=nb_actions, theta=.15, mu=0., sigma=.3)
random_process = OrnsteinUhlenbeckProcess(size=nb_actions, dt = env.tau, theta=0.6, mu=0.0, sigma=0.5, sigma_min=0.15, n_steps_annealing=NUM_STEPS)

agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,
                  memory=memory, nb_steps_warmup_critic=100, nb_steps_warmup_actor=100,
                  random_process=random_process, gamma=.999, target_model_update=1e-3,
                  delta_clip=1.0)

agent.compile(Adam(lr=.001, clipnorm=1.0), metrics=['mae'])
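
Compilation is where this excerpt ends; with keras-rl, training and saving the agent would typically follow. A sketch reusing env and NUM_STEPS from the surrounding script (the weights filename is an assumption):

agent.fit(env, nb_steps=NUM_STEPS, visualize=False, verbose=1)
agent.save_weights('ddpg_weights.h5f', overwrite=True)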
Code example #23
File: keras_test.py  Project: grjd/keras
# store the mean and std to be used later for prediction
Xmeans = scaler.mean_
Xstds = scaler.scale_
# the target variable is the fourth column
y = Xscaled[:, 3]
# delete the target variable from the input (training data)
X = np.delete(Xscaled, 3, axis=1)
# split the data into 70% training and 30% testing
train_size = int(0.7*X.shape[0])
Xtrain, Xtest, ytrain, ytest = X[0:train_size], X[train_size:], y[0:train_size], y[train_size:]
# define the network: a 2-layer dense network maps the 12 input features to a scaled prediction;
# the hidden layer has 8 neurons; glorot_uniform initialization, mse loss and the adam optimizer
readings = Input(shape=(12,))
x = Dense(8, activation="relu", kernel_initializer="glorot_uniform")(readings)
benzene = Dense(1, kernel_initializer="glorot_uniform")(x)
model = Model(inputs=[readings], outputs=[benzene])
model.compile(loss="mse", optimizer="adam")
# train the model for NUM_EPOCHS epochs with batch size BATCH_SIZE
NUM_EPOCHS = 20
BATCH_SIZE = 10
history = model.fit(Xtrain, ytrain, batch_size=BATCH_SIZE, epochs=NUM_EPOCHS, validation_split=0.2)
# check our model predictions
ytest_ = model.predict(Xtest).flatten()
for i in range(ytest.shape[0]):
	label = (ytest[i]*Xstds[3]) + Xmeans[3]
	prediction = (ytest_[i]*Xstds[3]) + Xmeans[3]
	print("Target benzene expected:{:.3f}, predicted:{:.3f}".format( label, prediction))
# Plot the results, actual values against the predictions
plt.plot(np.arange(ytest.shape[0]), (ytest * Xstds[3]) + Xmeans[3], color="b", label="actual")
plt.plot(np.arange(ytest_.shape[0]), (ytest_ * Xstds[3]) + Xmeans[3], color="r", label="predicted")
plt.xlabel("time")
Code example #24
File: hw5_3.py  Project: HuaTsai/NCTU_Courses
                        padding='valid',
                        activation='relu',
                        strides=1)(tweet_encoder)
trigram_branch = GlobalMaxPooling1D()(trigram_branch)
fourgram_branch = Conv1D(filters=100,
                         kernel_size=4,
                         padding='valid',
                         activation='relu',
                         strides=1)(tweet_encoder)
fourgram_branch = GlobalMaxPooling1D()(fourgram_branch)
merged = concatenate([bigram_branch, trigram_branch, fourgram_branch], axis=1)
merged = Dense(256, activation='relu')(merged)
merged = Dropout(0.2)(merged)
merged = Dense(1)(merged)
output = Activation('sigmoid')(merged)
model = Model(inputs=[tweet_input], outputs=[output])
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
print('Model summary')
print(model.summary())

# %%
filepath = "CNN_best_weights.{epoch:02d}-{val_acc:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_acc',
                             verbose=1,
                             save_best_only=True,
                             mode='max')
model.fit(x_train_seq,
          y_train,
          batch_size=32,
          epochs=5,
          validation_split=0.2,
          callbacks=[checkpoint])  # arguments after y_train are assumed; the original excerpt truncates here

Code example #25
def get_sentence_attention_combined_output(word_model, word_length,
                                           sent_length, n_classes):
    #x = Permute((2,1))(si_vects)
    nclasses = n_classes
    input = Input(shape=(sent_length, word_length), dtype='int32')
    print(' input to sentence attn network', word_model)
    attentions_pred = []
    #print(output.summary())
    si_vects = TimeDistributed(word_model)(input)
    print('Shape after si_vects', si_vects.shape)
    u_it = TimeDistributed(TimeDistributed(Dense(100,
                                                 activation='tanh')))(si_vects)
    print('Shape after word vector', u_it.shape)
    #h_it = TimeDistributed(Reshape((100,word_length)))(si_vects)
    #print('Shape after reshape word vector',h_it.shape)

    attn_final_word = [
        TimeDistributed(ATTNWORD(1))(u_it) for i in range(nclasses)
    ]
    #a_it = Reshape(( word_length, 1))(a_it)
    #h_it = Reshape((word_length, 512))(h_it)
    print('ATTN Shape', attn_final_word[0].shape)
    attn_final_word = [
        Multiply()([si_vects, attn_final_word[i]]) for i in range(nclasses)
    ]  #Multiply()([h_it,a_it])
    print('Multi word Shape', attn_final_word[0].shape)
    attn_final_word = [
        Reshape((sent_length, 100, word_length))(attn_final_word[i])
        for i in range(nclasses)
    ]
    print('Shape of the att1 is {}'.format(attn_final_word[0].shape))
    attn_final_word = [
        Lambda(lambda x: K.sum(x, axis=3))(attn_final_word[i])
        for i in range(nclasses)
    ]
    print('Shape of the lambda word is {}'.format(attn_final_word[0].shape))
    attn_sents_for_all_classes = []
    for i in range(nclasses):
        x = Bidirectional(GRU(50, return_sequences=True))(attn_final_word[i])
        #x = Bidirectional(LSTM(256,return_sequences=True))(x)
        print('Shape after BD LSTM', x.shape)
        #x1 = Permute((2,1))(x)
        #print('Shape after permute',x1.shape)
        u_it = TimeDistributed(Dense(100, activation='tanh'))(x)
        print('Shape after word vector', u_it.shape)
        #h_it = Reshape((100,sent_length))(x)
        attn_final_sent = ATTNWORD(1)(u_it)
        print('Shape of the sent att is {}'.format(attn_final_sent.shape))
        #attentions_pred.append(attn_final)
        attn_final_sent = Multiply()([x, attn_final_sent])
        print('Shape of the multi sent att is {}'.format(
            attn_final_sent.shape))
        attn_final_sent = Reshape((100, sent_length))(attn_final_sent)
        attn_final_sent = Lambda(lambda x: K.sum(x, axis=2))(attn_final_sent)
        print('Shape of the lambda sent att is {}'.format(
            attn_final_sent.shape))
        attn_sents_for_all_classes.append(attn_final_sent)
    x = Concatenate()(attn_sents_for_all_classes)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.2)(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.2)(x)
    #x = Dense(128, activation='relu')(x)
    #x = Dropout(0.2)(x)
    #x = Dense(64, activation='relu')(x)
    #x = Dropout(0.2)(x)
    x = Dense(64, activation='relu')(x)
    preds = Dense(nclasses, activation='sigmoid')(x)

    model = Model(input, preds)

    return model
Code example #26

__, h_state, c_state = LSTM(context_dim, return_state=True)(encoder_middle_layer)
# discard the outputs and keep the states
encoder_final_state = [ h_state , c_state ]

decoder_input_layer = Input( shape = (None,word_dim) )
decoder_first_layer = LSTM(context_dim,return_sequences=True,return_state=True)
decoder_outputs,__,__ = decoder_first_layer(decoder_input_layer,initial_state=encoder_final_state)
decoder_dense = Dense(word_dim,activation="softmax")
decoder_outputs = decoder_dense(decoder_outputs)

auto_decoder_input_layer = Input( shape = (None,word_dim) )
auto_decoder_first_layer = LSTM(context_dim,return_sequences=True,return_state=True)
auto_decoder_outputs,__,__ = auto_decoder_first_layer(auto_decoder_input_layer,initial_state=encoder_final_state)
auto_decoder_dense = Dense(word_dim,activation="softmax")
auto_decoder_outputs = auto_decoder_dense(auto_decoder_outputs)
training_model = Model([encoder_input_layer,decoder_input_layer,auto_decoder_input_layer],[decoder_outputs,auto_decoder_outputs])
training_model.load_weights('checkpoint.h5')

encoder_model = Model(encoder_input_layer,encoder_final_state)
encoder_model.summary()

decoder_input_h_state = Input( shape = (context_dim,))
decoder_input_c_state = Input( shape = (context_dim,))
decoder_initial_states = [ decoder_input_h_state , decoder_input_c_state ]
decoder_outputs, state_h, state_c = decoder_first_layer( decoder_input_layer , initial_state=decoder_initial_states )
decoder_current_states = [ state_h , state_c ]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model( [decoder_input_layer]+decoder_initial_states , [decoder_outputs]+decoder_current_states )
decoder_model.summary()
while True:
	input_sentence = sos+" "+input("Enter a Filipino sentence to be translated to English: ").strip()+" "+eos
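	# The loop body is truncated in this excerpt; what follows is a greedy-decoding
	# sketch, not the original code. vectorize_sentence, word_index, index_word and
	# max_len are assumed helpers/values; np is numpy, assumed imported above.
	states = encoder_model.predict(vectorize_sentence(input_sentence))
	target = np.zeros((1, 1, word_dim))
	target[0, 0, word_index[sos]] = 1.0
	decoded = []
	while len(decoded) < max_len:
		probs, h, c = decoder_model.predict([target] + states)
		word = index_word[np.argmax(probs[0, -1, :])]
		if word == eos:
			break
		decoded.append(word)
		target = np.zeros((1, 1, word_dim))
		target[0, 0, word_index[word]] = 1.0
		states = [h, c]
	print(" ".join(decoded))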
Code example #27
def input_model(x_train, y_train):
    # model =
    inputs = Input(shape=(512, 512, 1))
    #####################################
    # Backbone Down sampling convolution followed by max-pooling
    #####################################
    c11 = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='same', activation='relu')(inputs)
    c13 = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='same', activation='relu')(c11)
    #####################################
    d1 = MaxPool2D((2, 2), (2, 2))(c13)
    #####################################
    c21 = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='same', activation='relu')(d1)
    c23 = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='same', activation='relu')(c21)
    #####################################
    d2 = MaxPool2D((2, 2), (2, 2))(c23)
    #####################################
    c31 = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='same', activation='relu')(d2)
    c33 = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='same', activation='relu')(c31)
    #####################################
    d3 = MaxPool2D((2, 2), (2, 2))(c33)
    #####################################
    c41 = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='same', activation='relu')(d3)
    c43 = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='same', activation='relu')(c41)
    #####################################
    d4 = MaxPool2D((2, 2), (2, 2))(c43)
    #####################################
    c51 = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='same', activation='relu')(d4)
    c53 = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='same', activation='relu')(c51)
    #####################################
    # Up sampling convolution followed by up-sampling
    #####################################
    u1 = UpSampling2D((2, 2))(c53)
    #####################################
    skip4 = keras.layers.Concatenate()([c43, u1])
    c61 = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='same', activation='relu')(skip4)
    c63 = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='same', activation='relu')(c61)
    #####################################
    u2 = UpSampling2D((2, 2))(c63)
    #####################################
    skip3 = keras.layers.Concatenate()([c33, u2])
    c71 = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='same', activation='relu')(skip3)
    c73 = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='same', activation='relu')(c71)
    #####################################
    u3 = UpSampling2D((2, 2))(c73)
    #####################################
    skip2 = keras.layers.Concatenate()([c23, u3])
    c81 = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='same', activation='relu')(skip2)
    c83 = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='same', activation='relu')(c81)
    #####################################
    u4 = UpSampling2D((2, 2))(c83)
    #####################################
    skip1 = keras.layers.Concatenate()([c13, u4])
    c91 = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='same', activation='relu')(skip1)
    c93 = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='same', activation='relu')(c91)
    #####################################
    # Output layer
    #####################################
    output = (Conv2D(1, (1, 1), padding='same', activation='sigmoid'))(c93)
    #####################################
    model = Model(inputs=inputs, outputs=output)
    #####################################

    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['acc'])

    out = model.fit(x_train, y_train,
                    batch_size=16,
                    epochs=10,
                    validation_split=0.1)


    return model, out
Code example #28

img_input = Input(shape=input_shape_img)
roi_input = Input(shape=(C.num_rois, 4))
feature_map_input = Input(shape=input_shape_features)

# define the base network (resnet here, can be VGG, Inception, etc)
shared_layers = nn.nn_base(img_input, trainable=True)

# define the RPN, built on the base layers
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn_layers = nn.rpn(shared_layers, num_anchors)

classifier = nn.classifier(feature_map_input, roi_input, C.num_rois, nb_classes=len(class_mapping), trainable=True)

model_rpn = Model(img_input, rpn_layers)
model_classifier_only = Model([feature_map_input, roi_input], classifier)

model_classifier = Model([feature_map_input, roi_input], classifier)

print('Loading weights from {}'.format(C.model_path))
model_rpn.load_weights(C.model_path, by_name=True)
model_classifier.load_weights(C.model_path, by_name=True)

model_rpn.compile(optimizer='sgd', loss='mse')
model_classifier.compile(optimizer='sgd', loss='mse')

all_imgs = []

classes = {}
Code example #29
                          output_dim=1,
                          embeddings_initializer='orthogonal',
                          embeddings_constraint=non_neg())(user_input)
    user_bias = Flatten()(user_bias)
    movie_bias = Embedding(input_dim=n_movies + 1,
                           output_dim=1,
                           embeddings_initializer='orthogonal',
                           embeddings_constraint=non_neg())(movie_input)
    movie_bias = Flatten()(movie_bias)

    preference = dot(inputs=[user_embedding, movie_embedding], axes=1)
    preference = add(inputs=[preference, user_bias, movie_bias])
    preference = Dense(1, bias_initializer='ones',
                       activation='linear')(preference)

    model = Model(inputs=[user_input, movie_input], outputs=preference)
    model.compile(loss='mse', optimizer='rmsprop')

    model.summary()

    checkpointer = ModelCheckpoint(filepath=model_name,
                                   monitor='val_loss',
                                   save_best_only=True,
                                   verbose=1)

    model.fit([users_train, movies_train],
              ratings_train,
              batch_size=256,
              epochs=20,
              validation_data=([users_val, movies_val], ratings_val),
              callbacks=[checkpointer])
Code example #30
File: nn_model_selector.py  Project: DnAp/chatbot
    # --------------------------------------------------------------------------

    # the final classifier decides how the answer will be produced:
    # 1) yes/no
    # 2) the answer is built by copying words from the question
    # 3) the answer text is generated by the network
    # 4) the answer is generated character by character by the network and consists only of digits
    output_dims = 4

    classifier = Dense(encoder_size, activation='sigmoid')(classifier)
    #classifier = Dense(encoder_size//2, activation='relu')(classifier)
    #classifier = Dense(encoder_size//3, activation='relu')(classifier)
    classifier = Dense(output_dims, activation='softmax',
                       name='output')(classifier)

    model = Model(inputs=inputs, outputs=classifier)
    model.compile(loss='categorical_crossentropy',
                  optimizer='nadam',
                  metrics=['accuracy'])
    model.summary()

    with open(arch_filepath, 'w') as f:
        f.write(model.to_json())

    # -------------------------------------------------------------------------

    SEED = 123456
    TEST_SHARE = 0.2
    train_samples, val_samples = train_test_split(samples,
                                                  test_size=TEST_SHARE,
                                                  random_state=SEED)