def model(self):
    encoded_input = Input(shape=(128,))
    hidden_layer = Dense(self.conf_s.NUMBER_OF_NEURONS_IN_HIDDEN_LAYER, activation='sigmoid')(encoded_input)
    output_layer = Dense(self.conf_s.NUM_LABELS, activation='sigmoid')(hidden_layer)
    classifier = Model(inputs=encoded_input, outputs=output_layer)
    classifier.compile(self.conf_s.OPTIMIZER, loss='categorical_crossentropy')
    return classifier
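# A self-contained sketch of the same single-hidden-layer classifier with the
# conf_s settings object replaced by hypothetical literals (hidden size 64,
# 10 labels, and the 'adam' optimizer are assumptions, not from the source):
from keras.layers import Input, Dense
from keras.models import Model

encoded = Input(shape=(128,))                      # 128-dim encoded features
hidden = Dense(64, activation='sigmoid')(encoded)
probs = Dense(10, activation='sigmoid')(hidden)
clf = Model(inputs=encoded, outputs=probs)
clf.compile('adam', loss='categorical_crossentropy')
clf.summary()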
Example #2
def test1():
    seq_size = 10
    batch_size = 10 
    rnn_size = 1
    xin = Input(batch_shape=(batch_size, seq_size,1))
    xtop = Input(batch_shape=(batch_size, seq_size))
    xbranch, xsummary = RTTN(rnn_size, return_sequences=True)([xin, xtop])

    model = Model(input=[xin, xtop], output=[xbranch, xsummary])
    model.compile(loss='MSE', optimizer='SGD')
    data_gen = generate_data_batch(batch_size, seq_size)
    model.fit_generator(generator=data_gen, samples_per_epoch=1000, nb_epoch=100)
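# generate_data_batch is referenced above but not defined in this snippet; a
# minimal sketch of a compatible generator, with target shapes inferred (an
# assumption) from rnn_size=1 and return_sequences=True:
import numpy as np

def generate_data_batch(batch_size, seq_size):
    # Yields ([xin, xtop], [branch_target, summary_target]) batches forever.
    while True:
        xin = np.random.rand(batch_size, seq_size, 1)
        xtop = np.random.randint(0, seq_size, size=(batch_size, seq_size))
        ybranch = np.random.rand(batch_size, seq_size, 1)  # per-timestep target
        ysummary = np.random.rand(batch_size, 1)           # sequence summary target
        yield [xin, xtop], [ybranch, ysummary]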
def nn_architecture_seg_3d(input_shape, pool_size=(2, 2, 2), n_labels=1, initial_learning_rate=0.00001,
                           depth=3, n_base_filters=16, metrics=dice_coefficient, batch_normalization=True):
    inputs = Input(input_shape)
    current_layer = inputs
    levels = list()

    for layer_depth in range(depth):
        layer1 = create_convolution_block(input_layer=current_layer, n_filters=n_base_filters * (2**layer_depth),
                                          batch_normalization=batch_normalization)
        layer2 = create_convolution_block(input_layer=layer1, n_filters=n_base_filters * (2**layer_depth) * 2,
                                          batch_normalization=batch_normalization)
        if layer_depth < depth - 1:
            current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
            levels.append([layer1, layer2, current_layer])
        else:
            current_layer = layer2
            levels.append([layer1, layer2])

    for layer_depth in range(depth - 2, -1, -1):
        up_convolution = UpSampling3D(size=pool_size)(current_layer)
        concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
                                                 input_layer=concat, batch_normalization=batch_normalization)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
                                                 input_layer=current_layer,
                                                 batch_normalization=batch_normalization)

    final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)
    act = Activation('sigmoid')(final_convolution)
    model = Model(inputs=inputs, outputs=act)

    if not isinstance(metrics, list):
        metrics = [metrics]

    model.compile(optimizer=Adam(lr=initial_learning_rate), loss=dice_coefficient_loss, metrics=metrics)
    return model
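# create_convolution_block is referenced above but not defined in this snippet;
# a plausible minimal version, assuming channels-first 3D tensors (consistent
# with the axis=1 concatenation and _keras_shape[1] filter counts above):
from keras.layers import Conv3D, BatchNormalization, Activation

def create_convolution_block(input_layer, n_filters, batch_normalization=False,
                             kernel=(3, 3, 3), activation='relu', padding='same'):
    # convolution -> optional batch norm over the channel axis -> nonlinearity
    layer = Conv3D(n_filters, kernel, padding=padding)(input_layer)
    if batch_normalization:
        layer = BatchNormalization(axis=1)(layer)  # axis=1 assumes channels-first
    return Activation(activation)(layer)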
Example #4
class FergusNModel(object):
    def __init__(self, igor):

        now = datetime.now()
        self.run_name = "fergusn_{}mo_{}day_{}hr_{}min".format(
            now.month, now.day, now.hour, now.minute)
        log_location = join(igor.log_dir, self.run_name + ".log")
        self.logger = igor.logger = make_logger(igor, log_location)
        igor.verify_directories()
        self.igor = igor

    @classmethod
    def from_yaml(cls, yamlfile, kwargs=None):
        igor = Igor.from_file(yamlfile)
        igor.prep()
        model = cls(igor)
        model.make(kwargs)
        return model

    @classmethod
    def from_config(cls, config, kwargs=None):
        igor = Igor(config)
        model = cls(igor)
        igor.prep()
        model.make(kwargs)
        return model

    def load_checkpoint_weights(self):
        weight_file = join(self.igor.model_location, self.igor.saving_prefix,
                           self.igor.checkpoint_weights)
        if exists(weight_file):
            self.logger.info("+ Loading checkpoint weights")
            self.model.load_weights(weight_file, by_name=True)
        else:
            self.logger.warning(
                "- Checkpoint weights do not exist; {}".format(weight_file))

    def plot(self, filename=None):
        if filename is None:
            filename = join(self.igor.model_location, self.igor.saving_prefix,
                            'model_visualization.png')
        kplot(self.model, to_file=filename)
        self.logger.debug("+ Model visualized at {}".format(filename))

    def make(self, theano_kwargs=None):
        '''Make the model and compile it.

        Igor's config options control everything.

        Args:
            theano_kwargs: dict for debugging theano or submitting something custom
        '''

        if self.igor.embedding_type == "convolutional":
            make_convolutional_embedding(self.igor)
        elif self.igor.embedding_type == "token":
            make_token_embedding(self.igor)
        elif self.igor.embedding_type == "shallowconv":
            make_shallow_convolutional_embedding(self.igor)
        elif self.igor.embedding_type == "minimaltoken":
            make_minimal_token_embedding(self.igor)
        else:
            raise ValueError(
                "Unknown embedding type: {}".format(self.igor.embedding_type))

        B = self.igor.batch_size
        spine_input_shape = (B, self.igor.max_num_supertags)
        child_input_shape = (B, 1)
        parent_input_shape = (B, 1)

        E, V = self.igor.word_embedding_size, self.igor.word_vocab_size  # for word embeddings

        repeat_N = self.igor.max_num_supertags  # for lex
        mlp_size = self.igor.mlp_size

        ## dropout parameters
        p_emb = self.igor.p_emb_dropout
        p_W = self.igor.p_W_dropout
        p_U = self.igor.p_U_dropout
        w_decay = self.igor.weight_decay
        p_mlp = self.igor.p_mlp_dropout

        def predict_params():
            return {
                'output_dim': 1,
                'W_regularizer': l2(w_decay),
                'activation': 'relu',
                'b_regularizer': l2(w_decay)
            }

        dspineset_in = Input(batch_shape=spine_input_shape,
                             name='daughter_spineset_in',
                             dtype='int32')
        pspineset_in = Input(batch_shape=spine_input_shape,
                             name='parent_spineset_in',
                             dtype='int32')
        dhead_in = Input(batch_shape=child_input_shape,
                         name='daughter_head_input',
                         dtype='int32')
        phead_in = Input(batch_shape=parent_input_shape,
                         name='parent_head_input',
                         dtype='int32')
        dspine_in = Input(batch_shape=child_input_shape,
                          name='daughter_spine_input',
                          dtype='int32')
        inputs = [dspineset_in, pspineset_in, dhead_in, phead_in, dspine_in]

        ### Layer functions
        ############# Convert the word indices to vectors
        F_embedword = Embedding(input_dim=V,
                                output_dim=E,
                                mask_zero=True,
                                W_regularizer=l2(w_decay),
                                dropout=p_emb)

        if self.igor.saved_embeddings is not None:
            self.logger.info("+ Cached embeddings loaded")
            F_embedword.initial_weights = [self.igor.saved_embeddings]

        ###### Prediction Functions
        ## these functions learn a vector which turns a tensor into a matrix of probabilities

        ### P(Parent supertag | Child, Context)
        F_parent_predict = ProbabilityTensor(
            name='parent_predictions',
            dense_function=Dense(**predict_params()))
        ### P(Leaf supertag)
        F_leaf_predict = ProbabilityTensor(
            name='leaf_predictions', dense_function=Dense(**predict_params()))

        ###### Network functions.
        ##### Input word, correct its dimensions (basically squash in a certain way)
        F_singleword = compose(Fix(), F_embedword)
        ##### Input spine, correct dimensions, broadcast across 1st dimension
        F_singlespine = compose(RepeatVector(repeat_N), Fix(),
                                self.igor.F_embedspine)
        ##### Concatenate and map to a single space
        F_alignlex = compose(
            RepeatVector(repeat_N), Dropout(p_mlp),
            Dense(mlp_size, activation='relu', name='dense_align_lex'), concat)

        F_alignall = compose(
            Distribute(Dropout(p_mlp), name='distribute_align_all_dropout'),
            Distribute(Dense(mlp_size,
                             activation='relu',
                             name='align_all_dense'),
                       name='distribute_align_all_dense'), concat)
        F_alignleaf = compose(
            Distribute(
                Dropout(p_mlp * 0.66), name='distribute_leaf_dropout'
            ),  ### need a separate one because the 'concat' is different for the two situations
            Distribute(Dense(mlp_size, activation='relu', name='leaf_dense'),
                       name='distribute_leaf_dense'),
            concat)

        ### embed and form all of the inputs into their components
        ### note: spines == supertags. early word choice, haven't refactored.
        leaf_spines = self.igor.F_embedspine(dspineset_in)
        pspine_context = self.igor.F_embedspine(pspineset_in)
        dspine_single = F_singlespine(dspine_in)

        dhead = F_singleword(dhead_in)
        phead = F_singleword(phead_in)

        ### combine the lexical material
        lexical_context = F_alignlex([dhead, phead])

        #### P(Parent Supertag | Daughter Supertag, Lexical Context)
        ### we know the daughter spine, want to know the parent spine
        ### size is (batch, num_supertags)
        parent_problem = F_alignall(
            [lexical_context, dspine_single, pspine_context])

        ### we don't have the parent, we just have a leaf
        leaf_problem = F_alignleaf([lexical_context, leaf_spines])

        parent_predictions = F_parent_predict(parent_problem)
        leaf_predictions = F_leaf_predict(leaf_problem)
        predictions = [parent_predictions, leaf_predictions]

        theano_kwargs = theano_kwargs or {}
        ## make it quick so i can load in the weights.
        self.model = Model(input=inputs,
                           output=predictions,
                           preloaded_data=self.igor.preloaded_data,
                           **theano_kwargs)

        #mask_cache = traverse_nodes(parent_prediction)
        #desired_masks = ['merge_3.in.mask.0']
        #self.p_tensor = K.function(inputs+[K.learning_phase()], [parent_predictions, F_parent_predict.inbound_nodes[0].input_masks[0]])

        if self.igor.from_checkpoint:
            self.load_checkpoint_weights()
        elif not self.igor.in_training:
            raise Exception("No point in running this without trained weights")

        if not self.igor.in_training:
            expanded_children = RepeatVector(repeat_N, axis=2)(leaf_spines)
            expanded_parent = RepeatVector(repeat_N, axis=1)(pspine_context)
            expanded_lex = RepeatVector(repeat_N, axis=1)(
                lexical_context
            )  # axis here is arbitrary; it's repeating on 1 and 2, but already repeated once
            huge_tensor = concat(
                [expanded_lex, expanded_children, expanded_parent])
            densely_aligned = LastDimDistribute(
                F_alignall.get(1).layer)(huge_tensor)
            output_predictions = Distribute(
                F_parent_predict, force_reshape=True)(densely_aligned)

            primary_inputs = [phead_in, dhead_in, pspineset_in, dspineset_in]
            leaf_inputs = [phead_in, dhead_in, dspineset_in]

            self.logger.info("+ Compiling prediction functions")
            self.inner_func = K.function(primary_inputs + [K.learning_phase()],
                                         output_predictions)
            self.leaf_func = K.function(leaf_inputs + [K.learning_phase()],
                                        leaf_predictions)
            try:
                self.get_ptensor = K.function(
                    primary_inputs + [K.learning_phase()], [
                        output_predictions,
                    ])
            except:
                import pdb
                pdb.set_trace()
        else:

            optimizer = Adam(self.igor.LR,
                             clipnorm=self.igor.max_grad_norm,
                             clipvalue=self.igor.grad_clip_threshold)

            theano_kwargs = theano_kwargs or {}
            self.model.compile(loss="categorical_crossentropy",
                               optimizer=optimizer,
                               metrics=['accuracy'],
                               **theano_kwargs)

        #self.model.save("here.h5")

    def likelihood_function(self, inputs):
        if self.igor.in_training:
            raise Exception("Not in testing mode; please fix the config file")
        return self.inner_func(tuple(inputs) + (0., ))

    def leaf_function(self, inputs):
        if self.igor.in_training:
            raise Exception("Not in testing mode; please fix the config file")
        return self.leaf_func(tuple(inputs) + (0., ))

    def train(self):
        replacers = {
            "daughter_predictions": "child",
            "parent_predictions": "parent",
            "leaf_predictions": "leaf"
        }
        train_data = self.igor.train_gen(forever=True)
        dev_data = self.igor.dev_gen(forever=True)
        N = self.igor.num_train_samples
        E = self.igor.num_epochs
        # generator, samples per epoch, number of epochs
        callbacks = [ProgbarV2(3, 10, replacers=replacers)]
        checkpoint_fp = join(self.igor.model_location, self.igor.saving_prefix,
                             self.igor.checkpoint_weights)
        self.logger.info("+ Model Checkpoint: {}".format(checkpoint_fp))
        callbacks += [
            ModelCheckpoint(filepath=checkpoint_fp,
                            verbose=1,
                            save_best_only=True)
        ]
        callbacks += [LearningRateScheduler(lambda epoch: self.igor.LR * 0.9)]
        csv_location = join(self.igor.log_dir, self.run_name + ".csv")
        callbacks += [CSVLogger(csv_location)]
        self.model.fit_generator(generator=train_data,
                                 samples_per_epoch=N,
                                 nb_epoch=E,
                                 callbacks=callbacks,
                                 verbose=1,
                                 validation_data=dev_data,
                                 nb_val_samples=self.igor.num_dev_samples)

    def debug(self):
        dev_data = self.igor.dev_gen(forever=False)
        X, Y = next(dev_data)
        self.model.predict_on_batch(X)
        #self.model.evaluate_generator(dev_data, self.igor.num_dev_samples)

    def profile(self, num_iterations=1):
        train_data = self.igor.train_gen(forever=True)
        dev_data = self.igor.dev_gen(forever=True)
        # generator, samples per epoch, number of epochs
        callbacks = [ProgbarV2(1, 10)]
        self.logger.debug("+ Beginning the generator")
        self.model.fit_generator(generator=train_data,
                                 samples_per_epoch=self.igor.batch_size * 10,
                                 nb_epoch=num_iterations,
                                 callbacks=callbacks,
                                 verbose=1,
                                 validation_data=dev_data,
                                 nb_val_samples=self.igor.batch_size)
        self.logger.debug(
            "+ Calling theano's pydot print.. this might take a while")
        theano.printing.pydotprint(self.model.train_function.function,
                                   outfile='theano_graph.png',
                                   var_with_name_simple=True,
                                   with_ids=True)
        self.logger.debug("+ Calling keras' print.. this might take a while")
        self.plot("keras_graph.png")
def gru_model(embedding_weights, cv_dat, max_len, model_w, lda, dictionary,
              idx2word, alpha, gru_time_steps):
    """
    GRU with attention mechanism (the attention path is commented out below,
    so a plain LSTM is used)
    :param embedding_weights:
    :param cv_dat:
    :param max_len:
    :param model_w:
    :param lda:
    :param dictionary:
    :param idx2word:
    :param alpha:
    :param gru_time_steps:
    """
    max_len = 200 if max_len > 1000 else max_len
    #max_len = 1000
    dropout = 0.8
    print(max_len)

    #json_file = open(model_w+'model.json', 'r')
    #loaded_model_json = json_file.read()
    #json_file.close()
    #model_lda = model_from_json(loaded_model_json)
    # load weights into new model
    #layer_dict = dict([(layer.name, layer) for layer in model_lda.layers])
    #print layer_dict.keys()
    ##print layer_dict
    train_x, test_x, train_y, test_y = cv_dat
    #test_lda = get_alpha(test_x, lda, dictionary, idx2word)

    print "Maximum length of sentence:" + str(max_len)
    print "Distribution of labels in training set:"
    print Counter([np.argmax(dat) for dat in train_y])
    print "Distribution of labels in testing set:"
    print Counter([np.argmax(dat) for dat in test_y])
    #print "Distribution of labels in testing set 1k:"
    #print Counter([np.argmax(dat) for dat in test_y1k])

    #print (train_x.shape)
    #print train_y.shape
    train_x, val_x, train_y, val_y = train_test_split(train_x,
                                                      train_y,
                                                      test_size=0.166667,
                                                      random_state=666,
                                                      stratify=train_y)

    train_lda = get_alpha(train_x, lda, dictionary, idx2word, max_len)
    val_lda = get_alpha(val_x, lda, dictionary, idx2word, max_len)
    print(val_lda[0])
    test_lda = get_alpha(test_x, lda, dictionary, idx2word, max_len)
    #test_lda1k = get_alpha(test_x1k, lda, dictionary, idx2word, max_len)
    print(val_lda.shape)
    #defining the model architecture now

    train_x = np.array(sequence.pad_sequences(train_x, maxlen=max_len),
                       dtype=int)
    val_x = np.array(sequence.pad_sequences(val_x, maxlen=max_len),
                     dtype=int)
    test_x = np.array(sequence.pad_sequences(test_x, maxlen=max_len),
                      dtype=int)
    #test_x1k = np.array(sequence.pad_sequences(test_x1k, maxlen=max_len), dtype=np.int)

    review_text = Input(shape=(max_len, ), dtype='int64', name="body_input")
    lda_input = Input(shape=(50, ), dtype='float32', name="lda_inp")

    embedded_layer_body = Embedding(embedding_weights.shape[0],
                                    embedding_weights.shape[1],
                                    mask_zero=False,
                                    input_length=max_len,
                                    weights=[embedding_weights],
                                    trainable=True)(review_text)

    #define the GRU with attention
    #gru_l = LSTMCustom(gru_time_steps, init='glorot_uniform',  activation='tanh',dropout_W=0.3, dropout_U=0.3, topic_distribution=lda_input)(embedded_layer_body)
    gru_l = LSTM(gru_time_steps,
                 kernel_initializer='glorot_uniform',
                 activation='tanh',
                 dropout=0.3,
                 recurrent_dropout=0.3)(embedded_layer_body)
    print(gru_l)
    #gru_lda = merge([gru_l, lda_input], name='add_lda', mode='sum')
    #gru_lda = Lambda(lambda x: merge([x, lda_input], name='add_lda', mode='sum'))(gru_l)

    #attn_wghts = Permute((2, 1))(gru_lda)
    #print attn_wghts
    #ttn_wghts = Reshape((300, gru_time_steps))(attn_wghts)
    #attn_wghts = Dense(gru_time_steps, activation='softmax')(attn_wghts)
    #alpha_wghts = Permute((2, 1), name='attention_vec')(attn_wghts)
    #output_attention_mul = merge([attn_wghts, alpha_wghts], name='attention_mul', mode='mul')
    #attention_mul = Lambda(lambda x: K.sum(x, axis=1))(output_attention_mul)
    #add_lda =  merge([attention_mul, lda_input], name='add_lda', mode='sum')
    #print attention_mul
    #l_dense = TimeDistributed(Dense(gru_time_steps*2))(gru_l)
    #l_att = Attention(lda=lda_input)(gru_l)

    #output_lda = Dense(30, activation='softmax', name='out_lda')(attention_mul)

    hidden_layer = Dense(1200,
                         activation='tanh',
                         kernel_initializer="glorot_uniform")(gru_l)
    #hidden_concat = concatenate([hidden_layer, lda_vec])
    dropout_hidden = Dropout(dropout)(hidden_layer)
    #merge_hidden = concatenate([dropout_hidden, lda_input])
    batch_norm = BatchNormalization()(dropout_hidden)

    hidden_layer_3 = Dense(600,
                           activation='tanh',
                           kernel_initializer="glorot_uniform")(batch_norm)
    dropout_hidden_3 = Dropout(0.5)(hidden_layer_3)
    batch_n_3 = BatchNormalization()(dropout_hidden_3)

    output_layer = Dense(2, activation='softmax', name='out_sent')(batch_n_3)

    model = Model(inputs=[review_text, lda_input], outputs=[output_layer])
    layer_dict_nu = dict([(layer.name, layer) for layer in model.layers])

    adam = Adam(lr=0.0001)

    #model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'], loss_weights={'out_sent': (1 - alpha), 'out_lda': alpha})
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])
    #model.compile(loss=ncce, optimizer=adam, metrics=['accuracy'])

    earlystop = EarlyStopping(monitor='val_loss',
                              min_delta=0.0001,
                              patience=3,
                              verbose=1,
                              mode='auto')
    callbacks_list = [earlystop]
    print(model.summary())
    model.fit([train_x, train_lda], [train_y],
              batch_size=128,
              epochs=100,
              verbose=1,
              shuffle=True,
              callbacks=callbacks_list,
              validation_data=([val_x, val_lda], [val_y]))
    #model.fit([train_x, train_lda], [train_y, train_lda], batch_size=64, epochs=25,
    #          verbose=1, shuffle=True)
    test_predictions = model.predict([test_x, test_lda], verbose=False)
    #test_predictions1k = model.predict([test_x1k, test_lda1k], verbose=False)
    test_y = [np.argmax(pred) for pred in test_y]
    test_pred = [np.argmax(pred) for pred in test_predictions]
    #test_pred1k = [np.argmax(pred) for pred in test_predictions1k]
    #print test_pred1k
    #test_y = [np.argmax(label) for label in test_y]
    #test_y1k = [np.argmax(label) for label in test_y1k]
    error_preds = [
        i for i in range(0, len(test_pred)) if (test_y[i] != test_pred[i])
    ]
    print(len(error_preds))
    misclassified = [test_x[i] for i in error_preds]
    misclassified = [[get_id2word(idx, idx2word) for idx in sent if idx != 0]
                     for sent in misclassified]
    labels = [(test_y[i], test_pred[i]) for i in error_preds]
    acc = accuracy_score(test_y, test_pred)
    print(acc)
    #acc1k = f1_score(test_y1k, test_pred1k, average='macro')
    #print acc1k
    return acc, misclassified, labels
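# get_alpha is called above but not defined in this snippet; a plausible
# minimal version, assuming a gensim-style LDA model and that the returned
# matrix holds one 50-dim topic distribution per document (matching the
# shape of lda_input):
import numpy as np

def get_alpha(docs, lda, dictionary, idx2word, max_len, num_topics=50):
    alphas = []
    for doc in docs:
        tokens = [idx2word[idx] for idx in doc[:max_len] if idx != 0]
        bow = dictionary.doc2bow(tokens)
        dist = np.zeros(num_topics, dtype=np.float32)
        for topic_id, p in lda.get_document_topics(bow, minimum_probability=0.0):
            dist[topic_id] = p
        alphas.append(dist)
    return np.asarray(alphas)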
Example #6
x = Flatten()(x)

x = Dense(256, name='fc3')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)

x = Dense(64, name='fc4')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)

x = Dense(11, name='fc5')(x)
x = BatchNormalization()(x)

logit = Activation('softmax')(x)

network = Model(input, logit)
print('network build finished')

x = np.load('data/wv_mat3.npy')
y = np.load('data/onehotlable.npy')
print('data load finish')

opt = SGD(lr=0.1, momentum=0.7, decay=0.9)  # defined but unused: compile() below uses RMSprop

network.compile(loss='categorical_crossentropy',
                optimizer='RMSprop',
                metrics=['accuracy'])

network.fit(x,
            y,
            batch_size=32)
Example #7
if mode == 3:
    train_X_ex = np.expand_dims(np.array(train_mfccs), -1)
    test_X_ex = np.expand_dims(np.array(test_mfccs), -1)
    print('train X shape:', train_X_ex.shape)
    print('test X shape:', test_X_ex.shape)

    ip = Input(shape=train_X_ex[0].shape)
    m = Conv2D(64, kernel_size=(4, 4), activation='relu')(ip)
    m = MaxPooling2D(pool_size=(4, 4))(m)
    # m = Conv2D(128, kernel_size=(2, 2), activation='relu')(ip)
    # m = MaxPooling2D(pool_size=(2, 2))(m)
    m = Flatten()(m)
    m = Dense(32, activation='relu')(m)
    op = Dense(class_num, activation='softmax')(m)

    model = Model(inputs=ip, outputs=op)

    model.summary()

    model.compile(
        loss='categorical_crossentropy',
        # maybe try using different optimizer
        optimizer='adam',
        metrics=['acc'])

    history = model.fit(train_X_ex,
                        train_y,
                        epochs=epochs,
                        batch_size=32,
                        validation_data=(test_X_ex, test_y))
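# train_mfccs / test_mfccs are assumed above to be equally sized MFCC matrices.
# A minimal sketch of producing one such matrix with librosa (the extraction
# code is not shown, so n_mfcc and max_frames here are assumptions):
import librosa
import numpy as np

def wav_to_mfcc(path, n_mfcc=20, max_frames=100):
    signal, sr = librosa.load(path, sr=None)
    mfcc = librosa.feature.mfcc(y=signal, sr=sr, n_mfcc=n_mfcc)
    # pad or truncate the time axis so every example has the same shape
    if mfcc.shape[1] < max_frames:
        mfcc = np.pad(mfcc, ((0, 0), (0, max_frames - mfcc.shape[1])))
    return mfcc[:, :max_frames]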
def run(_run, image_shape, data_dir, patches, estimator_type, submission_info,
        solution, architecture, weights, batch_size, last_base_layer,
        use_gram_matrix, pooling, dense_layers, device, chunks, limb_weights,
        dropout_rate, ckpt, results_file, submission_file, use_multiprocessing,
        workers, outputs_meta, limb_dense_layers, joint_weights):
    report_dir = _run.observers[0].dir

    with tf.device(device):
        print('building...')
        model = build_siamese_mo_model(image_shape,
                                       architecture,
                                       outputs_meta,
                                       dropout_rate,
                                       weights,
                                       last_base_layer=last_base_layer,
                                       use_gram_matrix=use_gram_matrix,
                                       limb_dense_layers=limb_dense_layers,
                                       pooling=pooling,
                                       trainable_limbs=False,
                                       limb_weights=limb_weights,
                                       trainable_joints=False,
                                       joint_weights=joint_weights,
                                       dense_layers=dense_layers)

        print('loading weights from', ckpt)
        model.load_weights(ckpt)

        x = []
        for m in outputs_meta:
            name = m['n']
            shape = [m['e']]
            x += [
                Input(shape, name='%s_ia' % name),
                Input(shape, name='%s_ib' % name)
            ]

        o = []
        for i, m in enumerate(outputs_meta):
            name = m['n']
            y = [x[2 * i], x[2 * i + 1]]
            y = model.get_layer('multiply_%i' % (i + 1))(y)
            y = model.get_layer('%s_binary_predictions' % name)(y)
            o += [y]

        rest = model.layers.index(model.get_layer('concatenate_asg'))
        for l in model.layers[rest:]:
            o = l(o)

        meta_model = Model(inputs=x, outputs=o)
        del model

        print('loading submission and solution...')
        pairs = pd.read_csv(submission_info, quotechar='"',
                            delimiter=',').values[:, 1:]
        labels = pd.read_csv(solution, quotechar='"',
                             delimiter=',').values[:, 1:].flatten()

        print('loading sequential predictions...')
        d = load_pickle_data(data_dir,
                             phases=['test'],
                             keys=['data', 'names'],
                             chunks=chunks)
        samples, names = d['test']
        samples = np.asarray(
            list(zip(*(samples['%s_em3' % o['n']] for o in outputs_meta))))
        samples, names = group_by_paintings(samples, names=names)
        names = np.asarray([n.split('/')[1] + '.jpg' for n in names])

        print('test data shape:', samples.shape)

        print('\n# test evaluation')
        test_data = ArrayPairsSequence(samples, names, pairs, labels,
                                       batch_size)
        probabilities = meta_model.predict_generator(
            test_data,
            use_multiprocessing=use_multiprocessing,
            workers=workers,
            verbose=1).reshape(-1, patches)
        del meta_model
        K.clear_session()

    layer_results = evaluate(labels, probabilities, estimator_type)
    layer_results['phase'] = 'test'
    evaluation_results = [layer_results]

    # generate results file.
    with open(os.path.join(report_dir, results_file), 'w') as file:
        json.dump(evaluation_results, file)

    # generate submission file to Kaggle.
    for v in layer_results['evaluations']:
        predictions_field = 'binary_probabilities' if 'binary_probabilities' in v else 'p'
        p = v[predictions_field]

        with open(
                os.path.join(report_dir,
                             submission_file.format(strategy=v['strategy'])),
                'w') as f:
            f.write('index,sameArtist\n')
            f.writelines(['%i,%f\n' % (i, _p) for i, _p in enumerate(p)])
Example #9
    def _init_translation_ranker(self,
                                 l_field_translate,
                                 ltr_input=None,
                                 q_att_input=None,
                                 l_field_att_input=None,
                                 aux=False):
        """
        construct ranker for given inputs
        :param l_field_translate: translation matrices
        :param ltr_input: if use ltr features to combine
        :param q_att_input: q attention input
        :param l_field_att_input: field attention input
        :param aux:
        :return:
        """
        pre = ""
        if aux:
            pre = self.aux_pre
        q_att = None
        l_field_att = []
        if self.with_attention:
            if not aux:
                self.l_q_att_in = q_att_input
                self.l_field_att_in = l_field_att_input
            q_att = Reshape(target_shape=(-1, 1, 1))(self.q_att(q_att_input))
            l_field_att = [
                Reshape(target_shape=(1, -1, 1))(self.l_field_att[p](
                    l_field_att_input[p]))
                for p in range(len(self.l_field_att))
            ]

        # perform kernel pooling
        l_kp_features = []
        for p in range(len(self.l_d_field)):
            # field = self.l_d_field[p]
            f_in = l_field_translate[p]
            d_layer = self.kernel_pool(f_in)
            # TODO test
            if self.with_attention:
                # need custom multiple layer to do * along target axes
                # use broadcast reshape attention to targeted dimensions, and then use multiply
                # q_att = Reshape(target_shape=(-1, 1, 1))(q_att)
                d_layer = multiply([d_layer, q_att])
                # l_field_att[p] = Reshape(target_shape=(1, -1, 1))(l_field_att[p])
                d_layer = multiply([d_layer, l_field_att[p]])
            if not aux:
                self.l_d_layer.append(d_layer)
            d_layer = self.kp_logsum(d_layer)
            l_kp_features.append(d_layer)

        # put features to one vector
        if len(l_kp_features) > 1:
            ranking_features = concatenate(l_kp_features,
                                           name=pre + 'ranking_features')
        else:
            ranking_features = l_kp_features[0]

        # # test
        # test_model = Model(inputs=l_field_translate, outputs=ranking_features)
        # test_model.summary()

        if ltr_input:
            ranking_features = concatenate([ranking_features, ltr_input],
                                           name=pre +
                                           'ranking_features_with_ltr')

        ranking_layer = self.ltr_layer(ranking_features)
        l_full_inputs = l_field_translate
        if self.with_attention:
            l_full_inputs.append(q_att_input)
            l_full_inputs.extend(l_field_att_input)
        if ltr_input:
            l_full_inputs.append(ltr_input)
        ranker = Model(inputs=l_full_inputs,
                       outputs=ranking_layer,
                       name=pre + 'ranker')

        return ranker
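# self.kp_logsum above collapses the kernel-pooled scores into ranking
# features; in K-NRM-style models this is a sum of log kernel scores over the
# query dimension. A minimal sketch via a Lambda layer (the exact axis and
# clipping constant are assumptions):
from keras import backend as K
from keras.layers import Lambda

def make_kp_logsum():
    # input: (batch, q_len, n_kernels) soft-TF scores per query term
    # output: (batch, n_kernels) log-summed ranking features
    return Lambda(lambda x: K.sum(K.log(K.maximum(x, 1e-10)), axis=1))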
    print('Loading previously trained model1...')
    model = load_model(previouslytrainedModelpath)
    print(previouslytrainedModelpath + ' successfully loaded!')
    custom_resnet_model = model
else:
    print('Initializing resnet50 model1')
    model = ResNet50(input_tensor=image_input, include_top=True, weights='imagenet')
    model.summary()
#    sys.exit(1)
    
    x = model.get_layer('res5a_branch2a').input
    x = GlobalAveragePooling2D(name='avg_pool')(x)
#    x = Dense(512, activation='relu',name='fc-1')(x)
    x = Dropout(0.5)(x)
    out = Dense(num_classes, activation='softmax', name='output_layer')(x)
    custom_resnet_model = Model(inputs=image_input, outputs=out)


#custom_resnet_model.summary()

for layer in custom_resnet_model.layers[:]:
    layer.trainable = True

#custom_resnet_model.layers[-1].trainable

#custom_resnet_model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])

# compile the model with a SGD/momentum optimizer
# and a very slow learning rate.
custom_resnet_model.compile(loss='categorical_crossentropy',
                            optimizer=optimizers.SGD(lr=learningrate, momentum=momentum),
                            metrics=['accuracy'])
Example #11
def train_rpn(model_file=None):

    parser = OptionParser()
    parser.add_option("--train_path", dest="train_path", help="Path to training data.",
                      default='/Users/jie/projects/PanelSeg/ExpPython/train.txt')
    parser.add_option("--val_path", dest="val_path", help="Path to validation data.",
                      default='/Users/jie/projects/PanelSeg/ExpPython/eval.txt')
    parser.add_option("--num_rois", type="int", dest="num_rois", help="Number of RoIs to process at once.",
                      default=32)
    parser.add_option("--network", dest="network", help="Base network to use. Supports nn_cnn_3_layer.",
                      default='nn_cnn_3_layer')
    parser.add_option("--num_epochs", type="int", dest="num_epochs", help="Number of epochs.",
                      default=2000)
    parser.add_option("--output_weight_path", dest="output_weight_path", help="Output path for weights.",
                      default='./model_rpn.hdf5')
    parser.add_option("--input_weight_path", dest="input_weight_path",
                      default='/Users/jie/projects/PanelSeg/ExpPython/models/label+bg_rpn_3_layer_color-0.135.hdf5')

    (options, args) = parser.parse_args()

    # set configuration
    c = Config.Config()

    c.model_path = options.output_weight_path
    c.num_rois = int(options.num_rois)

    import nn_cnn_3_layer as nn

    c.base_net_weights = options.input_weight_path

    val_imgs, val_classes_count = get_label_rpn_data(options.val_path)
    train_imgs, train_classes_count = get_label_rpn_data(options.train_path)

    classes_count = {k: train_classes_count.get(k, 0) + val_classes_count.get(k, 0)
                     for k in set(train_classes_count) | set(val_classes_count)}
    class_mapping = LABEL_MAPPING

    if 'bg' not in classes_count:
        classes_count['bg'] = 0
        class_mapping['bg'] = len(class_mapping)

    c.class_mapping = class_mapping

    inv_map = {v: k for k, v in class_mapping.items()}

    print('Training images per class:')
    pprint.pprint(classes_count)
    print('Num classes (including bg) = {}'.format(len(classes_count)))

    config_output_filename = 'config.pickle'

    with open(config_output_filename, 'wb') as config_f:
        pickle.dump(c, config_f)
        print('Config has been written to {}, and can be loaded when testing to ensure correct results'.format(
            config_output_filename))

    random.shuffle(train_imgs)
    random.shuffle(val_imgs)

    num_imgs = len(train_imgs) + len(val_imgs)

    print('Num train samples {}'.format(len(train_imgs)))
    print('Num val samples {}'.format(len(val_imgs)))

    data_gen_train = label_rcnn_data_generators.get_anchor_gt(
        train_imgs, classes_count, c, nn.nn_get_img_output_length, mode='train')
    data_gen_val = label_rcnn_data_generators.get_anchor_gt(
        val_imgs, classes_count, c, nn.nn_get_img_output_length, mode='val')

    input_shape_img = (None, None, 3)
    img_input = Input(shape=input_shape_img)
    # roi_input = Input(shape=(None, 4))

    # define the base network (resnet here, can be VGG, Inception, etc)
    shared_layers = nn.nn_base(img_input, trainable=True)

    # define the RPN, built on the base layers
    num_anchors = len(c.anchor_box_scales) * len(c.anchor_box_ratios)
    rpn = nn.rpn(shared_layers, num_anchors)

    # classifier = nn.classifier(shared_layers, roi_input, c.num_rois, nb_classes=len(classes_count), trainable=True)

    model_rpn = Model(img_input, rpn[:2])
    # model_classifier = Model([img_input, roi_input], classifier)

    # this is a model that holds both the RPN and the classifier, used to load/save weights for the models
    # model_all = Model([img_input, roi_input], rpn[:2] + classifier)

    print('loading weights from {}'.format(c.base_net_weights))
    model_rpn.load_weights(c.base_net_weights, by_name=True)
    # model_classifier.load_weights(c.base_net_weights, by_name=True)
    model_rpn.summary()

    optimizer = Adam(lr=1e-5)
    # optimizer_classifier = Adam(lr=1e-5)
    model_rpn.compile(optimizer=optimizer, loss=[nn.rpn_loss_cls(num_anchors), nn.rpn_loss_regr(num_anchors)])
    # model_classifier.compile(optimizer=optimizer_classifier,
    #                          loss=[nn.class_loss_cls, nn.class_loss_regr(len(classes_count) - 1)],
    #                          metrics={'dense_class_{}'.format(len(classes_count)): 'accuracy'})
    # model_all.compile(optimizer='sgd', loss='mae')

    epoch_length = 1000
    num_epochs = int(options.num_epochs)
    iter_num = 0

    losses = np.zeros((epoch_length, 5))
    rpn_accuracy_rpn_monitor = []
    rpn_accuracy_for_epoch = []
    start_time = time.time()

    best_loss = np.Inf

    class_mapping_inv = {v: k for k, v in class_mapping.items()}
    print('Starting training')
    vis = True

    for epoch_num in range(num_epochs):
        progbar = generic_utils.Progbar(epoch_length)
        print('Epoch {}/{}'.format(epoch_num + 1, num_epochs))

        while True:
            try:
                if len(rpn_accuracy_rpn_monitor) == epoch_length and c.verbose:
                    mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor)) / len(rpn_accuracy_rpn_monitor)
                    rpn_accuracy_rpn_monitor = []
                    print('Average number of overlapping bounding boxes from RPN = {} for {} previous iterations'.format(
                            mean_overlapping_bboxes, epoch_length))
                    if mean_overlapping_bboxes == 0:
                        print('RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.')

                X, Y, img_data = next(data_gen_train)

                loss_rpn = model_rpn.train_on_batch(X, Y)

                P_rpn = model_rpn.predict_on_batch(X)

                R = label_rcnn_roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], c, K.image_dim_ordering(), use_regr=True,
                                                      overlap_thresh=0.7, max_boxes=300)
                # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
                X2, Y1, Y2, IouS = label_rcnn_roi_helpers.calc_iou(R, img_data, c, class_mapping)

                if X2 is None:
                    rpn_accuracy_rpn_monitor.append(0)
                    rpn_accuracy_for_epoch.append(0)
                    continue

                neg_samples = np.where(Y1[0, :, -1] == 1)
                pos_samples = np.where(Y1[0, :, -1] == 0)

                if len(neg_samples) > 0:
                    neg_samples = neg_samples[0]
                else:
                    neg_samples = []

                if len(pos_samples) > 0:
                    pos_samples = pos_samples[0]
                else:
                    pos_samples = []

                rpn_accuracy_rpn_monitor.append(len(pos_samples))
                rpn_accuracy_for_epoch.append(len(pos_samples))

                if c.num_rois > 1:
                    if len(pos_samples) < c.num_rois // 2:
                        selected_pos_samples = pos_samples.tolist()
                    else:
                        selected_pos_samples = np.random.choice(pos_samples, c.num_rois // 2, replace=False).tolist()
                    try:
                        selected_neg_samples = np.random.choice(neg_samples, c.num_rois - len(selected_pos_samples),
                                                                replace=False).tolist()
                    except ValueError:  # not enough negatives to sample without replacement
                        selected_neg_samples = np.random.choice(neg_samples, c.num_rois - len(selected_pos_samples),
                                                                replace=True).tolist()

                    sel_samples = selected_pos_samples + selected_neg_samples
                else:
                    # in the extreme case where num_rois = 1, we pick a random pos or neg sample
                    selected_pos_samples = pos_samples.tolist()
                    selected_neg_samples = neg_samples.tolist()
                    if np.random.randint(0, 2):
                        sel_samples = random.choice(neg_samples)
                    else:
                        sel_samples = random.choice(pos_samples)

                # loss_class = model_classifier.train_on_batch([X, X2[:, sel_samples, :]],
                #                                              [Y1[:, sel_samples, :], Y2[:, sel_samples, :]])

                losses[iter_num, 0] = loss_rpn[1]
                losses[iter_num, 1] = loss_rpn[2]

                # losses[iter_num, 2] = loss_class[1]
                # losses[iter_num, 3] = loss_class[2]
                # losses[iter_num, 4] = loss_class[3]

                iter_num += 1

                progbar.update(iter_num,
                               [('rpn_cls', np.mean(losses[:iter_num, 0])),
                                ('rpn_regr', np.mean(losses[:iter_num, 1]))])
                # progbar.update(iter_num,
                #                [('rpn_cls', np.mean(losses[:iter_num, 0])),
                #                 ('rpn_regr', np.mean(losses[:iter_num, 1])),
                #                 ('detector_cls', np.mean(losses[:iter_num, 2])),
                #                 ('detector_regr', np.mean(losses[:iter_num, 3]))])

                if iter_num == epoch_length:
                    loss_rpn_cls = np.mean(losses[:, 0])
                    loss_rpn_regr = np.mean(losses[:, 1])
                    # loss_class_cls = np.mean(losses[:, 2])
                    # loss_class_regr = np.mean(losses[:, 3])
                    # class_acc = np.mean(losses[:, 4])

                    mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch)
                    rpn_accuracy_for_epoch = []

                    if c.verbose:
                        print('Mean number of bounding boxes from RPN overlapping ground truth boxes: {}'.format(
                            mean_overlapping_bboxes))
                        # print('Classifier accuracy for bounding boxes from RPN: {}'.format(class_acc))
                        print('Loss RPN classifier: {}'.format(loss_rpn_cls))
                        print('Loss RPN regression: {}'.format(loss_rpn_regr))
                        # print('Loss Detector classifier: {}'.format(loss_class_cls))
                        # print('Loss Detector regression: {}'.format(loss_class_regr))
                        print('Elapsed time: {}'.format(time.time() - start_time))

                    curr_loss = loss_rpn_cls + loss_rpn_regr
                    # curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
                    iter_num = 0
                    start_time = time.time()

                    if curr_loss < best_loss:
                        if c.verbose:
                            print('Total loss decreased from {} to {}, saving weights'.format(best_loss, curr_loss))
                        best_loss = curr_loss
                        model_rpn.save_weights(c.model_path)
                        # model_all.save_weights(c.model_path)

                    break

            except Exception as e:
                print('Exception: {}'.format(e))
                continue

    print('Training complete, exiting.')
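# The positive/negative RoI balancing inside the training loop above can be
# read as one rule; a hypothetical helper restating it (not in the source):
import numpy as np

def sample_rois(pos_samples, neg_samples, num_rois):
    # up to half the RoIs come from positives; negatives fill the remainder,
    # sampled with replacement only when there are too few of them
    n_pos = min(len(pos_samples), num_rois // 2)
    pos = np.random.choice(pos_samples, n_pos, replace=False).tolist()
    replace = len(neg_samples) < num_rois - n_pos
    neg = np.random.choice(neg_samples, num_rois - n_pos, replace=replace).tolist()
    return pos + neg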
Example #12
def unet_model_3d(input_shape,
                  pool_size=(2, 2, 2),
                  n_labels=1,
                  initial_learning_rate=0.00001,
                  deconvolution=False,
                  depth=4,
                  n_base_filters=32,
                  include_label_wise_dice_coefficients=False,
                  metrics=dice_coefficient,
                  batch_normalization=True,
                  activation_name="sigmoid"):

    inputs = Input(input_shape)
    current_layer = inputs
    levels = list()

    # add levels with max pooling
    for layer_depth in range(depth):
        layer1 = create_convolution_block(
            input_layer=current_layer,
            n_filters=n_base_filters * (2**layer_depth),
            batch_normalization=batch_normalization)
        layer2 = create_convolution_block(
            input_layer=layer1,
            n_filters=n_base_filters * (2**layer_depth) * 2,
            batch_normalization=batch_normalization)
        if layer_depth < depth - 1:
            current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
            levels.append([layer1, layer2, current_layer])
        else:
            current_layer = layer2
            levels.append([layer1, layer2])

    # add levels with up-convolution or up-sampling
    for layer_depth in range(depth - 2, -1, -1):
        up_convolution = get_up_convolution(
            pool_size=pool_size,
            deconvolution=deconvolution,
            n_filters=current_layer._keras_shape[1])(current_layer)
        concat = concatenate([up_convolution, levels[layer_depth][1]], axis=4)
        current_layer = create_convolution_block(
            n_filters=levels[layer_depth][1]._keras_shape[1],
            input_layer=concat,
            batch_normalization=batch_normalization)
        current_layer = create_convolution_block(
            n_filters=levels[layer_depth][1]._keras_shape[1],
            input_layer=current_layer,
            batch_normalization=batch_normalization)

    final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)
    act = Activation(activation_name)(final_convolution)
    model = Model(inputs=inputs, outputs=act)

    if not isinstance(metrics, list):
        metrics = [metrics]

    if include_label_wise_dice_coefficients and n_labels > 1:
        label_wise_dice_metrics = [
            get_label_dice_coefficient_function(index)
            for index in range(n_labels)
        ]
        if metrics:
            metrics = metrics + label_wise_dice_metrics
        else:
            metrics = label_wise_dice_metrics

    model.compile(optimizer=Adam(lr=initial_learning_rate),
                  loss=dice_coefficient_loss,
                  metrics=metrics)
    return model
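# dice_coefficient / dice_coefficient_loss are referenced above but not defined
# in this snippet; the standard smoothed Dice used with such U-Nets (an
# assumption about the exact smoothing constant):
from keras import backend as K

def dice_coefficient(y_true, y_pred, smooth=1.0):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coefficient_loss(y_true, y_pred):
    return -dice_coefficient(y_true, y_pred)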
Example #13
    def build_model(self):
        input_tensor = Input(batch_shape=self.input_shape)
        last_layer_name = 'activation_22'
        base_model = ResNet50(include_top=False,
                              input_tensor=input_tensor,
                              weights=None)
        base_model_out = base_model.get_layer(last_layer_name).output
        model = Model(input=base_model.input, output=base_model_out)
        no_features = base_model_out.get_shape()[3].value
        model = Convolution2D(no_features,
                              1,
                              1,
                              border_mode='same',
                              activation='relu')(model.output)
        model = Dropout(0.5)(model)
        class_out = Deconvolution2D(self.out_channels,
                                    8,
                                    8,
                                    output_shape=self.output_shape,
                                    subsample=(8, 8),
                                    activation='sigmoid',
                                    name='class_out')(model)

        model = Model(input=base_model.input, output=class_out)

        optimizer = Adam(lr=self.learning_rate,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        model.compile(optimizer=optimizer,
                      loss={'class_out': 'binary_crossentropy'},
                      metrics=['binary_accuracy'])
        for i, layer in enumerate(model.layers):
            layer.trainable = False
            if layer.name == last_layer_name:
                break

        if self.weight_file:
            logger.info('Loading weights from: {}', self.weight_file)
            model.load_weights(self.weight_file)

        logger.info('Compiled fully conv with output: {}', model.output)
        model.summary()
        return model
Example #14
        def unet_model_3d(input_shape,
                          downsize_filters_factor=1,
                          pool_size=(2, 2, 2),
                          n_labels=1,
                          initial_learning_rate=0.01,
                          deconvolution=False):
            """
            Builds the 3D U-Net Keras model.
            The [U-Net](https://arxiv.org/abs/1505.04597) uses a fully-convolutional architecture consisting of an
            encoder and a decoder. The encoder is able to capture contextual information while the decoder enables
            precise localization. Due to the large amount of parameters, the input shape has to be small since for e.g.
            images of shape 144x144x144 the model already consumes 32 GB of memory.

            :param input_shape: Shape of the input data (x_size, y_size, z_size, n_channels).
            :param downsize_filters_factor: Factor to which to reduce the number of filters. Making this value larger
            will reduce the amount of memory the model will need during training.
            :param pool_size: Pool size for the max pooling operations.
            :param n_labels: Number of binary labels that the model is learning.
            :param initial_learning_rate: Initial learning rate for the model. This will be decayed during training.
            :param deconvolution: If set to True, will use transpose convolution (deconvolution) instead of upsampling.
            This increases the amount of memory required during training.
            :return: Untrained 3D UNet Model
            """
            inputs = Input(input_shape)
            conv1 = Conv3D(int(32 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(inputs)
            conv1 = Conv3D(int(64 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(conv1)
            pool1 = MaxPooling3D(pool_size=pool_size)(conv1)

            conv2 = Conv3D(int(64 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(pool1)
            conv2 = Conv3D(int(128 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(conv2)
            pool2 = MaxPooling3D(pool_size=pool_size)(conv2)

            conv3 = Conv3D(int(128 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(pool2)
            conv3 = Conv3D(int(256 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(conv3)
            print(conv3.shape)
            pool3 = MaxPooling3D(pool_size=pool_size)(conv3)

            conv4 = Conv3D(int(256 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(pool3)
            conv4 = Conv3D(int(512 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(conv4)
            print(conv4.shape)

            up5 = get_upconv(pool_size=pool_size,
                             deconvolution=deconvolution,
                             depth=2,
                             nb_filters=int(512 / downsize_filters_factor),
                             image_shape=input_shape[-3:])(conv4)
            print(up5.shape)
            up5 = concatenate([up5, conv3], axis=4)
            conv5 = Conv3D(int(256 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(up5)
            conv5 = Conv3D(int(256 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(conv5)

            up6 = get_upconv(pool_size=pool_size,
                             deconvolution=deconvolution,
                             depth=1,
                             nb_filters=int(256 / downsize_filters_factor),
                             image_shape=input_shape[-3:])(conv5)
            up6 = concatenate([up6, conv2], axis=4)
            conv6 = Conv3D(int(128 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(up6)
            conv6 = Conv3D(int(128 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(conv6)

            up7 = get_upconv(pool_size=pool_size,
                             deconvolution=deconvolution,
                             depth=0,
                             nb_filters=int(128 / downsize_filters_factor),
                             image_shape=input_shape[-3:])(conv6)
            up7 = concatenate([up7, conv1], axis=4)
            conv7 = Conv3D(int(64 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(up7)
            conv7 = Conv3D(int(64 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(conv7)

            conv8 = Conv3D(n_labels, (1, 1, 1))(conv7)
            act = Activation('sigmoid')(conv8)
            model = Model(inputs=inputs, outputs=act)

            model.compile(optimizer=Adam(lr=initial_learning_rate),
                          loss=SegmentationModel.dice_coef_loss,
                          metrics=[SegmentationModel.dice_coef])

            return model
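# get_upconv is called above but not defined in this snippet; a minimal sketch,
# assuming it switches between transpose convolution and plain upsampling
# (depth and image_shape were presumably used by older Keras deconvolution to
# precompute output shapes and are unused here):
from keras.layers import Deconvolution3D, UpSampling3D

def get_upconv(pool_size, deconvolution, depth, nb_filters, image_shape,
               kernel_size=(2, 2, 2), strides=(2, 2, 2)):
    if deconvolution:
        return Deconvolution3D(filters=nb_filters, kernel_size=kernel_size,
                               strides=strides)
    return UpSampling3D(size=pool_size)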

def unormalise(x):
    # outputs in range [0, 1] rescaled to range [-100, 100]
    return (x * 200) - 100


last = Lambda(resize_image)(last)
last = Lambda(unormalise)(last)


def custom_mse(y_true, y_pred):
    return K.mean(K.square(y_pred - y_true), axis=[1, 2, 3])


model = Model(inputs=[main_input, vgg16.input], outputs=last)
opt = optimizers.Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(optimizer=opt, loss=custom_mse)

model.summary()

start_from = 160
save_every_n_epoch = 10
n_epochs = 300

model.load_weights("../weights/implementation7l-150.h5")

g = image_processing.image_generator_parts(list_dir,
                                           b_size,
                                           im_size=(224, 224))
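# The snippet defines start_from, save_every_n_epoch and n_epochs but the
# training loop itself is not shown; a minimal sketch consistent with those
# names and the checkpoint naming above (steps_per_epoch is an assumption):
for epoch in range(start_from, n_epochs, save_every_n_epoch):
    model.fit_generator(g, steps_per_epoch=100,
                        epochs=epoch + save_every_n_epoch, initial_epoch=epoch)
    model.save_weights("../weights/implementation7l-%d.h5" %
                       (epoch + save_every_n_epoch))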
Example #16
def run(dataset_seed, image_shape, batch_size, device, data_dir, output_dir,
        phases, architecture, include_base_top, include_top, o_meta, ckpt_file,
        weights, pooling, dense_layers, use_gram_matrix, last_base_layer,
        override, embedded_files_max_size, selected_layers):
    os.makedirs(output_dir, exist_ok=True)

    with tf.device(device):
        print('building model...')
        model = build_model(image_shape,
                            architecture=architecture,
                            weights=weights,
                            dropout_p=.0,
                            pooling=pooling,
                            last_base_layer=last_base_layer,
                            use_gram_matrix=use_gram_matrix,
                            dense_layers=dense_layers,
                            include_base_top=include_base_top,
                            include_top=include_top,
                            classes=[o['u'] for o in o_meta],
                            predictions_name=[o['n'] for o in o_meta],
                            predictions_activation=[o['a'] for o in o_meta])
        if ckpt_file:
            # Restore best parameters.
            print('loading weights from:', ckpt_file)
            model.load_weights(ckpt_file)

        available_layers = [l.name for l in model.layers]
        if set(selected_layers) - set(available_layers):
            print('available layers:', available_layers)
            raise ValueError('selection contains unknown layers: %s' %
                             selected_layers)

        style_features = [model.get_layer(l).output for l in selected_layers]

        if use_gram_matrix:
            gram_layer = layers.Lambda(gram_matrix,
                                       arguments=dict(norm_by_channels=False))
            style_features = [gram_layer(f) for f in style_features]

        model = Model(inputs=model.inputs, outputs=style_features)

    g = ImageDataGenerator(
        preprocessing_function=get_preprocess_fn(architecture))

    for phase in phases:
        phase_data_dir = os.path.join(data_dir, phase)
        output_file_name = os.path.join(output_dir, phase + '.%i.pickle')
        already_embedded = os.path.exists(output_file_name % 0)
        phase_exists = os.path.exists(phase_data_dir)

        if (already_embedded and not override) or not phase_exists:
            print('%s transformation skipped' % phase)
            continue

        # Shuffle must always be off in order to keep names consistent.
        data = g.flow_from_directory(phase_data_dir,
                                     target_size=image_shape[:2],
                                     class_mode='sparse',
                                     batch_size=batch_size,
                                     shuffle=False,
                                     seed=dataset_seed)
        print('transforming %i %s samples from %s' %
              (data.n, phase, phase_data_dir))
        part_id = 0
        samples_seen = 0
        displayed_once = False

        while samples_seen < data.n:
            z, y = {n: [] for n in selected_layers}, []
            chunk_size = 0
            chunk_start = samples_seen

            while chunk_size < embedded_files_max_size and samples_seen < data.n:
                _x, _y = next(data)

                outputs = model.predict_on_batch(_x)
                if not isinstance(outputs, list):
                    outputs = [outputs]

                chunk_size += sum(o.nbytes for o in outputs)

                for l, o in zip(selected_layers, outputs):
                    z[l].append(o)

                y.append(_y)
                samples_seen += _x.shape[0]
                chunk_p = int(100 * (samples_seen / data.n))

                if chunk_p % 10 == 0:
                    if not displayed_once:
                        print('\n%i%% (shape=%s, size=%.2f MB)' %
                              (chunk_p, _x.shape, chunk_size / 1024**2),
                              flush=True,
                              end='')
                        displayed_once = True
                else:
                    displayed_once = False
                    print('.', end='')

            for layer in selected_layers:
                z[layer] = np.concatenate(z[layer])

            with open(output_file_name % part_id, 'wb') as f:
                pickle.dump(
                    {
                        'data':
                        z,
                        'target':
                        np.concatenate(y),
                        'names':
                        np.asarray(data.filenames[chunk_start:samples_seen])
                    }, f, pickle.HIGHEST_PROTOCOL)
            part_id += 1
    print('done.')
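
A read-back sketch for the chunks written above (the path is illustrative): each pickle holds a dict with per-layer feature arrays, sparse targets, and the aligned file names.

import pickle

with open('output/train.0.pickle', 'rb') as f:  # illustrative path
    chunk = pickle.load(f)

print(sorted(chunk['data'].keys()))  # one feature array per selected layer
print(chunk['target'].shape)         # sparse class ids
print(chunk['names'][:5])            # file names aligned with the targets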
def complex_model2(input_length, output_levels, stride, receptive_field, nb_filters_, loading=False, path=""):
    fnn_init = 'he_uniform'
    def residual_block(input_):
        original = input_
        tanh_ = AtrousConvolution1D(
            nb_filter=nb_filters_,
            filter_length=2,
            atrous_rate=2**i,
            init=fnn_init,
            border_mode='valid',
            bias=False,
            causal=True,
            activation='tanh',
            name='AtrousConv1D_%d_tanh' % (2**i)
        )(input_)

        sigmoid_ = AtrousConvolution1D(
            nb_filter=nb_filters_,
            filter_length=2,
            atrous_rate=2**i,
            init=fnn_init,
            border_mode='valid',
            bias=False,
            causal=True,
            activation='sigmoid',
            name='AtrousConv1D_%d_sigm' % (2**i)
        )(input_)

        input_ = Merge(mode='mul')([tanh_, sigmoid_])

        res_x = Convolution1D(nb_filter=nb_filters_, filter_length=1, border_mode='same', bias=False)(input_)
        skip_c = res_x
        res_x = Merge(mode='sum')([original, res_x])

        return res_x, skip_c

    input = Input(shape=(input_length, output_levels), name='input_part')
    skip_connections = []
    output = input
    output = AtrousConvolution1D(
        nb_filter=nb_filters_,
        filter_length=2,
        atrous_rate=1,
        init=fnn_init,
        activation='relu',
        border_mode='valid',
        causal=True,
        name='initial_AtrousConv1D'
    )(output)

    for i in range(int(np.log2(receptive_field))):
        output, skip_c = residual_block(output)
        skip_connections.append(skip_c)

    out = Merge(mode='sum')(skip_connections)

    for _ in range(2):
        out = Activation('relu')(out)
        out = Convolution1D(output_levels, 1, activation=None, border_mode='same')(out)
    out = Activation('softmax', name='output_softmax')(out)

    #out = Reshape((dim1, nb_filters_*dim2))(out)
    output = TimeDistributed(Dense(output_dim=output_levels, init=fnn_init, activation='softmax'))(out)

    m = Model(input, output)
    if loading:
        m.load_weights(path)
        print "Weights loaded!"
    #ADAM = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-05, decay=0.0)
    m.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return m
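
Usage sketch (argument values are illustrative): the dilation rate doubles in every residual block, so the loop builds log2(receptive_field) blocks and receptive_field should be a power of two.

m = complex_model2(input_length=1024, output_levels=256, stride=1,
                   receptive_field=64, nb_filters_=32)
m.summary()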
                                       similar_persons,
                                       similar_matrix,
                                       train=True,
                                       source_datas=class_img_labels),
        triplet_generator_by_rank_list(train_images,
                                       batch_size,
                                       similar_persons,
                                       similar_matrix,
                                       train=False,
                                       source_datas=class_img_labels),
        source_pair_model_path,
        target_model_path,
        batch_size=batch_size)


if __name__ == '__main__':
    pair_model = load_model('../pretrain/cuhk_pair_pretrain.h5')
    # pair_model = load_model('../cuhk_market-rank_transfer.h5')
    base_model = pair_model.layers[3]
    base_model = Model(inputs=base_model.get_input_at(0),
                       outputs=[base_model.get_output_at(0)],
                       name='resnet50')
    print(isinstance(base_model.layers[-20], Conv2D))
    print(isinstance(base_model.layers[-20], BatchNormalization))

    rank_transfer_2dataset(
        '../pretrain/cuhk_pair_pretrain.h5', '../dataset/market_train.list',
        'rank_transfer_test.h5', '/home/cwh/coding/Market-1501/train',
        '/home/cwh/coding/rank-reid/data_clean/cross_filter_pid.log',
        '/home/cwh/coding/rank-reid/data_clean/cross_filter_score.log')
Example #19
def test_rpn():
    parser = OptionParser()

    parser.add_option("-p", "--path", dest="test_path", help="Path to test data.",
                      default='/Users/jie/projects/PanelSeg/ExpRcnn/eval.txt')
    parser.add_option("-n", "--num_rois", type="int", dest="num_rois",
                      help="Number of ROIs per iteration. Higher means more memory use.", default=32)
    parser.add_option("--config_filename", dest="config_filename",
                      help="Location to read the metadata related to the training (generated when training).",
                      default="config.pickle")
    parser.add_option("--network", dest="network", help="Base network to use. Supports nn_cnn_3_layer.",
                      default='nn_cnn_3_layer')
    parser.add_option("--rpn_weight_path", dest="rpn_weight_path",
                      default='/Users/jie/projects/PanelSeg/ExpRcnn/models/model_rpn_3_layer_color-0.0577.hdf5')
    parser.add_option("--classify_model_path", dest="classify_model_path",
                      default='/Users/jie/projects/PanelSeg/ExpRcnn/models/label50+bg_cnn_3_layer_color-0.9910.h5')

    (options, args) = parser.parse_args()

    if not options.test_path:  # if filename is not given
        parser.error('Error: path to test data must be specified. Pass --path to command line')

    config_output_filename = options.config_filename

    with open(config_output_filename, 'rb') as f_in:
        c = pickle.load(f_in)

    import nn_cnn_3_layer as nn

    # turn off any data augmentation at test time
    c.use_horizontal_flips = False
    c.use_vertical_flips = False
    c.rot_90 = False

    img_list_path = options.test_path

    def format_img_size(img, C):
        """ formats the image size based on config """
        img_min_side = float(C.im_size)
        (height, width, _) = img.shape

        if width <= height:
            ratio = img_min_side / width
            new_height = int(ratio * height)
            new_width = int(img_min_side)
        else:
            ratio = img_min_side / height
            new_width = int(ratio * width)
            new_height = int(img_min_side)
        img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
        return img, ratio

    def format_img_channels(img, C):
        """ formats the image channels based on config """
        # img = img[:, :, (2, 1, 0)]
        img = img.astype(np.float32)
        img[:, :, 0] -= C.img_channel_mean[0]
        img[:, :, 1] -= C.img_channel_mean[1]
        img[:, :, 2] -= C.img_channel_mean[2]
        img /= 255

        # img /= C.img_scaling_factor
        img = np.transpose(img, (2, 0, 1))
        img = np.expand_dims(img, axis=0)
        return img

    def format_img(img, C):
        """ formats an image for model prediction based on config """
        # img, ratio = format_img_size(img, C)
        img = format_img_channels(img, C)
        return img, 1.0

    # Method to transform the coordinates of the bounding box to its original size
    def get_real_coordinates(ratio, x1, y1, x2, y2):

        real_x1 = int(round(x1 // ratio))
        real_y1 = int(round(y1 // ratio))
        real_x2 = int(round(x2 // ratio))
        real_y2 = int(round(y2 // ratio))

        return (real_x1, real_y1, real_x2, real_y2)

    class_mapping = c.class_mapping

    if 'bg' not in class_mapping:
        class_mapping['bg'] = len(class_mapping)

    class_mapping = {v: k for k, v in class_mapping.items()}
    print(class_mapping)
    class_to_color = {class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping}
    c.num_rois = int(options.num_rois)

    # if c.network == 'resnet50':
    #     num_features = 1024
    # elif c.network == 'vgg':
    #     num_features = 512

    input_shape_img = (None, None, 3)
    # input_shape_features = (None, None, num_features)

    img_input = Input(shape=input_shape_img)
    roi_input = Input(shape=(c.num_rois, 4))
    # feature_map_input = Input(shape=input_shape_features)

    # define the base network
    shared_layers = nn.nn_base(img_input, trainable=True)

    # define the RPN, built on the base layers
    num_anchors = len(c.anchor_box_scales) * len(c.anchor_box_ratios)
    rpn_layers = nn.rpn(shared_layers, num_anchors)

    # classifier = nn.classifier(feature_map_input, roi_input, c.num_rois, nb_classes=len(class_mapping), trainable=True)

    model_rpn = Model(img_input, rpn_layers)
    # model_classifier_only = Model([feature_map_input, roi_input], classifier)

    # model_classifier = Model([feature_map_input, roi_input], classifier)

    print('Loading weights from {}'.format(c.model_path))
    model_rpn.load_weights(options.rpn_weight_path, by_name=True)
    # model_classifier.load_weights(c.model_path, by_name=True)

    model_rpn.compile(optimizer='sgd', loss='mse')
    # model_classifier.compile(optimizer='sgd', loss='mse')
    model_rpn.summary()

    model_classifier = load_model(options.classify_model_path)
    model_classifier.summary()

    all_imgs = []

    classes = {}

    bbox_threshold = 0.8

    visualise = True

    with open(img_list_path) as f:
        lines = f.readlines()

    for idx, filepath in enumerate(lines):
        print(filepath)
        st = time.time()
        filepath = filepath.strip()
        figure = Figure(filepath)
        figure.load_image()
        img = figure.image

        X, ratio = format_img(img, c)

        if K.image_dim_ordering() == 'tf':
            X = np.transpose(X, (0, 2, 3, 1))

        # get the feature maps and output from the RPN
        [Y1, Y2, F] = model_rpn.predict(X)

        R = label_rcnn_roi_helpers.rpn_to_roi(Y1, Y2, c, K.image_dim_ordering(), overlap_thresh=0.7)

        # convert from (x1,y1,x2,y2) to (x,y,w,h)
        R[:, 2] -= R[:, 0]
        R[:, 3] -= R[:, 1]

        patches = np.empty([R.shape[0], 28, 28, 3], dtype=int)

        for idx, roi in enumerate(R):
            x, y, w, h = roi[0], roi[1], roi[2], roi[3]
            patch = figure.image[y:y + h, x:x + w]
            patches[idx] = cv2.resize(patch, (28, 28))

        patches = patches.astype('float32')
        patches[:, :, :, 0] -= c.img_channel_mean[0]
        patches[:, :, :, 1] -= c.img_channel_mean[1]
        patches[:, :, :, 2] -= c.img_channel_mean[2]
        patches /= 255

        prediction = model_classifier.predict(patches)

        # # apply the spatial pyramid pooling to the proposed regions
        # bboxes = {}
        # probs = {}
        #
        # for jk in range(R.shape[0] // c.num_rois + 1):
        #     ROIs = np.expand_dims(R[c.num_rois * jk:c.num_rois * (jk + 1), :], axis=0)
        #     if ROIs.shape[1] == 0:
        #         break
        #
        #     if jk == R.shape[0] // c.num_rois:
        #         # pad R
        #         curr_shape = ROIs.shape
        #         target_shape = (curr_shape[0], c.num_rois, curr_shape[2])
        #         ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)
        #         ROIs_padded[:, :curr_shape[1], :] = ROIs
        #         ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]
        #         ROIs = ROIs_padded
        #
        #     # [P_cls, P_regr] = model_classifier_only.predict([F, ROIs])
        #
        #     for ii in range(P_cls.shape[1]):
        #
        #         if np.max(P_cls[0, ii, :]) < bbox_threshold or np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):
        #             continue
        #
        #         cls_name = class_mapping[np.argmax(P_cls[0, ii, :])]
        #
        #         if cls_name not in bboxes:
        #             bboxes[cls_name] = []
        #             probs[cls_name] = []
        #
        #         (x, y, w, h) = ROIs[0, ii, :]
        #
        #         cls_num = np.argmax(P_cls[0, ii, :])
        #         try:
        #             (tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)]
        #             tx /= c.classifier_regr_std[0]
        #             ty /= c.classifier_regr_std[1]
        #             tw /= c.classifier_regr_std[2]
        #             th /= c.classifier_regr_std[3]
        #             x, y, w, h = label_rcnn_roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)
        #         except:
        #             pass
        #         bboxes[cls_name].append(
        #             [c.rpn_stride * x, c.rpn_stride * y, c.rpn_stride * (x + w), c.rpn_stride * (y + h)])
        #         probs[cls_name].append(np.max(P_cls[0, ii, :]))
        #
        # all_dets = []

        # for key in bboxes:
        #     bbox = np.array(bboxes[key])
        #
        #     new_boxes, new_probs = label_rcnn_roi_helpers.non_max_suppression_fast(bbox, np.array(probs[key]), overlap_thresh=0.5)
        #     for jk in range(new_boxes.shape[0]):
        #         (x1, y1, x2, y2) = new_boxes[jk, :]
        #
        #         (real_x1, real_y1, real_x2, real_y2) = get_real_coordinates(ratio, x1, y1, x2, y2)
        #
        #         cv2.rectangle(img, (real_x1, real_y1), (real_x2, real_y2),
        #                       (int(class_to_color[key][0]), int(class_to_color[key][1]), int(class_to_color[key][2])),
        #                       2)
        #
        #         textLabel = '{}: {}'.format(key, int(100 * new_probs[jk]))
        #         all_dets.append((key, 100 * new_probs[jk]))
        #
        #         (retval, baseLine) = cv2.getTextSize(textLabel, cv2.FONT_HERSHEY_COMPLEX, 1, 1)
        #         textOrg = (real_x1, real_y1 - 0)
        #
        #         cv2.rectangle(img, (textOrg[0] - 5, textOrg[1] + baseLine - 5),
        #                       (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (0, 0, 0), 2)
        #         cv2.rectangle(img, (textOrg[0] - 5, textOrg[1] + baseLine - 5),
        #                       (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (255, 255, 255), -1)
        #         cv2.putText(img, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1)

        print('Elapsed time = {}'.format(time.time() - st))
        # print(all_dets)
        cv2.imshow('img', img)
        cv2.waitKey(0)
Example #20
def train_pair_predict(pair_model_path, target_train_path, pid_path, score_path):
    model = load_model(pair_model_path)
    model = Model(inputs=[model.get_layer('resnet50').get_input_at(0)],
                  outputs=[model.get_layer('resnet50').get_output_at(0)])
    train_predict(model, target_train_path, pid_path, score_path)
Example #21
def train_label_none_label_classification(label_folder, non_label_folder, model_file=None):

    c = Config()

    #  Build or load model
    if model_file is None:
        # create model
        img_input = Input(shape=(28, 28, 3))
        # prediction = model_cnn_2_layer.nn_classify_label_non_label(img_input)
        # prediction = model_cnn_3_layer.nn_classify_label_non_label(img_input)
        prediction = nn_cnn_3_layer.nn_classify_label_non_label(img_input)
        model = Model(inputs=img_input, outputs=prediction)
        model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])
    else:
        model = load_model(model_file)

    model.summary()

    # Load and normalize data
    x_train, y_train, x_test, y_test = load_train_validation_data(label_folder, non_label_folder)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')

    x_train[:, :, :, 0] -= c.img_channel_mean[0]
    x_train[:, :, :, 1] -= c.img_channel_mean[1]
    x_train[:, :, :, 2] -= c.img_channel_mean[2]
    x_test[:, :, :, 0] -= c.img_channel_mean[0]
    x_test[:, :, :, 1] -= c.img_channel_mean[1]
    x_test[:, :, :, 2] -= c.img_channel_mean[2]

    x_train /= 255
    x_test /= 255
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # x_train.reshape(x_train.shape[0], 28, 28, 3)
    # x_test.reshape(x_test.shape[0], 28, 28, 3)

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, 2)
    y_test = keras.utils.to_categorical(y_test, 2)

    # Checkpointing saves the network weights only when there is an improvement in classification accuracy
    # on the validation dataset (monitor='val_acc' and mode='max').
    file_path = "weights-improvement-{epoch:04d}-{val_acc:.4f}.hdf5"
    checkpoint = ModelCheckpoint(file_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint]

    model.fit(x_train, y_train,
              batch_size=128,
              epochs=100,
              verbose=1,
              callbacks=callbacks_list,
              validation_data=(x_test, y_test)
              )
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])

    model.save('final_model.h5')
Example #22
def train_rank_predict(rank_model_path, target_train_path, pid_path, score_path):
    model = load_model(rank_model_path, custom_objects={'cross_entropy_loss': cross_entropy_loss})
    model = Model(inputs=[model.get_layer('resnet50').get_input_at(0)],
                  outputs=[model.get_layer('resnet50').get_output_at(0)])
    train_predict(model, target_train_path, pid_path, score_path)
Example #23
    ll = [[[1, 0.5, 0.9, 0, 0.3], [0.5, 0, 0, 0, 1]]]
    q_att = np.array([[[1, 1] + [1] * 5, [1] * 7]])
    d_att = np.ones((1, 5, 7))
    d_att[0, 3:, :] = 0
    # 1)
    print('q attention')
    print(q_att)
    print('d attention')
    print(d_att)
    trans_mtx = np.array(ll)
    print(trans_mtx)

    # 2) + 4)
    kp = Model(inputs=[
        att_knrm.l_field_translation[0], att_knrm.l_q_att_in,
        att_knrm.l_field_att_in[0]
    ],
               outputs=att_knrm.l_d_layer[0])
    kp_res = kp.predict([trans_mtx, q_att, d_att])
    print('raw kernel scores with attention')
    print(kp_res.shape)
    print(kp_res)
    print('first kernel scores')
    print(kp_res[0, :, :, 0])

    # 3)
    kp = Model(inputs=[
        att_knrm.l_field_translation[0], att_knrm.l_q_att_in,
        att_knrm.l_field_att_in[0]
    ],
               outputs=att_knrm.kp_logsum(att_knrm.l_d_layer[0]))
def extract_features(input_dir,
                     output_dir,
                     model_type='inceptionv3',
                     batch_size=32):
    """
    Extracts features from a CNN trained on ImageNet classification from all
    videos in a directory.

    Args:
        input_dir (str): Input directory of videos to extract from.
        output_dir (str): Directory where features should be stored.
        model_type (str): Model type to use.
        batch_size (int): Batch size to use when processing.
    """

    input_dir = os.path.expanduser(input_dir)
    output_dir = os.path.expanduser(output_dir)

    if not os.path.isdir(input_dir):
        sys.stderr.write("Input directory '%s' does not exist!\n" % input_dir)
        sys.exit(1)

    # Load desired ImageNet model.
    # Note: import Keras only when needed so we don't waste time revving up
    #       Theano/TensorFlow needlessly in case of an error.
    model = None
    input_shape = (224, 224)
    shape = input_shape

    if model_type.lower() == 'inceptionv3':
        from keras.applications import InceptionV3
        model = InceptionV3(include_top=True, weights='imagenet')
    elif model_type.lower() == 'xception':
        from keras.applications import Xception
        model = Xception(include_top=True, weights='imagenet')
    elif model_type.lower() == 'resnet50':
        from keras.applications import ResNet50
        model = ResNet50(include_top=True, weights='imagenet')
    elif model_type.lower() == 'vgg16':
        from keras.applications import VGG16
        model = VGG16(include_top=True, weights='imagenet')
    elif model_type.lower() == 'vgg19':
        from keras.applications import VGG19
        model = VGG19(include_top=True, weights='imagenet')
    elif model_type.lower() == 'vggface':
        from keras.engine import Model
        from keras.layers import Input
        from keras_vggface.vggface import VGGFace
        model = VGGFace(include_top=True)  # pooling: None, avg or max
    else:
        sys.stderr.write("'%s' is not a valid ImageNet model.\n" % model_type)
        sys.exit(1)

    if model_type.lower() in ('inceptionv3', 'xception'):
        shape = (299, 299)

    # Get outputs of model from layer just before softmax predictions

    from keras.models import Model
    model = Model(model.inputs, outputs=model.layers[-2].output)

    # Create output directories

    visual_dir = output_dir  # RGB features
    #motion_dir = os.path.join(output_dir, 'motion') # Spatiotemporal features
    #opflow_dir = os.path.join(output_dir, 'opflow') # Optical flow features

    for directory in [visual_dir]:  #, motion_dir, opflow_dir]:
        if not os.path.exists(directory):
            os.makedirs(directory)

    # Find all videos that need to have features extracted

    def is_video(x):
        return x.endswith('.mp4') or x.endswith('.avi') or x.endswith('.mov')

    vis_existing = [x.split('.')[0] for x in os.listdir(visual_dir)]
    #mot_existing = [os.path.splitext(x)[0] for x in os.listdir(motion_dir)]
    #flo_existing = [os.path.splitext(x)[0] for x in os.listdir(opflow_dir)]

    video_filenames = [
        x for x in sorted(os.listdir(input_dir))
        if is_video(x) and os.path.splitext(x)[0] not in vis_existing
    ]

    # Go through each video and extract features

    from keras.applications.imagenet_utils import preprocess_input

    for video_filename in tqdm(video_filenames):

        # Open video clip for reading
        try:
            clip = VideoFileClip(os.path.join(input_dir, video_filename))
        except Exception as e:
            sys.stderr.write("Unable to read '%s'. Skipping...\n" %
                             video_filename)
            sys.stderr.write("Exception: {}\n".format(e))
            continue

        # Sample frames at 1 fps
        fps = int(np.round(clip.fps))
        frames = [
            scipy.misc.imresize(crop_center(x.astype(np.float32)), shape)
            for idx, x in enumerate(clip.iter_frames())
            if idx % fps == 0
        ]
        n_frames = len(frames)

        frames_arr = np.empty((n_frames, ) + shape + (3, ), dtype=np.float32)
        for idx, frame in enumerate(frames):
            frames_arr[idx, :, :, :] = frame

        frames_arr = preprocess_input(frames_arr)

        features = model.predict(frames_arr, batch_size=batch_size)

        name, _ = os.path.splitext(video_filename)
        feat_filepath = os.path.join(visual_dir, name + '.npy')

        with open(feat_filepath, 'wb') as f:
            np.save(f, features)
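
Usage sketch with illustrative paths:

extract_features(input_dir='~/videos',
                 output_dir='~/features/inceptionv3',
                 model_type='inceptionv3',
                 batch_size=32)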
Example #25
def build_model(embeddings):
    # input representation features
    words_input = Input(shape=[SEQUENCE_LEN], dtype='int32')
    pos1_input = Input(shape=[SEQUENCE_LEN], dtype='int32')
    pos2_input = Input(shape=[SEQUENCE_LEN], dtype='int32')
    segs_input = Input(shape=[SEQUENCE_LEN, 3], dtype='float32')

    # lexical features
    e1_input = Input(shape=[ENTITY_LEN], dtype='int32')  # L1
    e2_input = Input(shape=[ENTITY_LEN], dtype='int32')  # L2
    e1context_input = Input(shape=[2], dtype='int32')  # L3
    e2context_input = Input(shape=[2], dtype='int32')  # L4

    # word embedding
    we = embeddings["word_embeddings"]
    words_embed = Embedding(we.shape[0], we.shape[1], weights=[we])
    words = words_embed(words_input)
    e1 = words_embed(e1_input)
    e2 = words_embed(e2_input)
    e1context = words_embed(e1context_input)
    e2context = words_embed(e2context_input)

    # lexical feature
    e1_flat = Flatten()(e1)
    e2_flat = Flatten()(e2)
    e1context_flat = Flatten()(e1context)
    e2context_flat = Flatten()(e2context)

    # position embedding
    pe1 = embeddings["position_embeddings_1"]
    pos1 = Embedding(pe1.shape[0], pe1.shape[1], weights=[pe1])(pos1_input)
    pe2 = embeddings["position_embeddings_2"]
    pos2 = Embedding(pe2.shape[0], pe2.shape[1], weights=[pe2])(pos2_input)

    # input representation
    input_repre = Concatenate()([words, pos1, pos2])
    input_repre = Dropout(DROPOUT)(input_repre)

    # input attention: score each position, then normalize across the sequence
    e1_repeat = RepeatVector(SEQUENCE_LEN)(e1_flat)
    e2_repeat = RepeatVector(SEQUENCE_LEN)(e2_flat)
    concat = Concatenate()([words, e1_repeat, e2_repeat])
    alpha = Dense(1)(concat)  # softmax over a single unit would always be 1
    alpha = Reshape([SEQUENCE_LEN])(alpha)
    alpha = Activation("softmax")(alpha)
    alpha = RepeatVector(WORD_REPRE_SIZE)(alpha)
    alpha = Permute([2, 1])(alpha)
    input_repre = Multiply()([input_repre, alpha])

    # word-level convolution
    input_conved = Conv1D(filters=NB_FILTERS_WORD,
                          kernel_size=WINDOW_SIZE_WORD,
                          padding="same",
                          activation="relu",
                          kernel_initializer=TruncatedNormal(stddev=0.1),
                          bias_initializer=Constant(0.1))(input_repre)
    input_pooled = PiecewiseMaxPool()([input_conved, segs_input])

    # fully connected
    outputs = [input_pooled, e1_flat, e2_flat, e1context_flat, e2context_flat]
    output = Concatenate()(outputs)
    output = Dropout(DROPOUT)(output)
    output = Dense(
        units=NB_RELATIONS,
        activation="softmax",
        kernel_initializer=TruncatedNormal(stddev=0.1),
        bias_initializer=Constant(0.1),
        kernel_regularizer='l2',
        bias_regularizer='l2',
    )(output)

    model = Model(inputs=[
        words_input, pos1_input, pos2_input, e1_input, e2_input,
        e1context_input, e2context_input, segs_input
    ],
                  outputs=[output])
    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer='sgd',
                  metrics=['accuracy'])
    # model.summary()
    return model
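
A standalone illustration of the RepeatVector + Permute broadcast used for the attention weights above: a (batch, SEQUENCE_LEN) weight matrix is tiled to (batch, SEQUENCE_LEN, WORD_REPRE_SIZE) so it can be multiplied elementwise with the input representation (values are illustrative):

import numpy as np
from keras.layers import Input, Permute, RepeatVector
from keras.models import Model

seq_len, repre_size = 4, 3
w_in = Input(shape=(seq_len,))
# RepeatVector -> (batch, repre_size, seq_len); Permute -> (batch, seq_len, repre_size)
tiled = Permute([2, 1])(RepeatVector(repre_size)(w_in))

demo = Model(w_in, tiled)
print(demo.predict(np.array([[0.1, 0.2, 0.3, 0.4]])).shape)  # (1, 4, 3)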
Example #26
def att_res_ds_unet_model(input_shape=(4, 128, 128, 128),
                          n_base_filters=16,
                          depth=5,
                          dropout_rate=0.3,
                          n_segmentation_levels=3,
                          n_labels=4,
                          optimizer=Adam,
                          initial_learning_rate=5e-4,
                          loss_function=weighted_dice_coefficient_loss,
                          activation_name="sigmoid"):
    """
    This function builds on the model proposed by Isensee et al. for the BRATS 2017 competition, adding
    attention gates on the skip connections:
    https://www.cbica.upenn.edu/sbia/Spyridon.Bakas/MICCAI_BraTS/MICCAI_BraTS_2017_proceedings_shortPapers.pdf

    This network is highly similar to the model proposed by Kayalibay et al. "CNN-based Segmentation of Medical
    Imaging Data", 2017: https://arxiv.org/pdf/1701.03056.pdf


    :param input_shape:
    :param n_base_filters:
    :param depth:
    :param dropout_rate:
    :param n_segmentation_levels:
    :param n_labels:
    :param optimizer:
    :param initial_learning_rate:
    :param loss_function:
    :param activation_name:
    :return:
    """
    inputs = Input(input_shape)

    current_layer = inputs
    # outputs recorded at each depth
    level_output_layers = list()
    level_filters = list()
    for level_number in range(depth):  # depth=5; [0, 1, 2, 3, 4]
        # build the encoder (residual) structure

        n_level_filters = (2**level_number) * n_base_filters  # 2^level_number * 16
        level_filters.append(n_level_filters)

        # create_convolution_block: inherited from the original U-Net; conv -> (normalization) -> activation (ReLU/LeakyReLU)
        if current_layer is inputs:
            in_conv = create_convolution_block(
                current_layer, n_level_filters)  # first level: channels x4, spatial size unchanged
        else:
            in_conv = create_convolution_block(current_layer,
                                               n_level_filters,
                                               strides=(2, 2, 2))  # channels x2, spatial size halved

        # residual unit: conv_block -> dropout -> conv_block
        context_output_layer = create_context_module(in_conv,
                                                     n_level_filters,
                                                     dropout_rate=dropout_rate)

        # residual module: residual unit + in_conv
        summation_layer = Add()([in_conv, context_output_layer])
        level_output_layers.append(summation_layer)
        current_layer = summation_layer

    segmentation_layers = list()
    for level_number in range(depth - 2, -1, -1):  # [3,2,1,0]
        # attention
        gating = gating_signal(level_output_layers[level_number + 1],
                               level_filters[level_number], True)
        att = attention_block(level_output_layers[level_number], gating,
                              level_filters[level_number])

        # up-sampling: upsample by 2x, with a convolution halving the channels -> conv_block
        up_sampling = create_up_sampling_module(current_layer,
                                                level_filters[level_number])
        # concat: skip connection
        # concatenation_layer = concatenate([level_output_layers[level_number], up_sampling], axis=1)
        concatenation_layer = concatenate([att, up_sampling], axis=1)
        # after the concat, two convolutions halve the channels
        localization_output = create_localization_module(
            concatenation_layer, level_filters[level_number])
        current_layer = localization_output
        if level_number < n_segmentation_levels:  # < 3; record at levels [2, 1, 0]
            # record this level's segmentation map
            segmentation_layers.insert(
                0,
                Conv3D(n_labels, (1, 1, 1))(current_layer))

    output_layer = None
    for level_number in reversed(range(n_segmentation_levels)):
        segmentation_layer = segmentation_layers[level_number]
        if output_layer is None:
            output_layer = segmentation_layer
        else:
            output_layer = Add()([output_layer, segmentation_layer])

        if level_number > 0:
            output_layer = UpSampling3D(size=(2, 2, 2))(output_layer)

    # sigmoid-activated output
    activation_block = Activation(activation_name)(output_layer)

    model = Model(inputs=inputs, outputs=activation_block)
    model.compile(optimizer=optimizer(lr=initial_learning_rate),
                  loss=loss_function)
    # return the model
    return model
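
The gating_signal and attention_block helpers come from the source repository and are not shown here. A hedged sketch in the Attention U-Net style (Oktay et al., 2018), assuming channels_first ordering to match the axis=1 concatenations above:

from keras import backend as K
from keras.layers import (Activation, BatchNormalization, Conv3D, Lambda,
                          UpSampling3D, add, multiply)

def gating_signal(x, out_filters, batch_norm=False):
    # 1x1x1 convolution that maps the gate to the desired channel count
    g = Conv3D(out_filters, (1, 1, 1), padding='same',
               data_format='channels_first')(x)
    if batch_norm:
        g = BatchNormalization(axis=1)(g)
    return Activation('relu')(g)

def attention_block(x, gating, inter_filters):
    # project the skip connection down to the gate's spatial size
    theta_x = Conv3D(inter_filters, (2, 2, 2), strides=(2, 2, 2),
                     data_format='channels_first')(x)
    phi_g = Conv3D(inter_filters, (1, 1, 1),
                   data_format='channels_first')(gating)
    f = Activation('relu')(add([theta_x, phi_g]))
    psi = Conv3D(1, (1, 1, 1), data_format='channels_first')(f)
    rate = Activation('sigmoid')(psi)
    rate = UpSampling3D(size=(2, 2, 2),
                        data_format='channels_first')(rate)
    # broadcast the single-channel attention map across x's channels
    rate = Lambda(lambda t: K.repeat_elements(t, K.int_shape(x)[1],
                                              axis=1))(rate)
    return multiply([x, rate])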
Example #27
def unet_model_3d(input_shape, pool_size=(2, 2, 2), n_labels=1, initial_learning_rate=0.00001, deconvolution=False,
                  depth=4, n_base_filters=32, include_label_wise_dice_coefficients=False, metrics=dice_coefficient,
                  batch_normalization=False, activation_name="sigmoid"):
    """
    Builds the 3D U-Net Keras model.
    :param metrics: List metrics to be calculated during model training (default is dice coefficient).
    :param include_label_wise_dice_coefficients: If True and n_labels is greater than 1, model will report the dice
    coefficient for each label as metric.
    :param n_base_filters: The number of filters that the first layer in the convolution network will have. Following
    layers will contain a multiple of this number. Lowering this number will likely reduce the amount of memory required
    to train the model.
    :param depth: indicates the depth of the U-shape for the model. The greater the depth, the more max pooling
    layers will be added to the model. Lowering the depth may reduce the amount of memory required for training.
    :param input_shape: Shape of the input data (n_channels, x_size, y_size, z_size). The x, y, and z sizes must be
    divisible by the pool size to the power of the depth of the UNet, that is pool_size^depth.
    :param pool_size: Pool size for the max pooling operations.
    :param n_labels: Number of binary labels that the model is learning.
    :param initial_learning_rate: Initial learning rate for the model. This will be decayed during training.
    :param deconvolution: If set to True, will use transposed convolution (deconvolution) instead of up-sampling. This
    increases the amount of memory required during training.
    :return: Untrained 3D UNet Model
    """
    inputs = Input(input_shape)
    current_layer = inputs
    levels = list()

    # add levels with max pooling
    for layer_depth in range(depth):
        layer1 = create_convolution_block(input_layer=current_layer, n_filters=n_base_filters*(2**layer_depth),
                                          batch_normalization=batch_normalization)
        layer2 = create_convolution_block(input_layer=layer1, n_filters=n_base_filters*(2**layer_depth)*2,
                                          batch_normalization=batch_normalization)
        if layer_depth < depth - 1:
            current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
            levels.append([layer1, layer2, current_layer])
        else:
            current_layer = layer2
            levels.append([layer1, layer2])

    # add levels with up-convolution or up-sampling
    for layer_depth in range(depth-2, -1, -1):
        up_convolution = get_up_convolution(pool_size=pool_size, deconvolution=deconvolution,
                                            n_filters=current_layer._keras_shape[1])(current_layer)
        concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
                                                 input_layer=concat, batch_normalization=batch_normalization)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
                                                 input_layer=current_layer,
                                                 batch_normalization=batch_normalization)

    final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)
    act = Activation(activation_name)(final_convolution)
    model = Model(inputs=inputs, outputs=act)

    if not isinstance(metrics, list):
        metrics = [metrics]

    if include_label_wise_dice_coefficients and n_labels > 1:
        label_wise_dice_metrics = [get_label_dice_coefficient_function(index) for index in range(n_labels)]
        if metrics:
            metrics = metrics + label_wise_dice_metrics
        else:
            metrics = label_wise_dice_metrics

    model.compile(optimizer=Adam(lr=initial_learning_rate), loss=dice_coefficient_loss, metrics=metrics)
    return model
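
Usage sketch: per the docstring's divisibility requirement (pool_size**depth, i.e. 16 per axis for the defaults):

model = unet_model_3d(input_shape=(4, 64, 64, 64), n_labels=1)
model.summary()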
Example #28
def isensee2017_model(input_shape=(4, 128, 128, 128),
                      n_base_filters=16,
                      depth=5,
                      dropout_rate=0.3,
                      n_segmentation_levels=3,
                      n_labels=4,
                      optimizer=Adam,
                      initial_learning_rate=5e-4,
                      loss_function=weighted_dice_coefficient_loss,
                      activation_name="sigmoid"):
    """
    This function builds a model proposed by Isensee et al. for the BRATS 2017 competition:
    https://www.cbica.upenn.edu/sbia/Spyridon.Bakas/MICCAI_BraTS/MICCAI_BraTS_2017_proceedings_shortPapers.pdf

    This network is highly similar to the model proposed by Kayalibay et al. "CNN-based Segmentation of Medical
    Imaging Data", 2017: https://arxiv.org/pdf/1701.03056.pdf


    :param input_shape:
    :param n_base_filters:
    :param depth:
    :param dropout_rate:
    :param n_segmentation_levels:
    :param n_labels:
    :param optimizer:
    :param initial_learning_rate:
    :param loss_function:
    :param activation_name:
    :return:
    """
    inputs = Input(input_shape)

    current_layer = inputs
    level_output_layers = list()
    level_filters = list()
    for level_number in range(depth):
        n_level_filters = (2**level_number) * n_base_filters
        level_filters.append(n_level_filters)

        if current_layer is inputs:
            in_conv = create_convolution_block(current_layer, n_level_filters)
        else:
            in_conv = create_convolution_block(current_layer,
                                               n_level_filters,
                                               strides=(2, 2, 2))

        context_output_layer = create_context_module(in_conv,
                                                     n_level_filters,
                                                     dropout_rate=dropout_rate)

        summation_layer = Add()([in_conv, context_output_layer])
        level_output_layers.append(summation_layer)
        current_layer = summation_layer

    segmentation_layers = list()
    for level_number in range(depth - 2, -1, -1):
        up_sampling = create_up_sampling_module(current_layer,
                                                level_filters[level_number])
        concatenation_layer = concatenate(
            [level_output_layers[level_number], up_sampling], axis=1)
        localization_output = create_localization_module(
            concatenation_layer, level_filters[level_number])
        current_layer = localization_output
        if level_number < n_segmentation_levels:
            segmentation_layers.insert(
                0,
                create_convolution_block(current_layer,
                                         n_filters=n_labels,
                                         kernel=(1, 1, 1)))

    output_layer = None
    for level_number in reversed(range(n_segmentation_levels)):
        segmentation_layer = segmentation_layers[level_number]
        if output_layer is None:
            output_layer = segmentation_layer
        else:
            output_layer = Add()([output_layer, segmentation_layer])

        if level_number > 0:
            output_layer = UpSampling3D(size=(2, 2, 2))(output_layer)

    activation_block = Activation(activation_name)(output_layer)

    model = Model(inputs=inputs, outputs=activation_block)
    model.compile(optimizer=optimizer(lr=initial_learning_rate),
                  loss=loss_function)
    return model
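
Note how the deep-supervision loop telescopes: with n_segmentation_levels=3 the output is upsample(upsample(s2) + s1) + s0, where s_k is the 1x1x1 segmentation block produced at decoder level k. A usage sketch with the source defaults (channels_first, BRATS-style 4-modality input):

model = isensee2017_model(input_shape=(4, 128, 128, 128), n_labels=4)
model.summary()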
Example #29
    def make(self, theano_kwargs=None):
        '''Make the model and compile it.

        Igor's config options control everything.

        Args:
            theano_kwargs: dict for debugging Theano or submitting something custom
        '''

        if self.igor.embedding_type == "convolutional":
            make_convolutional_embedding(self.igor)
        elif self.igor.embedding_type == "token":
            make_token_embedding(self.igor)
        elif self.igor.embedding_type == "shallowconv":
            make_shallow_convolutional_embedding(self.igor)
        elif self.igor.embedding_type == "minimaltoken":
            make_minimal_token_embedding(self.igor)
        else:
            raise Exception("Incorrect embedding type")

        B = self.igor.batch_size
        spine_input_shape = (B, self.igor.max_num_supertags)
        child_input_shape = (B, 1)
        parent_input_shape = (B, 1)

        E, V = self.igor.word_embedding_size, self.igor.word_vocab_size  # for word embeddings

        repeat_N = self.igor.max_num_supertags  # for lex
        mlp_size = self.igor.mlp_size

        ## dropout parameters
        p_emb = self.igor.p_emb_dropout
        p_W = self.igor.p_W_dropout
        p_U = self.igor.p_U_dropout
        w_decay = self.igor.weight_decay
        p_mlp = self.igor.p_mlp_dropout

        def predict_params():
            return {
                'output_dim': 1,
                'W_regularizer': l2(w_decay),
                'activation': 'relu',
                'b_regularizer': l2(w_decay)
            }

        dspineset_in = Input(batch_shape=spine_input_shape,
                             name='daughter_spineset_in',
                             dtype='int32')
        pspineset_in = Input(batch_shape=spine_input_shape,
                             name='parent_spineset_in',
                             dtype='int32')
        dhead_in = Input(batch_shape=child_input_shape,
                         name='daughter_head_input',
                         dtype='int32')
        phead_in = Input(batch_shape=parent_input_shape,
                         name='parent_head_input',
                         dtype='int32')
        dspine_in = Input(batch_shape=child_input_shape,
                          name='daughter_spine_input',
                          dtype='int32')
        inputs = [dspineset_in, pspineset_in, dhead_in, phead_in, dspine_in]

        ### Layer functions
        ############# Convert the word indices to vectors
        F_embedword = Embedding(input_dim=V,
                                output_dim=E,
                                mask_zero=True,
                                W_regularizer=l2(w_decay),
                                dropout=p_emb)

        if self.igor.saved_embeddings is not None:
            self.logger.info("+ Cached embeddings loaded")
            F_embedword.initial_weights = [self.igor.saved_embeddings]

        ###### Prediction Functions
        ## these functions learn a vector which turns a tensor into a matrix of probabilities

        ### P(Parent supertag | Child, Context)
        F_parent_predict = ProbabilityTensor(
            name='parent_predictions',
            dense_function=Dense(**predict_params()))
        ### P(Leaf supertag)
        F_leaf_predict = ProbabilityTensor(
            name='leaf_predictions', dense_function=Dense(**predict_params()))

        ###### Network functions.
        ##### Input word, correct its dimensions (basically squash in a certain way)
        F_singleword = compose(Fix(), F_embedword)
        ##### Input spine, correct dimensions, broadcast across 1st dimension
        F_singlespine = compose(RepeatVector(repeat_N), Fix(),
                                self.igor.F_embedspine)
        ##### Concatenate and map to a single space
        F_alignlex = compose(
            RepeatVector(repeat_N), Dropout(p_mlp),
            Dense(mlp_size, activation='relu', name='dense_align_lex'), concat)

        F_alignall = compose(
            Distribute(Dropout(p_mlp), name='distribute_align_all_dropout'),
            Distribute(Dense(mlp_size,
                             activation='relu',
                             name='align_all_dense'),
                       name='distribute_align_all_dense'), concat)
        F_alignleaf = compose(
            Distribute(
                Dropout(p_mlp * 0.66), name='distribute_leaf_dropout'
            ),  ### need a separate one because the 'concat' differs between the two situations
            Distribute(Dense(mlp_size, activation='relu', name='leaf_dense'),
                       name='distribute_leaf_dense'),
            concat)

        ### embed and form all of the inputs into their components
        ### note: spines == supertags. early word choice, haven't refactored.
        leaf_spines = self.igor.F_embedspine(dspineset_in)
        pspine_context = self.igor.F_embedspine(pspineset_in)
        dspine_single = F_singlespine(dspine_in)

        dhead = F_singleword(dhead_in)
        phead = F_singleword(phead_in)

        ### combine the lexical material
        lexical_context = F_alignlex([dhead, phead])

        #### P(Parent Supertag | Daughter Supertag, Lexical Context)
        ### we know the daughter spine, want to know the parent spine
        ### size is (batch, num_supertags)
        parent_problem = F_alignall(
            [lexical_context, dspine_single, pspine_context])

        ### we don't have the parent, we just have a leaf
        leaf_problem = F_alignleaf([lexical_context, leaf_spines])

        parent_predictions = F_parent_predict(parent_problem)
        leaf_predictions = F_leaf_predict(leaf_problem)
        predictions = [parent_predictions, leaf_predictions]

        theano_kwargs = theano_kwargs or {}
        ## make it quick so i can load in the weights.
        self.model = Model(input=inputs,
                           output=predictions,
                           preloaded_data=self.igor.preloaded_data,
                           **theano_kwargs)

        #mask_cache = traverse_nodes(parent_prediction)
        #desired_masks = ['merge_3.in.mask.0']
        #self.p_tensor = K.function(inputs+[K.learning_phase()], [parent_predictions, F_parent_predict.inbound_nodes[0].input_masks[0]])

        if self.igor.from_checkpoint:
            self.load_checkpoint_weights()
        elif not self.igor.in_training:
            raise Exception("No point in running this without trained weights")

        if not self.igor.in_training:
            expanded_children = RepeatVector(repeat_N, axis=2)(leaf_spines)
            expanded_parent = RepeatVector(repeat_N, axis=1)(pspine_context)
            expanded_lex = RepeatVector(repeat_N, axis=1)(
                lexical_context
            )  # axis here is arbitrary; it repeats on 1 and 2, but was already repeated once
            huge_tensor = concat(
                [expanded_lex, expanded_children, expanded_parent])
            densely_aligned = LastDimDistribute(
                F_alignall.get(1).layer)(huge_tensor)
            output_predictions = Distribute(
                F_parent_predict, force_reshape=True)(densely_aligned)

            primary_inputs = [phead_in, dhead_in, pspineset_in, dspineset_in]
            leaf_inputs = [phead_in, dhead_in, dspineset_in]

            self.logger.info("+ Compiling prediction functions")
            self.inner_func = K.Function(primary_inputs + [K.learning_phase()],
                                         output_predictions)
            self.leaf_func = K.Function(leaf_inputs + [K.learning_phase()],
                                        leaf_predictions)
            try:
                self.get_ptensor = K.function(
                    primary_inputs + [K.learning_phase()], [
                        output_predictions,
                    ])
            except:
                import pdb
                pdb.set_trace()
        else:

            optimizer = Adam(self.igor.LR,
                             clipnorm=self.igor.max_grad_norm,
                             clipvalue=self.igor.grad_clip_threshold)

            theano_kwargs = theano_kwargs or {}
            self.model.compile(loss="categorical_crossentropy",
                               optimizer=optimizer,
                               metrics=['accuracy'],
                               **theano_kwargs)
Example #30
def MobileUNet(input_shape=None,
               alpha=1.0,
               alpha_up=1.0,
               depth_multiplier=1,
               dropout=1e-3,
               input_tensor=None,
               weight_decay=1e-5):
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    b00 = _conv_block(img_input, 32, alpha, strides=(4, 4), block_id=0)
    b01 = _depthwise_conv_block(b00, 64, alpha, depth_multiplier, block_id=1)

    b02 = _depthwise_conv_block(b01, 128, alpha, depth_multiplier, block_id=2, strides=(2, 2))
    b03 = _depthwise_conv_block(b02, 128, alpha, depth_multiplier, block_id=3)

    b04 = _depthwise_conv_block(b03, 256, alpha, depth_multiplier, block_id=4, strides=(2, 2))
    b05 = _depthwise_conv_block(b04, 256, alpha, depth_multiplier, block_id=5)

    b06 = _depthwise_conv_block(b05, 512, alpha, depth_multiplier, block_id=6, strides=(2, 2))
    b07 = _depthwise_conv_block(b06, 512, alpha, depth_multiplier, block_id=7)
    b08 = _depthwise_conv_block(b07, 512, alpha, depth_multiplier, block_id=8)
    b09 = _depthwise_conv_block(b08, 512, alpha, depth_multiplier, block_id=9)
    b10 = _depthwise_conv_block(b09, 512, alpha, depth_multiplier, block_id=10)
    b11 = _depthwise_conv_block(b10, 512, alpha, depth_multiplier, block_id=11)

    b12 = _depthwise_conv_block(b11, 1024, alpha, depth_multiplier, block_id=12, strides=(2, 2))
    b13 = _depthwise_conv_block(b12, 1024, alpha, depth_multiplier, block_id=13)
    # b13 = Dropout(dropout)(b13)

    filters = int(512 * alpha)
    up1 = concatenate([
        Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same',
                        kernel_regularizer=regu.l2(weight_decay),
                        bias_regularizer=regu.l2(weight_decay))(b13),
        b11,
    ], axis=3)
    b14 = _depthwise_conv_block(up1, filters, alpha_up, depth_multiplier, block_id=14)

    filters = int(256 * alpha)
    up2 = concatenate([
        Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same',
                        kernel_regularizer=regu.l2(weight_decay),
                        bias_regularizer=regu.l2(weight_decay))(b14),
        b05,
    ], axis=3)
    b15 = _depthwise_conv_block(up2, filters, alpha_up, depth_multiplier, block_id=15)

    filters = int(128 * alpha)
    up3 = concatenate([
        Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same',
                        kernel_regularizer=regu.l2(weight_decay),
                        bias_regularizer=regu.l2(weight_decay))(b15),
        b03,
    ], axis=3)
    b16 = _depthwise_conv_block(up3, filters, alpha_up, depth_multiplier, block_id=16)

    filters = int(64 * alpha)
    up4 = concatenate([
        Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same',
                        kernel_regularizer=regu.l2(weight_decay),
                        bias_regularizer=regu.l2(weight_decay))(b16),
        b01,
    ], axis=3)
    b17 = _depthwise_conv_block(up4, filters, alpha_up, depth_multiplier, block_id=17)

    filters = int(32 * alpha)
    up5 = concatenate([b17, b00], axis=3)
    # b18 = _depthwise_conv_block(up5, filters, alpha_up, depth_multiplier, block_id=18)
    b18 = _conv_block(up5, filters, alpha_up, block_id=18)

    logits = Conv2D(1, (1, 1), kernel_initializer='he_normal', activation='linear',
                    kernel_regularizer=regu.l2(weight_decay),
                    bias_regularizer=regu.l2(weight_decay))(b18)
    logits = BilinearUpSampling2D(size=(4, 4), name='logits')(logits)
    proba = Activation('sigmoid', name='proba')(logits)

    model = Model(img_input, [logits, proba])

    return model
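
BilinearUpSampling2D is a custom layer from the source repository. A minimal stand-in (an assumption, for TF1-era Keras) built from a Lambda over tf.image.resize_images:

import tensorflow as tf
from keras.layers import Lambda

def bilinear_upsampling_2d(size, name=None):
    def _resize(x):
        shape = tf.shape(x)
        new_size = tf.stack([shape[1] * size[0], shape[2] * size[1]])
        return tf.image.resize_images(x, new_size)  # bilinear by default
    return Lambda(_resize, name=name)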
def cnn_model(embedding_weights, cv_dat, max_len, model_w, lda, dictionary,
              idx2word, alpha):
    max_len = min(max_len, 1000)
    #max_len = 1000
    dropout = 0.8
    print(max_len)

    json_file = open(model_w + 'model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    model_lda = model_from_json(loaded_model_json)
    # load weights into new model (model_from_json restores the architecture
    # only; the pre-trained weights file would be loaded here)
    layer_dict = dict([(layer.name, layer) for layer in model_lda.layers])
    train_x, test_x, train_y, test_y = cv_dat
    test_lda = get_alpha(test_x, lda, dictionary, idx2word)

    print "Maximum length of sentence:" + str(max_len)
    print "Distribution of labels in training set:"
    print Counter([np.argmax(dat) for dat in train_y])
    print "Distribution of labels in testing set:"
    print Counter([np.argmax(dat) for dat in test_y])

    test_x = np.array(sequence.pad_sequences(test_x, maxlen=max_len),
                      dtype=np.int)

    #print (train_x.shape)
    #print train_y.shape
    train_x, val_x, train_y, val_y = train_test_split(train_x,
                                                      train_y,
                                                      test_size=0.166,
                                                      random_state=666,
                                                      stratify=train_y)

    train_lda = get_alpha(train_x, lda, dictionary, idx2word)
    val_lda = get_alpha(val_x, lda, dictionary, idx2word)
    #defining the model architecture now

    train_x = np.array(sequence.pad_sequences(train_x, maxlen=max_len),
                       dtype=np.int)
    val_x = np.array(sequence.pad_sequences(val_x, maxlen=max_len),
                     dtype=np.int)

    review_text = Input(shape=(max_len, ), dtype='int64', name="body_input")

    embedded_layer_body = Embedding(embedding_weights.shape[0],
                                    embedding_weights.shape[1],
                                    mask_zero=False,
                                    input_length=max_len,
                                    weights=[embedding_weights],
                                    trainable=True)(review_text)
    lda_input = Input(shape=(30, ), dtype='float32', name="lda_inp")
    #load the weights from pre-trained model
    lrelu = LeakyReLU(alpha=0.1)
    conv1 = Conv1D(filters=128,
                   kernel_size=1,
                   padding='same',
                   activation=lrelu,
                   weights=layer_dict['conv1d_1'].get_weights())
    conv2 = Conv1D(filters=128,
                   kernel_size=3,
                   padding='same',
                   activation=lrelu,
                   weights=layer_dict['conv1d_2'].get_weights())
    conv3 = Conv1D(filters=128,
                   kernel_size=5,
                   padding='same',
                   activation=lrelu,
                   weights=layer_dict['conv1d_3'].get_weights())

    #conv1 = Conv1D(filters=128, kernel_size=1, padding='same', activation='relu')
    #conv2 = Conv1D(filters=128, kernel_size=3, padding='same', activation='relu')
    #conv3 = Conv1D(filters=128, kernel_size=5, padding='same', activation='relu')

    conv1a = conv1(embedded_layer_body)
    glob1a = GlobalAveragePooling1D()(conv1a)
    #max1 = AveragePooling1D()(conv1a)
    conv2a = conv2(embedded_layer_body)
    glob2a = GlobalAveragePooling1D()(conv2a)
    #max2 = AveragePooling1D()(conv2a)
    conv3a = conv3(embedded_layer_body)
    glob3a = GlobalAveragePooling1D()(conv3a)
    #max3 = AveragePooling1D()(conv3a)

    merge_pooling = concatenate([glob1a, glob2a, glob3a])
    #merge_pooling = concatenate([max1, max2, max3])

    hidden_layer = Dense(1200,
                         activation='tanh',
                         kernel_initializer="glorot_uniform")(merge_pooling)
    #hidden_concat = concatenate([hidden_layer, lda_vec])
    dropout_hidden = Dropout(dropout)(hidden_layer)
    #merge_hidden = concatenate([dropout_hidden, lda_input])
    batch_norm = BatchNormalization()(dropout_hidden)

    #hidden_layer_2 = Dense(600, activation='tanh', kernel_initializer="glorot_uniform")(batch_norm)
    #dropout_hidden_2 = Dropout(0.6)(hidden_layer_2)
    #batch_n_2 = BatchNormalization()(dropout_hidden_2)

    hidden_layer_3 = Dense(600,
                           activation='tanh',
                           kernel_initializer="glorot_uniform")(batch_norm)
    dropout_hidden_3 = Dropout(0.5)(hidden_layer_3)
    batch_n_3 = BatchNormalization()(dropout_hidden_3)

    output_layer = Dense(2, activation='softmax', name='out_sent')(batch_n_3)
    output_lda = Dense(30, activation='softmax', name='out_lda')(batch_n_3)

    model = Model(inputs=[review_text], outputs=[output_layer, output_lda])
    layer_dict_nu = dict([(layer.name, layer) for layer in model.layers])

    adam = Adam(lr=0.001)

    model.compile(
        loss=['categorical_crossentropy', 'kullback_leibler_divergence'],
        optimizer=adam,
        metrics=['accuracy'],
        loss_weights={
            'out_sent': (1 - alpha),
            'out_lda': alpha
        })
    #model.compile(loss=ncce, optimizer=adam, metrics=['accuracy'])
    earlystop = EarlyStopping(monitor='val_out_sent_loss',
                              min_delta=0.0001,
                              patience=9,
                              verbose=1,
                              mode='auto')
    callbacks_list = [earlystop]
    model.summary()
    model.fit([train_x], [train_y, train_lda],
              batch_size=32 * 2,
              epochs=50,
              verbose=1,
              shuffle=True,
              callbacks=callbacks_list,
              validation_data=([val_x], [val_y, val_lda]))
    #model.fit([train_x, train_lda], [train_y, train_lda], batch_size=64, epochs=25,
    #          verbose=1, shuffle=True)
    test_predictions = model.predict([test_x], verbose=False)
    #test_y = [np.argmax(pred) for pred in test_y]
    test_pred = [np.argmax(pred) for pred in test_predictions[0]]
    #print test_pred
    test_y = [np.argmax(label) for label in test_y]
    error_preds = [
        i for i in range(0, len(test_pred)) if (test_y[i] != test_pred[i])
    ]
    print(len(error_preds))
    misclassified = [test_x[i] for i in error_preds]
    misclassified = [[get_id2word(idx, idx2word) for idx in sent if idx != 0]
                     for sent in misclassified]
    labels = [(test_y[i], test_pred[i]) for i in error_preds]
    acc = accuracy_score(test_y, test_pred)
    print(acc)
    return acc, misclassified, labels
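
# The alpha argument above trades the sentiment head off against the LDA head:
# total loss = (1 - alpha) * crossentropy(out_sent) + alpha * KL(out_lda).
# A minimal standalone sketch of the same two-head weighting (toy shapes and
# hypothetical layer sizes, not the model above):
from keras.layers import Input, Dense
from keras.models import Model

def toy_two_head_model(alpha):
    x = Input(shape=(16, ))
    h = Dense(8, activation='relu')(x)
    out_sent = Dense(2, activation='softmax', name='out_sent')(h)
    out_lda = Dense(30, activation='softmax', name='out_lda')(h)
    m = Model(inputs=x, outputs=[out_sent, out_lda])
    # per-output losses are weighted by output name, exactly as in cnn_model
    m.compile(optimizer='adam',
              loss=['categorical_crossentropy', 'kullback_leibler_divergence'],
              loss_weights={'out_sent': 1 - alpha, 'out_lda': alpha})
    return m
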
Beispiel #32
def unet_model_3d(input_shape,
                  n_labels,
                  batch_normalization=False,
                  initial_learning_rate=0.00001,
                  metrics=m.dice_coef):
    """
    input_shape:without batch_size,(img_height,img_width,img_depth)
    metrics:
    """

    inputs = Input(input_shape)

    down_layer = []

    layer = inputs

    # down_layer_1
    layer = res_block_v2_3d(layer, 32, batch_normalization=batch_normalization)
    down_layer.append(layer)
    layer = MaxPooling3D(pool_size=[2, 2, 2],
                         strides=[2, 2, 2],
                         padding='same')(layer)

    print(str(layer.get_shape()))

    # down_layer_2
    layer = res_block_v2_3d(layer, 64, batch_normalization=batch_normalization)
    down_layer.append(layer)
    layer = MaxPooling3D(pool_size=[2, 2, 2],
                         strides=[2, 2, 2],
                         padding='same')(layer)

    print(str(layer.get_shape()))

    # down_layer_3
    layer = res_block_v2_3d(layer,
                            128,
                            batch_normalization=batch_normalization)
    down_layer.append(layer)
    layer = MaxPooling3D(pool_size=[2, 2, 2],
                         strides=[2, 2, 2],
                         padding='same')(layer)

    print(str(layer.get_shape()))

    # down_layer_4
    layer = res_block_v2_3d(layer,
                            256,
                            batch_normalization=batch_normalization)
    down_layer.append(layer)
    layer = MaxPooling3D(pool_size=[2, 2, 2],
                         strides=[2, 2, 2],
                         padding='same')(layer)

    print(str(layer.get_shape()))

    # bottle_layer
    layer = res_block_v2_3d(layer,
                            512,
                            batch_normalization=batch_normalization)
    print(str(layer.get_shape()))

    # up_layer_4
    layer = up_and_concate_3d(layer, down_layer[3])
    layer = res_block_v2_3d(layer,
                            256,
                            batch_normalization=batch_normalization)
    print(str(layer.get_shape()))

    # up_layer_3
    layer = up_and_concate_3d(layer, down_layer[2])
    layer = res_block_v2_3d(layer,
                            128,
                            batch_normalization=batch_normalization)
    print(str(layer.get_shape()))

    # up_layer_2
    layer = up_and_concate_3d(layer, down_layer[1])
    layer = res_block_v2_3d(layer, 64, batch_normalization=batch_normalization)
    print(str(layer.get_shape()))

    # up_layer_1
    layer = up_and_concate_3d(layer, down_layer[0])
    layer = res_block_v2_3d(layer, 32, batch_normalization=batch_normalization)
    print(str(layer.get_shape()))

    # score_layer
    layer = Conv3D(n_labels, [1, 1, 1], strides=[1, 1, 1])(layer)
    print(str(layer.get_shape()))

    # softmax
    layer = Activation('softmax')(layer)
    print(str(layer.get_shape()))

    outputs = layer

    model = Model(inputs=inputs, outputs=outputs)

    if not isinstance(metrics, list):
        metrics = [metrics]

    model = multi_gpu_model(model, gpus=2)
    model.summary()
    model.compile(optimizer=Adam(lr=initial_learning_rate),
                  loss='categorical_crossentropy',
                  metrics=metrics)

    return model
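
# res_block_v2_3d and up_and_concate_3d are helpers that are not shown in this
# snippet. A plausible minimal sketch (pre-activation residual block plus
# upsample-and-concatenate, channels-last; the details are assumptions, not
# the original implementation):
from keras.layers import (Activation, BatchNormalization, Conv3D,
                          UpSampling3D, add, concatenate)

def res_block_v2_3d(input_layer, n_filters, batch_normalization=False):
    x = input_layer
    if batch_normalization:
        x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv3D(n_filters, (3, 3, 3), padding='same')(x)
    if batch_normalization:
        x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv3D(n_filters, (3, 3, 3), padding='same')(x)
    # 1x1x1 projection so the shortcut matches the new channel count
    shortcut = Conv3D(n_filters, (1, 1, 1), padding='same')(input_layer)
    return add([x, shortcut])

def up_and_concate_3d(layer, skip_connection):
    up = UpSampling3D(size=(2, 2, 2))(layer)
    return concatenate([up, skip_connection], axis=-1)
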
Beispiel #33
def unet_model_3d(input_shape, downsize_filters_factor=1, pool_size=(2, 2, 2), n_labels=1,
                  initial_learning_rate=0.00001, deconvolution=False):
    """
    Builds the 3D UNet Keras model.
    ## ORIGINAL: :param input_shape: Shape of the input data (n_channels, x_size, y_size, z_size).
    ## NOW: :param input_shape: Shape of the input data (x_size, y_size, z_size, n_channels)
    :param downsize_filters_factor: Factor to which to reduce the number of filters. Making this value larger will
    reduce the amount of memory the model will need during training.
    :param pool_size: Pool size for the max pooling operations.
    :param n_labels: Number of binary labels that the model is learning.
    :param initial_learning_rate: Initial learning rate for the model. This will be decayed during training.
    :param deconvolution: If set to True, will use transpose convolution (deconvolution) instead of up-sampling. This
    increases the amount of memory required during training.
    :return: Untrained 3D UNet Model
    """
    inputs = Input(input_shape)
    conv1 = Conv3D(int(32/downsize_filters_factor), (3, 3, 3), activation='relu',
                   padding='same')(inputs)
    conv1 = Conv3D(int(64/downsize_filters_factor), (3, 3, 3), activation='relu',
                   padding='same')(conv1)
    pool1 = MaxPooling3D(pool_size=pool_size)(conv1)

    conv2 = Conv3D(int(64/downsize_filters_factor), (1, 1, 1), activation='relu',
                   padding='same')(pool1)
    conv2 = Conv3D(int(128/downsize_filters_factor), (1, 1, 1), activation='relu',
                   padding='same')(conv2)

    """
    pool2 = MaxPooling3D(pool_size=pool_size)(conv2)

    conv3 = Conv3D(int(128/downsize_filters_factor), (3, 3, 3), activation='relu',
                   padding='same')(pool2)
    conv3 = Conv3D(int(256/downsize_filters_factor), (3, 3, 3), activation='relu',
                   padding='same')(conv3)
    pool3 = MaxPooling3D(pool_size=pool_size)(conv3)

    conv4 = Conv3D(int(256/downsize_filters_factor), (3, 3, 3), activation='relu',
                   padding='same')(pool3)
    conv4 = Conv3D(int(512/downsize_filters_factor), (3, 3, 3), activation='relu',
                   padding='same')(conv4)
    """
    """
    up5 = get_upconv(pool_size=pool_size, deconvolution=deconvolution, depth=2,
                     nb_filters=int(512/downsize_filters_factor), image_shape=input_shape[1:4])(conv4)
    up5 = concatenate([up5, conv3], axis=-1)
    conv5 = Conv3D(int(256/downsize_filters_factor), (3, 3, 3), activation='relu', padding='same')(up5)
    conv5 = Conv3D(int(256/downsize_filters_factor), (3, 3, 3), activation='relu',
                   padding='same')(conv5)

    up6 = get_upconv(pool_size=pool_size, deconvolution=deconvolution, depth=1,
                     nb_filters=int(256/downsize_filters_factor), image_shape=input_shape[1:4])(conv5)
    up6 = concatenate([up6, conv2], axis=-1)
    conv6 = Conv3D(int(128/downsize_filters_factor), (3, 3, 3), activation='relu', padding='same')(up6)
    conv6 = Conv3D(int(128/downsize_filters_factor), (3, 3, 3), activation='relu',
                   padding='same')(conv6)
    """
    up7 = get_upconv(pool_size=pool_size, deconvolution=deconvolution, depth=0,
                     nb_filters=int(128/downsize_filters_factor), image_shape=input_shape[1:4])(conv2)
    up7 = concatenate([up7, conv1], axis=-1)
    conv7 = Conv3D(int(64/downsize_filters_factor), (3, 3, 3), activation='relu', padding='same')(up7)
    conv7 = Conv3D(int(64/downsize_filters_factor), (3, 3, 3), activation='relu',
                   padding='same')(conv7)

    conv8 = Conv3D(n_labels, (1, 1, 1))(conv7)
    # Should this be softmax?
    act = Activation('softmax')(conv8)
    model = Model(inputs=inputs, outputs=act)
    model.compile(optimizer=Adam(lr=initial_learning_rate), loss=dice_coef_loss, metrics=[dice_coef])
    model.summary()
    return model
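
# get_upconv is another helper assumed by this snippet; typical 3D U-Net code
# returns a transpose convolution when deconvolution=True and plain upsampling
# otherwise. A sketch under that assumption (signature copied from the call
# sites above; depth and image_shape are accepted but unused here):
from keras.layers import Conv3DTranspose, UpSampling3D

def get_upconv(pool_size, deconvolution, depth, nb_filters, image_shape):
    if deconvolution:
        return Conv3DTranspose(nb_filters, kernel_size=pool_size,
                               strides=pool_size, padding='same')
    return UpSampling3D(size=pool_size)
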
class BaseModel:
    def __init__(self, use_development_set=True):

        # used dirs
        self.save_dir = "../saved_models/"
        self.dir = "../example/"
        self.embedding_dir = "../resource/embedding_matrix.pk"
        self.entity_embedding_dir = "../resource/entity_type_matrix.pk"
        self.index_ids_file = "tri_index_ids.pk"

        # some basic parameters of the model
        self.model = None
        self.max_len = 100
        self.num_words = 6820
        self.entity_type_num = 63

        # pre-trained embeddings and their parameters.
        self.embedding_matrix = BaseModel.load_pickle(self.embedding_dir)
        self.entity_embedding_matrix = BaseModel.load_pickle(
            self.entity_embedding_dir)
        self.embedding_trainable = False
        self.EMBEDDING_DIM = 200
        self.ENTITY_TYPE_VEC_DIM = 50

        # inputs to the model
        self.use_development_set = use_development_set
        self.train_word_inputs, self.train_entity_inputs, self.train_labels, self.train_attention_labels = self.load_data(
            train=True)
        self.test_word_inputs, self.test_entity_inputs, self.test_labels, self.test_attention_labels = self.load_data(
            train=False)
        self.dev_word_inputs, self.dev_entity_inputs, self.dev_labels, self.dev_attention_labels = [
            None, None, None, None
        ]
        # if you want to use development, this part can help you to split the development set from the train set.
        if self.use_development_set:
            self.split_train_set()

        # dict used to calculate the F1
        self.index_ids = BaseModel.load_pickle(self.dir + self.index_ids_file)

        self.sen_input, self.entity_type_input = None, None
        self.sen_embedding, self.entity_embedding = None, None
        self.output = None

    def build_model(self):
        pass

    def train_model(self):
        pass

    def predict(self):
        pass

    def save_model(self, file=""):
        self.model.save_weights(self.save_dir + file)

    def load_data(self, train=True):
        if train:
            path = self.dir + "train_"
        else:
            path = self.dir + "test_"

        with open(path + "input.pk", 'rb') as rf:
            word_inputs = pickle.load(rf)

        with open(path + "entity_inputs.pk", 'rb') as rf:
            entity_inputs = pickle.load(rf)

        with open(path + "labels.pk", 'rb') as rf:
            labels = pickle.load(rf)

        with open(path + "attention_label.pk", 'rb') as rf:
            attention_labels = pickle.load(rf)

        return word_inputs, entity_inputs, labels, attention_labels

    @staticmethod
    def load_pickle(file):
        with open(file, 'rb') as rf:
            return pickle.load(rf)

    def split_train_set(self):
        develop_doc_ids = self.load_pickle(self.dir + "development_doc_ids.pk")
        sen_doc_ids = self.load_pickle(self.dir + "train_sen_doc_ids.pk")

        train_index = []
        develop_index = []

        for index, doc_id in enumerate(sen_doc_ids):
            if doc_id in develop_doc_ids:
                develop_index.append(index)
            else:
                train_index.append(index)

        self.dev_word_inputs = self.train_word_inputs[develop_index]
        self.dev_entity_inputs = self.train_entity_inputs[develop_index]
        self.dev_labels = self.train_labels[develop_index]
        self.dev_attention_labels = self.train_attention_labels[develop_index]

        self.train_word_inputs = self.train_word_inputs[train_index]
        self.train_entity_inputs = self.train_entity_inputs[train_index]
        self.train_labels = self.train_labels[train_index]
        self.train_attention_labels = self.train_attention_labels[train_index]

        print(np.shape(self.train_word_inputs))
        print(np.shape(self.dev_word_inputs))

    def compile_model(self):
        self.sen_input, self.entity_type_input = self.make_input()
        self.sen_embedding, self.entity_embedding = self.embedded()

        self.output = self.build_model()

        inputs = [self.sen_input, self.entity_type_input]

        self.model = Model(inputs=inputs, outputs=self.output)
        self.model.compile(optimizer='adam',
                           loss='categorical_crossentropy',
                           metrics=['acc'])

    def make_input(self):
        sentence = Input(shape=(self.max_len, ),
                         dtype='int32',
                         name='sentence_input')
        entity_type = Input(shape=(self.max_len, ),
                            dtype='int32',
                            name='entity_type_input')

        return sentence, entity_type

    def embedded(self):

        sentence_embedding_layer = Embedding(self.num_words + 2,
                                             self.EMBEDDING_DIM,
                                             weights=[self.embedding_matrix],
                                             input_length=self.max_len,
                                             trainable=False,
                                             mask_zero=True)
        sentence_embedding = sentence_embedding_layer(self.sen_input)

        entity_embedding_layer = Embedding(
            self.entity_type_num + 2,
            self.ENTITY_TYPE_VEC_DIM,
            weights=[self.entity_embedding_matrix],
            input_length=self.max_len,
            trainable=True,
            mask_zero=True)
        entity_embedding = entity_embedding_layer(self.entity_type_input)

        return [sentence_embedding, entity_embedding]
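
# A sketch of how BaseModel is meant to be specialized: subclass it, implement
# build_model() on top of the two embeddings, then call compile_model(). The
# BiGRU head below is only an illustration, not part of the original code.
from keras.layers import Bidirectional, Concatenate, Dense, GRU

class ToyModel(BaseModel):
    def build_model(self):
        merged = Concatenate()([self.sen_embedding, self.entity_embedding])
        encoded = Bidirectional(GRU(64))(merged)
        return Dense(2, activation='softmax')(encoded)

# model = ToyModel(use_development_set=False)
# model.compile_model()
# model.model.fit([model.train_word_inputs, model.train_entity_inputs],
#                 model.train_labels, epochs=1, batch_size=32)
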
embedded_sequences = embedding_layer(sequence_input)

cnns = []

for filter_length in filter_lengths:
    x = Conv1D(nb_filter=nb_filter,
               filter_length=filter_length,
               border_mode='valid',
               activation='relu',
               W_constraint=maxnorm(3),
               W_regularizer=l2(0.0001),
               subsample_length=1)(embedded_sequences)
    x = MaxPooling1D(pool_length=MAX_SEQUENCE_LENGTH - filter_length + 1)(x)
    x = Flatten()(x)
    cnns.append(x)

x = merge(cnns, mode='concat')
x = Dropout(0.2)(x)
x = Dense(128, activation='relu')(x)
preds = Dense(len(labels_index), activation='softmax')(x)

model = Model(sequence_input, preds)

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# happy learning!
model.fit(x_train, y_train, validation_data=(x_val, y_val),
          nb_epoch=5, batch_size=128)
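
# The block above uses the Keras 1 API (nb_filter, filter_length, border_mode,
# W_constraint, subsample_length, merge, nb_epoch). The same branch under the
# Keras 2 names, as a sketch:
from keras.layers import Conv1D, MaxPooling1D, Flatten, concatenate
from keras.constraints import max_norm
from keras.regularizers import l2

def conv_branch(embedded_sequences, filter_length, nb_filter, max_sequence_length):
    x = Conv1D(filters=nb_filter,
               kernel_size=filter_length,
               padding='valid',
               activation='relu',
               kernel_constraint=max_norm(3),
               kernel_regularizer=l2(0.0001),
               strides=1)(embedded_sequences)
    x = MaxPooling1D(pool_size=max_sequence_length - filter_length + 1)(x)
    return Flatten()(x)

# merge(cnns, mode='concat') becomes concatenate(cnns), and the fit() argument
# nb_epoch=5 becomes epochs=5.
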
Beispiel #36
def main():
    args = get_args()
    depth = args.depth
    k = args.width
    weight_file = args.weight_file

    # load model and weights
    img_size = 224
    batch_size = 32

    nb_class = 101
    hidden_dim = 512
    vgg_model = VGGFace(include_top=False, input_shape=(224, 224, 3))
    last_layer = vgg_model.get_layer('pool5').output
    x = Flatten(name='flatten')(last_layer)
    x = Dense(hidden_dim, activation='relu', name='fc6')(x)
    x = Dense(hidden_dim, activation='relu', name='fc7')(x)
    out = Dense(nb_class, activation='softmax', name='fc8')(x)
    model = Model(vgg_model.input, out)

    model.load_weights(weight_file)
    dataset_root = Path(__file__).parent.joinpath("appa-real",
                                                  "appa-real-release")
    validation_image_dir = dataset_root.joinpath("test")
    gt_valid_path = dataset_root.joinpath("gt_avg_test.csv")
    image_paths = list(validation_image_dir.glob("*_face.jpg"))

    faces = np.empty((batch_size, img_size, img_size, 3))
    ages = []
    image_names = []

    for i, image_path in tqdm(enumerate(image_paths)):
        faces[i % batch_size] = cv2.resize(cv2.imread(str(image_path), 1),
                                           (img_size, img_size))
        image_names.append(image_path.name[:-9])

        if (i + 1) % batch_size == 0 or i == len(image_paths) - 1:
            results = model.predict(faces)
            ages_out = np.arange(0, 101).reshape(101, 1)
            # the model has a single softmax output, so use results directly
            predicted_ages = results.dot(ages_out).flatten()
            ages += list(predicted_ages)

    print(len(ages))
    print(len(image_names))
    name2age = {image_names[i]: ages[i] for i in range(len(image_names))}
    df = pd.read_csv(str(gt_valid_path))
    appa_abs_error = 0.0
    real_abs_error = 0.0
    epsilon_error = 0.0
    count1 = 0
    count2 = 0
    iter = 0

    for i, row in df.iterrows():
        #iter += 1
        difference1 = name2age[row.file_name] - row.apparent_age_avg
        difference2 = name2age[row.file_name] - row.real_age
        appa_abs_error += abs(difference1)
        real_abs_error += abs(difference2)
        epsilon_error += error(name2age[row.file_name], row.apparent_age_avg,
                               0.3)
        # if int(difference1) == 0:
        #     count1 += 1
        # if int(difference2) == 0:
        #     count2 += 1
        # if iter < 5:
        #     print("Predicted age: {}".format(name2age[row.file_name]))
    print("MAE Apparent: {}".format(appa_abs_error / len(image_names)))
    print("MAE Real: {}".format(real_abs_error / len(image_names)))
    print("\u03B5-error: {}".format(epsilon_error / len(image_names)))
Beispiel #37
    def _add_model(self):
        nodes = self.nodes
        config = self.model_config['PHM']
        p = config['dropout_p']
        mlp_l2 = config['l2']
        D = config['mlp_output_dim']

        activation = lambda x: relu(x, alpha=config['leaky_alpha'])
        # SENTENCE LEVEL
        # answer plus question
        nodes['question_encoding_repeated'] = RepeatVector(self.answer_size)(nodes['question_encoding'])
        nodes['answer_plus_question'] = merge([nodes['answer_encoding'], nodes['question_encoding_repeated']],
                                              mode='sum')

        # story mlp and dropout
        ninputs, noutputs = ['story_encoding1'], ['story_encoding_mlp']
        for ngram in config['ngram_inputs']:
            ninputs.append('story_encoding1_%sgram' % ngram)
            noutputs.append('story_encoding_mlp_%sgram' % ngram)

        story_encoding_mlp = NTimeDistributed(Dense(D, init="identity", activation=activation,
                                                    W_regularizer=l2(mlp_l2),
                                                    trainable=config['trainable_story_encoding_mlp']))
        for input, output in zip(ninputs, noutputs):
            nodes[output] = story_encoding_mlp(self._get_node(input))
        qa_encoding_mlp = NTimeDistributed(Dense(D, init="identity", activation=activation,
                                                 W_regularizer=l2(mlp_l2),
                                                 trainable=config['trainable_answer_plus_question_mlp']))

        nodes['answer_plus_question_mlp'] = qa_encoding_mlp(nodes['answer_plus_question'])
        nodes['story_encoding_mlp_dropout'] = Dropout(p)(nodes['story_encoding_mlp'])
        nodes['answer_plus_question_mlp_dropout'] = Dropout(p)(nodes['answer_plus_question_mlp'])

        # norm
        unit_layer = UnitNormalization()
        nodes['story_encoding_mlp_dropout_norm'] = unit_layer(nodes['story_encoding_mlp_dropout'])
        nodes['answer_plus_question_norm'] = unit_layer(nodes['answer_plus_question_mlp_dropout'])
        # cosine
        nodes['story_dot_answer'] = merge([nodes['story_encoding_mlp_dropout_norm'],
                                           nodes['answer_plus_question_norm']],
                                          mode='dot', dot_axes=[2, 2])

        # WORD LEVEL
        # story mlps for word score and distance score
        trainable_word_mlp = self.model_config['PHM']['trainable_word_mlp']

        if trainable_word_mlp:
            story_word_dense = NTimeDistributed(
                Dense(D, init="identity", activation=activation, W_regularizer=l2(mlp_l2),
                      trainable=trainable_word_mlp), first_n=3)
            # q mlps for word and distance scores
            q_or_a_word_dense = NTimeDistributed(
                Dense(D, init="identity", activation=activation, W_regularizer=l2(mlp_l2),
                      trainable=trainable_word_mlp), first_n=3)
        else:
            linear_activation = Activation('linear')
            story_word_dense = linear_activation
            q_or_a_word_dense = linear_activation

        ninputs, noutputs = [], []
        tpls = [(True, 'story_word_embedding1', 'story_word_mlp'),
                ('use_slide_window_inside_sentence', 'reordered_story_word_embedding', 'reordered_story_word_mlp'),
                ('use_slide_window_word', 'story_attentive_word_embedding', 'story_attentive_word_embedding_mlp'),
                ('use_slide_window_reordered_word', 'reordered_story_attentive_word_embedding', 'reordered_story_attentive_word_embedding_mlp')
                ]
        for tpl in tpls:
            a, b, c = tpl
            if a is True or config[a]:
                ninputs.append(b)
                noutputs.append(c)
                if b in ['reordered_story_word_embedding', 'story_word_embedding1']:
                    for ngram in config['ngram_inputs']:
                        ninputs.append('%s_%sgram' % (b, ngram))
                        noutputs.append('%s_%sgram' % (c, ngram))

        for input, output in zip(ninputs, noutputs):
            nodes[output] = story_word_dense(self._get_node(input))
        inputs = ['question_word_embedding', 'answer_word_embedding', 'qa_word_embedding']
        outputs = ['question_word_mlp', 'answer_word_mlp', 'qa_word_mlp']
        for input, output in zip(inputs, outputs):
            nodes[output] = q_or_a_word_dense(self._get_node(input))

        # SIMILARITY MATRICES
        # first for word scores
        # cosine similarity matrix based on sentence and q
        nodes['sim_matrix_q'] = WordByWordMatrix(is_q=True)([nodes['story_word_mlp'], nodes['question_word_mlp']])

        # cosine similarity matrix based on sentence and a
        nodes['sim_matrix_a'] = WordByWordMatrix()([nodes['story_word_mlp'], nodes['answer_word_mlp']])

        # WORD-BY-WORD SCORES
        # q
        nodes['s_q_wbw_score'] = WordByWordScores(trainable=False, is_q=True, alpha=1., threshold=0.15,
                                                  wordbyword_merge_type=config['wordbyword_merge_type'],
                                                  )([nodes['sim_matrix_q'], nodes['__w_question_wbw']])
        # a
        nodes['s_a_wbw_score'] = WordByWordScores(trainable=False, alpha=1., threshold=0.15,
                                                  wordbyword_merge_type=config['wordbyword_merge_type'], )(
            [nodes['sim_matrix_a'], nodes['__w_answer_wbw']])
        # mean
        nodes['story_dot_answer_words'] = GeneralizedMean(mean_type=config['mean_type'],
                                                          trainable=config['trainable_story_dot_answer_words']) \
            ([nodes['s_q_wbw_score'], nodes['s_a_wbw_score']])

        # SLIDING WINDOW INSIDE SENTENCE
        if config['use_slide_window_inside_sentence']:
            # q+a mlp for word score
            # construct cosine similarity matrix based on sentence and qa, for word score
            _inputs = [nodes['reordered_story_word_mlp'], nodes['qa_word_mlp']]
            nodes['wordbyword_slide_sum_within_sentence'] = \
                WordByWordSlideSumInsideSentence(len(_inputs),
                                                 window_size=config['window_size_word_inside'],
                                                 alpha=config['alpha_slide_window_word_inside'],
                                                 use_gaussian_window=config['use_gaussian_window_word_inside'],
                                                 gaussian_std=config['gaussian_sd_word_inside'],
                                                 trainable=config['trainable_slide_window_word_inside'])(_inputs)

        # COMBINE LEVELS
        # sum word-based and sentence-based similarity scores
        inputs = ['story_dot_answer_words', 'story_dot_answer']
        if config['use_slide_window_sentence']:
            inputs.append('story_dot_answer_slide')
            nodes["story_dot_answer_slide"] = SlideSum(alpha=config['alpha_slide_window'],
                                                       use_gaussian_window=config['use_gaussian_window'],
                                                       trainable=config['trainable_slide_window'])(
                nodes['story_dot_answer'])

        if config['use_slide_window_inside_sentence']:
            inputs.append('wordbyword_slide_sum_within_sentence')

        if self.model_config['PHM']['use_depend_score']:
            # SENTENCE-QA DEPENDENCY LEVEL
            inputs.append('lcc_score_matrix')
            nodes['lcc_score_matrix'] = DependencyDistanceScore(config['alpha_depend_score'])(
                self._get_node('input_dep'))

        # sum scores from different component of the model on sentence level.
        # sentence level score merge
        layers_s_input = [nodes[x] for x in inputs]
        weights_s = [1.] * len(layers_s_input)
        nodes['word_plus_sent_sim'] = Combination(len(layers_s_input), input_dim=3, weights=weights_s,
                                                  combination_type=config['sentence_ensemble'],
                                                  trainable=config['trainable_sentence_ensemble'])(layers_s_input)

        # extract max over sentences
        nodes['story_dot_answer_max'] = TimeDistributedMerge(mode='max', axis=1)(nodes['word_plus_sent_sim'])

        # word sliding window
        word_sliding_window_output = ['story_dot_answer_max']
        if config['use_slide_window_word']:
            # q+a mlp for word score
            # construct cosine similarity matrix based on sentence and qa, for word score
            temp_inputs = [nodes['story_attentive_word_embedding_mlp'], nodes['qa_word_mlp']]
            if config['use_qa_idf']:
                temp_inputs.append(nodes['__w_question_answer'])
            nodes['wordbyword_slide_sum'] = WordByWordSlideSum(len(temp_inputs),
                                                               window_size=config['window_size_word'],
                                                               alpha=config['alpha_slide_window_word'],
                                                               use_gaussian_window=config['use_gaussian_window_word'],
                                                               gaussian_std=config['gaussian_sd_word'],
                                                               trainable=config['trainable_slide_window_word'])(
                temp_inputs)
            word_sliding_window_output.append('wordbyword_slide_sum')

        if config['use_slide_window_reordered_word']:
            # q+a mlp for word score
            # construct cosine similarity matrix based on sentence and qa, for word score
            temp_inputs = [nodes['reordered_story_attentive_word_embedding_mlp'], nodes['qa_word_mlp']]
            if config['use_qa_idf']:
                temp_inputs.append(nodes['__w_question_answer'])
            nodes['reordered_wordbyword_slide_sum'] = WordByWordSlideSum(len(temp_inputs),
                                                                         window_size=config[
                                                                             'window_size_reordered_word'],
                                                                         alpha=config[
                                                                             'alpha_slide_window_reordered_word'],
                                                                         use_gaussian_window=config[
                                                                             'use_gaussian_window_reordered_word'],
                                                                         gaussian_std=config[
                                                                             'gaussian_sd_reordered_word'],
                                                                         trainable=config[
                                                                             'trainable_slide_window_reordered_word'])(
                temp_inputs
                )
            word_sliding_window_output.append('reordered_wordbyword_slide_sum')

        # Extract top_n sentence for each answer
        if config['top_n_wordbyword']:
            layers_name = ['word_plus_sent_sim', 'story_word_embedding1', 'qa_word_embedding', '__w_question_answer']
            layers = [nodes[x] for x in layers_name]
            top_n_name = 'top_n_wordbyword'
            nodes[top_n_name] = TopNWordByWord(top_n=config['top_n'], nodes=nodes, use_sum=config['top_n_use_sum'],
                                               trainable=True)(layers)
            word_sliding_window_output.append(top_n_name)

        ngram_output = [self._add_ngram_network(ngram, story_encoding_mlp) for ngram in config['ngram_inputs']]

        # final score merge
        layers_input = [nodes[x] for x in word_sliding_window_output + ngram_output]
        weights = [1.] * len(layers_input)
        for i in range(len(ngram_output)):
            weights[-i - 1] = 1.

        """
        # also aggregate scores that were already aggregated on sentence level.
        sentence_level_weight = 0.1
        for layer_name in sentence_level_merge_layers:
            layer_max = layer_name + "_max"
            if layer_max not in nodes:
                add_node(TimeDistributedMergeEnhanced(mode='max'), layer_max, input=layer_name)
            layers_input.append(nodes[layer_max])
            weights.append(sentence_level_weight)"""

        nodes['story_dot_answer_combined_max'] = Combination(len(layers_input), weights=weights,
                                                             combination_type=config['answer_ensemble'],
                                                             trainable=config['trainable_answer_ensemble'])(
            layers_input)

        # apply not-switch
        input_mul = self._get_node('input_negation_questions')
        nodes['story_dot_answer_max_switch'] = merge([nodes['story_dot_answer_combined_max'], input_mul], mode='mul')

        activation_final = Activation('linear', name='y_hat') \
            if self.model_config['optimizer']['loss'] == 'ranking_loss' else Activation(
            'softmax', name='y_hat')
        prediction = activation_final(nodes['story_dot_answer_max_switch'])

        inputs = list(self.inputs_nodes.values())
        model = Model(input=inputs, output=prediction)
        optimizer = self._get_optimizer()
        model.compile(loss=self._get_loss_dict(), optimizer=optimizer, metrics={'y_hat': 'accuracy'})
        self.graph = model
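
# UnitNormalization is a custom layer assumed by the snippet above: the
# sentence-level score 'story_dot_answer' is a cosine similarity, obtained by
# L2-normalizing both encodings and taking a batched dot product. A minimal
# sketch of such a layer:
from keras import backend as K
from keras.engine.topology import Layer

class UnitNormalization(Layer):
    """Scales each vector along the last axis to unit L2 norm."""

    def call(self, x, mask=None):
        return K.l2_normalize(x, axis=-1)

    def compute_output_shape(self, input_shape):
        return input_shape
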
Beispiel #38
    def create_model(self, embedding_dimensions, lstm_dimensions, dense_dimensions, optimizer, embeddings=None,
                     embeddings_trainable=True):
        """
        creates the neural network model, optionally using precomputed embeddings applied to the training data
        :return: 
        """

        num_words = len(self.tokenizer.word_index)
        logger.info('Creating a model based on %s unique tokens.', num_words)

        # create the shared embedding layer (with or without pre-trained weights)
        embedding_shared = None

        if embeddings is None:
            embedding_shared = Embedding(num_words + 1, embedding_dimensions, input_length=None, mask_zero=True,
                                         trainable=embeddings_trainable, name="embedding_shared")
        else:
            logger.info('Importing pre-trained word embeddings.')
            embeddings_index = load_embeddings(embeddings)

            # indices in word_index start with a 1, 0 is reserved for masking padded value
            embedding_matrix = np.zeros((num_words + 1, embedding_dimensions))

            for word, i in self.tokenizer.word_index.items():
                embedding_vector = embeddings_index.get(word)
                if embedding_vector is not None:
                    embedding_matrix[i] = embedding_vector
                else:
                    # words not found in the embedding index stay all-zeros
                    logger.warning('Word not found in embeddings: %s', word)

            embedding_shared = Embedding(num_words + 1, embedding_dimensions, input_length=None, mask_zero=True,
                                         trainable=embeddings_trainable, weights=[embedding_matrix],
                                         name="embedding_shared")

        input_state = Input(batch_shape=(None, None), name="input_state")
        input_action = Input(batch_shape=(None, None), name="input_action")

        embedding_state = embedding_shared(input_state)
        embedding_action = embedding_shared(input_action)

        lstm_shared = LSTM(lstm_dimensions, name="lstm_shared")
        lstm_state = lstm_shared(embedding_state)
        lstm_action = lstm_shared(embedding_action)

        dense_state = Dense(dense_dimensions, activation='tanh', name="dense_state")(lstm_state)
        dense_action = Dense(dense_dimensions, activation='tanh', name="dense_action")(lstm_action)

        model_state = Model(inputs=input_state, outputs=dense_state, name="state")
        model_action = Model(inputs=input_action, outputs=dense_action, name="action")

        self.model_state = model_state
        self.model_action = model_action

        input_dot_state = Input(shape=(dense_dimensions,))
        input_dot_action = Input(shape=(dense_dimensions,))
        dot_state_action = Dot(axes=-1, normalize=True, name="dot_state_action")([input_dot_state, input_dot_action])

        model_dot_state_action = Model(inputs=[input_dot_state, input_dot_action], outputs=dot_state_action,
                                       name="dot_state_action")
        self.model_dot_state_action = model_dot_state_action

        model = Model(inputs=[model_state.input, model_action.input],
                      outputs=model_dot_state_action([model_state.output, model_action.output]),
                      name="model")
        model.compile(optimizer=optimizer, loss='mse')

        self.model = model

        print('---------------')
        print('Complete model:')
        model.summary()
        print('---------------')
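
# A sketch of how the three models fit together at inference time: the state
# and action branches embed variable-length token sequences into fixed-size
# vectors, and the dot model scores their cosine similarity (normalize=True).
# Hypothetical usage, assuming `agent` holds this object and its tokenizer was
# fitted elsewhere:
#
#   state_seq = agent.tokenizer.texts_to_sequences(["you are in a dark room"])
#   action_seq = agent.tokenizer.texts_to_sequences(["open the door"])
#   q = agent.model.predict([np.array(state_seq), np.array(action_seq)])
#
# Embeddings can also be precomputed and compared cheaply:
#
#   s = agent.model_state.predict(np.array(state_seq))
#   a = agent.model_action.predict(np.array(action_seq))
#   score = agent.model_dot_state_action.predict([s, a])
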
Beispiel #39
def rpn_initialize(options):
    config_output_filename = options.config_filename

    with open(config_output_filename, 'rb') as f_in:
        c = pickle.load(f_in)

    import nn_cnn_3_layer as nn

    # turn off any data augmentation at test time
    c.use_horizontal_flips = False
    c.use_vertical_flips = False
    c.rot_90 = False

    img_list_path = options.test_path

    class_mapping = c.class_mapping

    if 'bg' not in class_mapping:
        class_mapping['bg'] = len(class_mapping)

    class_mapping = {v: k for k, v in class_mapping.items()}
    print(class_mapping)
    class_to_color = {class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping}
    c.num_rois = int(options.num_rois)

    # if c.network == 'resnet50':
    #     num_features = 1024
    # elif c.network == 'vgg':
    #     num_features = 512

    input_shape_img = (None, None, 3)
    # input_shape_features = (None, None, num_features)

    img_input = Input(shape=input_shape_img)
    # roi_input = Input(shape=(c.num_rois, 4))
    # feature_map_input = Input(shape=input_shape_features)

    # define the base network
    shared_layers = nn.nn_base(img_input, trainable=True)

    # define the RPN, built on the base layers
    num_anchors = len(c.anchor_box_scales) * len(c.anchor_box_ratios)
    rpn_layers = nn.rpn(shared_layers, num_anchors)

    # classifier = nn.classifier(feature_map_input, roi_input, c.num_rois, nb_classes=len(class_mapping), trainable=True)

    model_rpn = Model(img_input, rpn_layers)
    # model_classifier_only = Model([feature_map_input, roi_input], classifier)

    # model_classifier = Model([feature_map_input, roi_input], classifier)

    print('Loading weights from {}'.format(options.rpn_weight_path))
    model_rpn.load_weights(options.rpn_weight_path, by_name=True)
    # model_classifier.load_weights(c.model_path, by_name=True)

    model_rpn.compile(optimizer='sgd', loss='mse')
    # model_classifier.compile(optimizer='sgd', loss='mse')
    model_rpn.summary()

    model_classifier = load_model(options.classify_model_path)
    model_classifier.summary()

    all_imgs = []

    classes = {}

    bbox_threshold = 0.8

    visualise = True

    return c, model_rpn, model_classifier
Beispiel #40
def unet_model_3d(first_input_shape, second_input_shape, nb_classes, feature_size):

    channel_first_first_input = Input(first_input_shape)
    first_input = Permute([2,3,4,1])(channel_first_first_input)

    first_conv_permute = Permute([4,2,3,1])(first_input)
    first_gpooling_0 = GlobalAveragePooling3D()(first_conv_permute)
    first_gpooling_dense_0 = Dense(units = 32, activation='linear')(first_gpooling_0)
    first_gpooling_dense_1_1 = Dense(units = 29, activation='sigmoid')(first_gpooling_dense_0)
    first_gpooling_fused_2 = Lambda(fuse)([first_conv_permute,first_gpooling_dense_1_1])

    first_conv_layer0 = Conv3D(8, (5, 5, 5), padding = 'same', activation='linear')(first_input)

    first_conv_permute = Permute([4,2,3,1])(first_conv_layer0)
    first_gpooling_0 = GlobalAveragePooling3D()(first_conv_permute)
    first_gpooling_dense_0 = Dense(units = 32, activation='linear')(first_gpooling_0)
    first_gpooling_dense_1_0 = Dense(units = 29, activation='sigmoid')(first_gpooling_dense_0)
    first_gpooling_fused_0 = Lambda(fuse)([first_conv_permute,first_gpooling_dense_1_0])

    first_conv_layer1 = Conv3D(8, (3, 3, 3), padding = 'same', activation='linear')(first_conv_layer0)

    first_conv_permute = Permute([4,2,3,1])(first_conv_layer1)
    first_gpooling_0 = GlobalAveragePooling3D()(first_conv_permute)
    first_gpooling_dense_0 = Dense(units = 32, activation='linear')(first_gpooling_0)
    first_gpooling_dense_1_1 = Dense(units = 29, activation='sigmoid')(first_gpooling_dense_0)
    first_gpooling_fused_1 = Lambda(fuse)([first_conv_permute,first_gpooling_dense_1_1])

    first_gpooling_add_0 = Add()([first_gpooling_fused_0, first_gpooling_fused_1,first_gpooling_fused_2])


    first_conv_layer2 = Conv2D(16, (3, 3), padding = 'same', activation='linear')(first_gpooling_add_0)
    first_pooling_layer1 = MaxPooling2D(pool_size=(2, 2))(first_conv_layer2)

    first_conv_layer3 = Conv2D(16, (3, 3), padding = 'same', activation='linear')(first_pooling_layer1)
    first_pooling_layer2 = MaxPooling2D(pool_size=(2, 2))(first_conv_layer3)

    first_conv_layer4 = Conv2D(16, (3, 3), padding = 'same', activation='linear')(first_pooling_layer2)
    first_pooling_layer3 = MaxPooling2D(pool_size=(2, 2),padding='same')(first_conv_layer4)

    first_flatten_layer1 = Flatten()(first_pooling_layer3)
    first_dense_layer1 = Dense(units = feature_size, activation='relu')(first_flatten_layer1)
    first_dense_layer2 = Dense(units=feature_size, activation='relu')(first_dense_layer1)

    base_model = ResNet50(weights='imagenet',include_top=False,input_shape=[128,128,3])
    second_input = base_model.input
    resnet50_activation_98_output = base_model.output
    resnet50_gpooling = GlobalAveragePooling2D()(resnet50_activation_98_output)


    concat_layer = concatenate([first_dense_layer2, resnet50_gpooling],axis = 1)

    input_target = Input(shape=(1,))  
    centers = Embedding(nb_classes, feature_size*3)(input_target)
    l2_loss = Lambda(center_loss, name='l2_loss')([concat_layer, centers])

    concat_result = Dense(units = nb_classes, activation = 'softmax',name = 'softmax')(concat_layer)
    concat_model = Model(inputs=[channel_first_first_input,second_input,input_target], outputs = [concat_result,l2_loss])
    concat_test_model = Model(inputs=[channel_first_first_input, second_input], outputs=concat_result)

    # return model_train, model_test, second_train_model
    return concat_model,concat_test_model
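
# center_loss and fuse are helpers assumed by this snippet. The Embedding of
# nb_classes centers plus a Lambda is the usual Keras center-loss trick:
# penalize the distance between each feature vector and the center of its
# class. A sketch of center_loss under that assumption (fuse, a
# channel-reweighting multiply, is omitted):
from keras import backend as K

def center_loss(args):
    features, centers = args
    # centers has shape (batch, 1, feature_dim); drop the middle axis
    return K.sum(K.square(features - centers[:, 0]), axis=1, keepdims=True)
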
Beispiel #41
class RM(BaseModel):
    def __init__(self, max_len=125, class_num=9, use_development_set=True):

        super(RM, self).__init__(use_development_set)

        self.max_len = max_len
        self.class_num = class_num
        self.name = "mask_attention"

    def build_model(self):
        sentence = Concatenate()([
            self.sen_embedding,
            # self.sen_entity_type_embedding,
            self.position_t_embedding,
            self.position_a_embedding
        ])

        sentence = Bidirectional(
            GRU(300,
                activation="relu",
                return_sequences=True,
                recurrent_dropout=0.3,
                dropout=0.3))(sentence)

        average_layer = Lambda(average, output_shape=no_change)
        position_mt = average_layer(self.position_mt)
        position_ma = average_layer(self.position_ma)

        trigger = Dot(axes=[1, 1])([sentence, position_mt])  # (?, 600)
        entity = Dot(axes=[1, 1])([sentence, position_ma])  # (?, 600)

        attention = Dense(1, activation='tanh')(sentence)  # (?, 125, 1)
        attention = Lambda(lambda x: K.softmax(K.squeeze(x, axis=2)),
                           output_shape=output_shape2)(attention)  # (?, 125)
        weighted_sentence = Dot(axes=[1, 1])([sentence, attention])

        x_layer = Lambda(
            lambda x: K.reshape(x, [-1, self.TRIGGER_TYPE_VEC_DIM]),
            output_shape=output_shape)
        trigger_type = x_layer(self.trigger_type_embedding)  # (?, 50)
        entity_type = x_layer(self.entity_type_embedding)  # (?, 50)

        x = Concatenate()(
            [trigger_type, entity_type, trigger, entity,
             weighted_sentence])  # (?, 1300)
        x = Dropout(rate=0.5)(x)
        output = Dense(self.class_num, activation='softmax')(x)

        return output

    def train_model(self, max_epoch=30):

        evaluator = Evaluator(true_labels=self.test_labels,
                              sentences=self.test_sentence_words_input,
                              position_mt=self.test_position_mt,
                              position_me=self.test_position_ma,
                              correction_factor=self.correction_factor)
        log = open("../log/" + self.name + ".txt", 'a+', encoding='utf-8')
        for i in range(max_epoch):
            self.model.fit(
                {
                    'sentence_word': self.train_sentence_words_input,
                    # 'sentence_entity_type': self.train_sentence_entity_inputs,
                    'position_t': self.train_position_t,
                    'position_a': self.train_position_a,
                    'trigger_type': self.train_trigger_type,
                    'entity_type': self.train_entity_type,
                    'trigger_mask': self.train_position_mt,
                    'entity_mask': self.train_position_ma
                },
                self.train_labels,
                epochs=1,
                batch_size=256,
                verbose=1)

            print("# -- test set --- #")
            results = self.model.predict(
                {
                    'sentence_word': self.test_sentence_words_input,
                    # 'sentence_entity_type': self.test_sentence_entity_inputs,
                    'position_t': self.test_position_t,
                    'position_a': self.test_position_a,
                    'trigger_type': self.test_trigger_type,
                    'entity_type': self.test_entity_type,
                    'trigger_mask': self.test_position_mt,
                    'entity_mask': self.test_position_ma
                },
                batch_size=128,
                verbose=0)

            print("--------------epoch " + str(i + 1) +
                  " ---------------------")
            macro_f1, micro_F1, p, r = evaluator.get_f1(predictions=results,
                                                        epoch=i + 1)

            log.write("epoch: " + str(i + 1) + " " + str(p) + " " + str(r) +
                      " " + str(micro_F1) + "\n")
            if (i + 1) % 5 == 0:
                print("current max macro_F1 score: " +
                      str(evaluator.max_macro_F1 * 100))
                print("max macro_F1 is gained in epoch " +
                      str(evaluator.max_macro_F1_epoch))
                print("current max micro_F1 score: " +
                      str(evaluator.max_micro_F1 * 100))
                print("max micro_F1 is gained in epoch " +
                      str(evaluator.max_micro_F1_epoch))

                log.write("current max macro_F1 score: " +
                          str(evaluator.max_macro_F1 * 100) + "\n")
                log.write("max macro_F1 is gained in epoch " +
                          str(evaluator.max_macro_F1_epoch) + "\n")
                log.write("current max micro_F1 score: " +
                          str(evaluator.max_micro_F1 * 100) + "\n")
                log.write("max micro_F1 is gained in epoch " +
                          str(evaluator.max_micro_F1_epoch) + "\n")
            print(
                "------------------------------------------------------------")
        log.close()

    def make_input(self):

        inputs = [None] * 7

        inputs[0] = Input(shape=(self.max_len, ),
                          dtype='int32',
                          name='sentence_word')
        # inputs[1] = Input(shape=(self.max_len,), dtype='int32', name='sentence_entity_type')
        inputs[1] = Input(shape=(self.max_len, ),
                          dtype='int32',
                          name='position_t')
        inputs[2] = Input(shape=(self.max_len, ),
                          dtype='int32',
                          name='position_a')
        inputs[3] = Input(shape=(1, ), dtype='int32', name='trigger_type')
        inputs[4] = Input(shape=(1, ), dtype='int32', name='entity_type')

        inputs[5] = Input(shape=(self.max_len, ),
                          dtype='float32',
                          name='trigger_mask')
        inputs[6] = Input(shape=(self.max_len, ),
                          dtype='float32',
                          name='entity_mask')

        return inputs

    def compile_model(self):
        inputs = [
            self.sen_input, self.position_t, self.position_a,
            self.trigger_type_input, self.entity_type_input, self.position_mt,
            self.position_ma
        ] = self.make_input()

        [
            self.sen_embedding, self.position_t_embedding,
            self.position_a_embedding, self.trigger_type_embedding,
            self.entity_type_embedding
        ] = self.embedded()

        self.output = self.build_model()

        self.model = Model(inputs=inputs, outputs=self.output)
        self.model.compile(optimizer='adam',
                           loss='categorical_crossentropy',
                           metrics=['acc'])
        self.model.summary()

    def embedded(self):

        self.sentence_embedding_layer = Embedding(
            self.num_words + 2,
            self.EMBEDDING_DIM,
            weights=[self.embedding_matrix],
            input_length=self.max_len,
            trainable=False,
            mask_zero=True)
        sentence_embedding = self.sentence_embedding_layer(self.sen_input)

        position_t_embedding_layer = Embedding(125,
                                               self.POSITION_VEC_DIM,
                                               input_length=self.max_len,
                                               trainable=True,
                                               mask_zero=False)
        position_t_embedding = position_t_embedding_layer(self.position_t)

        position_a_embedding_layer = Embedding(125,
                                               self.POSITION_VEC_DIM,
                                               input_length=self.max_len,
                                               trainable=True,
                                               mask_zero=False)
        position_a_embedding = position_a_embedding_layer(self.position_a)

        trigger_type_embedding_layer = Embedding(38,
                                                 self.TRIGGER_TYPE_VEC_DIM,
                                                 input_length=1,
                                                 trainable=True,
                                                 mask_zero=False)

        trigger_type_embedding = trigger_type_embedding_layer(
            self.trigger_type_input)
        entity_type_embedding = trigger_type_embedding_layer(
            self.entity_type_input)

        return [
            sentence_embedding, position_t_embedding, position_a_embedding,
            trigger_type_embedding, entity_type_embedding
        ]
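
# average (used via Lambda in RM.build_model) is an assumed helper:
# position_mt and position_ma are 0/1 masks over the tokens, and
# Dot(axes=[1, 1]) between the BiGRU states and a normalized mask is a masked
# mean over the selected time steps. A sketch of average and the shape helper
# it is paired with:
from keras import backend as K

def average(mask):
    # normalize so the following dot product averages rather than sums
    return mask / (K.sum(mask, axis=1, keepdims=True) + K.epsilon())

def no_change(input_shape):
    return input_shape
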
def unet_model_3d(input_shape,
                  pool_size=(2, 2, 2),
                  n_labels=n_labels,
                  deconvolution=False,
                  depth=3,
                  n_base_filters=32,
                  include_label_wise_dice_coefficients=False,
                  batch_normalization=True,
                  activation_name="sigmoid"):
    """
    Builds the 3D UNet Keras model.f
    :param metrics: List metrics to be calculated during model training (default is dice coefficient).
    :param include_label_wise_dice_coefficients: If True and n_labels is greater than 1, model will report the dice
    coefficient for each label as metric.
    :param n_base_filters: The number of filters that the first layer in the convolution network will have. Following
    layers will contain a multiple of this number. Lowering this number will likely reduce the amount of memory required
    to train the model.
    :param depth: indicates the depth of the U-shape for the model. The greater the depth, the more max pooling
    layers will be added to the model. Lowering the depth may reduce the amount of memory required for training.
    :param input_shape: Shape of the input data (n_chanels, x_size, y_size, z_size). The x, y, and z sizes must be
    divisible by the pool size to the power of the depth of the UNet, that is pool_size^depth.
    :param pool_size: Pool size for the max pooling operations.
    :param n_labels: Number of binary labels that the model is learning.
    :param initial_learning_rate: Initial learning rate for the model. This will be decayed during training.
    :param deconvolution: If set to True, will use transpose convolution(deconvolution) instead of up-sampling. This
    increases the amount memory required during training.
    :return: Untrained 3D UNet Model
    """
    inputs = Input(input_shape)

    # set image specifics
    kernel = (3, 3, 3)  # kernel size
    s = 2  # stride

    #img_height, img_width = img_size[0], img_size[1]
    padding = 'same'

    conv1 = create_convolution_block(
        input_layer=inputs,
        n_filters=n_base_filters * 4,  # 256 * 256 *8
        batch_normalization=batch_normalization)
    conv1 = create_convolution_block(input_layer=conv1,
                                     n_filters=n_base_filters * 4,
                                     batch_normalization=batch_normalization)
    pool1 = Conv3D(n_base_filters, (3, 3, 3),
                   padding=padding,
                   strides=(2, 2, 1))(conv1)

    conv2 = create_convolution_block(
        input_layer=pool1,
        n_filters=n_base_filters * 4,  # 128 * 128 *8
        batch_normalization=batch_normalization)
    conv2 = create_convolution_block(input_layer=conv2,
                                     n_filters=n_base_filters * 4,
                                     batch_normalization=batch_normalization)
    pool2 = Conv3D(n_base_filters * 2, (3, 3, 3),
                   padding=padding,
                   strides=(1, 1, 2))(conv2)

    conv3 = create_convolution_block(
        input_layer=pool2,
        n_filters=n_base_filters * 2,  # 128 * 128 *4
        batch_normalization=batch_normalization)
    conv3 = create_convolution_block(input_layer=conv3,
                                     n_filters=n_base_filters,
                                     batch_normalization=batch_normalization)
    pool3 = Conv3D(n_base_filters * 4, (3, 3, 3),
                   padding=padding,
                   strides=(2, 2, 1))(conv3)

    conv4 = create_convolution_block(
        input_layer=pool3,
        n_filters=n_base_filters * 2,  # 64 * 64 *4
        batch_normalization=batch_normalization)
    conv4 = create_convolution_block(input_layer=conv4,
                                     n_filters=n_base_filters,
                                     batch_normalization=batch_normalization)
    pool4 = Conv3D(n_base_filters * 2, (3, 3, 3),
                   padding=padding,
                   strides=(2, 2, 1))(conv4)

    conv5 = create_convolution_block(
        input_layer=pool4,
        n_filters=n_base_filters,  # 32 * 32 *4
        batch_normalization=batch_normalization)
    conv5 = create_convolution_block(input_layer=conv5,
                                     n_filters=n_base_filters,
                                     batch_normalization=batch_normalization)
    pool5 = Conv3D(n_base_filters * 4, (3, 3, 3),
                   padding=padding,
                   strides=(2, 2, 2))(conv5)

    conv6 = create_convolution_block(
        input_layer=pool5,
        n_filters=n_base_filters * 2,  # 16 * 16 *2
        batch_normalization=batch_normalization)
    conv6 = create_convolution_block(input_layer=conv6,
                                     n_filters=n_base_filters * 2,
                                     batch_normalization=batch_normalization)

    convskip0 = Conv3D(n_base_filters * 8, (1, 1, 1),
                       padding=padding,
                       strides=(1, 1, 1))(inputs)
    convskip0 = BatchNormalization(axis=-1)(convskip0)
    convskip0 = Activation('relu')(convskip0)

    convskip0 = Conv3D(n_base_filters * 4, (2, 2, 1),
                       padding=padding,
                       strides=(1, 1, 1))(convskip0)
    convskip0 = BatchNormalization(axis=-1)(convskip0)
    convskip0 = Activation('relu')(convskip0)

    convskip0 = Conv3D(n_base_filters * 4, (2, 2, 1),
                       padding=padding,
                       strides=(1, 1, 1))(convskip0)
    convskip0 = BatchNormalization(axis=-1)(convskip0)
    convskip0 = Activation('relu')(convskip0)

    convskip0 = Conv3D(n_base_filters * 4, (1, 1, 2),
                       padding=padding,
                       strides=(1, 1, 1))(convskip0)
    convskip0 = BatchNormalization(axis=-1)(convskip0)
    convskip0 = Activation('relu')(convskip0)

    convskip1 = Conv3D(n_base_filters * 4, (1, 1, 1),
                       padding=padding,
                       strides=(1, 1, 1))(conv1)
    convskip1 = BatchNormalization(axis=-1)(convskip1)
    convskip1 = Activation('relu')(convskip1)

    convskip1 = Conv3D(n_base_filters * 4, (1, 1, 2),
                       padding=padding,
                       strides=(1, 1, 1))(convskip1)
    convskip1 = BatchNormalization(axis=-1)(convskip1)
    convskip1 = Activation('relu')(convskip1)

    convskip1 = Conv3D(n_base_filters * 4, (1, 1, 1),
                       padding=padding,
                       strides=(1, 1, 1))(convskip1)
    convskip1 = BatchNormalization(axis=-1)(convskip1)
    convskip1 = Activation('relu')(convskip1)

    convskip1 = Conv3D(n_base_filters * 4, (1, 1, 2),
                       padding=padding,
                       strides=(1, 1, 1))(convskip1)
    convskip1 = BatchNormalization(axis=-1)(convskip1)
    convskip1 = Activation('relu')(convskip1)

    convskip2 = Conv3D(n_base_filters * 2, (1, 1, 1),
                       padding=padding,
                       strides=(1, 1, 1))(conv2)
    convskip2 = BatchNormalization(axis=-1)(convskip2)
    convskip2 = Activation('relu')(convskip2)

    convskip2 = Conv3D(n_base_filters * 2, (1, 1, 2),
                       padding=padding,
                       strides=(1, 1, 1))(convskip2)
    convskip2 = BatchNormalization(axis=-1)(convskip2)
    convskip2 = Activation('relu')(convskip2)

    convskip2 = Conv3D(n_base_filters * 2, (1, 1, 1),
                       padding=padding,
                       strides=(1, 1, 1))(convskip2)
    convskip2 = BatchNormalization(axis=-1)(convskip2)
    convskip2 = Activation('relu')(convskip2)

    convskip2 = Conv3D(n_base_filters * 2, (1, 1, 2),
                       padding=padding,
                       strides=(1, 1, 1))(convskip2)
    convskip2 = BatchNormalization(axis=-1)(convskip2)
    convskip2 = Activation('relu')(convskip2)

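    # Decoder: each UpSampling3D size mirrors the stride of the matching
    # "pooling" convolution, so the upsampled tensor can be concatenated with
    # the corresponding encoder and skip-branch features along the channel axis.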
    up_convolution7 = UpSampling3D(size=(2, 2, 2))(conv6)  # 32 x 32 x 4
    concat7 = concatenate([up_convolution7, conv5], axis=-1)
    conv7 = create_convolution_block(input_layer=concat7,
                                     n_filters=n_base_filters * 2,
                                     batch_normalization=batch_normalization)
    conv7 = create_convolution_block(input_layer=conv7,
                                     n_filters=n_base_filters * 2,
                                     batch_normalization=batch_normalization)

    up_convolution8 = UpSampling3D(size=(2, 2, 1))(conv7)  # 64 x 64 x 4
    concat8 = concatenate([up_convolution8, conv4], axis=-1)
    conv8 = create_convolution_block(input_layer=concat8,
                                     n_filters=n_base_filters,
                                     batch_normalization=batch_normalization)
    conv8 = create_convolution_block(input_layer=conv8,
                                     n_filters=n_base_filters,
                                     batch_normalization=batch_normalization)

    up_convolution9 = UpSampling3D(size=(2, 2, 1))(conv8)  # 128 x 128 x 4
    concat9 = concatenate([up_convolution9, conv3], axis=-1)
    conv9 = create_convolution_block(input_layer=concat9,
                                     n_filters=n_base_filters,
                                     batch_normalization=batch_normalization)
    conv9 = create_convolution_block(input_layer=conv9,
                                     n_filters=n_base_filters,
                                     batch_normalization=batch_normalization)

    up_convolution10 = UpSampling3D(size=(1, 1, 2))(conv9)  # 128 x 128 x 8
    concat10 = concatenate([up_convolution10, conv2, convskip2], axis=-1)
    conv10 = create_convolution_block(input_layer=concat10,
                                      n_filters=n_base_filters,
                                      batch_normalization=batch_normalization)
    conv10 = create_convolution_block(input_layer=conv10,
                                      n_filters=n_base_filters,
                                      batch_normalization=batch_normalization)

    up_convolution11 = UpSampling3D(size=(2, 2, 1))(conv10)  # 256 x 256 x 8
    concat11 = concatenate([up_convolution11, conv1, convskip0], axis=-1)
    conv11 = create_convolution_block(input_layer=concat11,
                                      n_filters=n_base_filters * 4,
                                      batch_normalization=batch_normalization)
    conv11 = create_convolution_block(input_layer=conv11,
                                      n_filters=n_base_filters * 4,
                                      batch_normalization=batch_normalization)

    concat12 = concatenate([conv11, convskip1], axis=-1)
    concat12 = create_convolution_block(
        input_layer=concat12,
        n_filters=n_base_filters * 4,
        batch_normalization=batch_normalization)
    concat12 = create_convolution_block(
        input_layer=concat12,
        n_filters=n_base_filters * 4,
        batch_normalization=batch_normalization)

    final_convolution = Conv3D(n_labels, (1, 1, 1))(concat12)  # per-voxel label scores
    act = Activation(activation_name)(final_convolution)
    model = Model(inputs=inputs, outputs=act)

    return model
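
# A minimal sketch of a `create_convolution_block` helper compatible with the
# calls above. This is an assumption inferred from the keyword arguments used
# in this example (input_layer, n_filters, batch_normalization), not the
# original helper; adjust kernel size, activation, and normalization axis to
# your setup.
from keras.layers import Conv3D, BatchNormalization, Activation

def create_convolution_block_sketch(input_layer, n_filters,
                                    batch_normalization=True,
                                    kernel=(3, 3, 3), padding='same',
                                    strides=(1, 1, 1)):
    # Conv3D -> (optional) BatchNormalization -> ReLU, mirroring the
    # channels-last (axis=-1) convention used in the network above.
    layer = Conv3D(n_filters, kernel, padding=padding,
                   strides=strides)(input_layer)
    if batch_normalization:
        layer = BatchNormalization(axis=-1)(layer)
    return Activation('relu')(layer)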