def __init__(self, shape, action_count: int):
        super().__init__()

        inp = Input(shape=shape)
        flat = Flatten()(inp)

        # Activation: relu, sigmoid, ...
        hidden1 = Dense(256, activation='relu')(flat)
        hidden2 = Dense(64, activation='relu')(hidden1)
        hidden3 = Dense(16, activation='relu')(hidden2)
        output = Dense(action_count, activation='softmax')(hidden3)

        self.model = Model(inputs=inp, outputs=output)
        self.model.summary()  # summary() prints the table itself; print() around it would just print None

        self.memory = SequentialMemory(limit=50000,
                                       window_length=WINDOW_LENGTH)
        self.policy = LinearAnnealedPolicy(EpsGreedyQPolicy(),
                                           attr='eps',
                                           value_max=1.,
                                           value_min=.1,
                                           value_test=.05,
                                           nb_steps=1000)
        self.callbacks = self.build_callbacks("msnake")
        self.dqn = DQNAgent(model=self.model,
                            nb_actions=action_count,
                            memory=self.memory,
                            nb_steps_warmup=50,
                            target_model_update=1e-2,
                            policy=self.policy)

        # keras-rl expects the optimizer to have a _name attribute:
        # https://github.com/keras-rl/keras-rl/issues/345
        Adam._name = "fix_bug"
        # lr: learning rate; other metric options: mae, accuracy
        self.dqn.compile(Adam(lr=1e-5), metrics=['mse'])
Example 2
    def build(self):
        """
        Builds the full Keras model and stores it in self.model.
        """
        mc = self.config
        in_x = x = Input((12, 8, 8))

        # (batch, channels, height, width)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_first_filter_size,
                   padding="same",
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="input_conv-" + str(mc.cnn_first_filter_size) + "-" +
                   str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1, name="input_batchnorm")(x)
        x = Activation("relu", name="input_relu")(x)

        for i in range(mc.res_layer_num):
            x = self._build_residual_block(x, i + 1)

        res_out = x

        # for policy output
        x = Conv2D(filters=2,
                   kernel_size=1,
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="policy_conv-1-2")(res_out)
        x = BatchNormalization(axis=1, name="policy_batchnorm")(x)
        x = Activation("relu", name="policy_relu")(x)
        x = Flatten(name="policy_flatten")(x)

        policy_out = Dense(self.config.n_labels,
                           kernel_regularizer=l2(mc.l2_reg),
                           activation="softmax",
                           name="policy_out")(x)

        # for value output
        x = Conv2D(filters=4,
                   kernel_size=1,
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="value_conv-1-4")(res_out)
        x = BatchNormalization(axis=1, name="value_batchnorm")(x)
        x = Activation("relu", name="value_relu")(x)
        x = Flatten(name="value_flatten")(x)
        x = Dense(mc.value_fc_size,
                  kernel_regularizer=l2(mc.l2_reg),
                  activation="relu",
                  name="value_dense")(x)
        value_out = Dense(1,
                          kernel_regularizer=l2(mc.l2_reg),
                          activation="tanh",
                          name="value_out")(x)

        self.model = Model(in_x, [policy_out, value_out], name="chess_model")
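`_build_residual_block` is not shown in this snippet. A minimal sketch of what it could look like, assuming the usual AlphaZero-style conv / batch-norm / skip-add layout and the same hypothetical config attributes (`cnn_filter_num`, `l2_reg`) used above:

```python
def _build_residual_block(self, x, index):
    # Drop-in method for the class above; kernel_size=3 is an assumption.
    # Conv2D, BatchNormalization, Activation, Add come from keras.layers,
    # l2 from keras.regularizers, as in the surrounding code.
    mc = self.config
    in_x = x
    x = Conv2D(filters=mc.cnn_filter_num,
               kernel_size=3,
               padding="same",
               data_format="channels_first",
               use_bias=False,
               kernel_regularizer=l2(mc.l2_reg),
               name="res" + str(index) + "_conv1")(x)
    x = BatchNormalization(axis=1, name="res" + str(index) + "_batchnorm1")(x)
    x = Activation("relu", name="res" + str(index) + "_relu1")(x)
    x = Conv2D(filters=mc.cnn_filter_num,
               kernel_size=3,
               padding="same",
               data_format="channels_first",
               use_bias=False,
               kernel_regularizer=l2(mc.l2_reg),
               name="res" + str(index) + "_conv2")(x)
    x = BatchNormalization(axis=1, name="res" + str(index) + "_batchnorm2")(x)
    x = Add(name="res" + str(index) + "_add")([in_x, x])  # skip connection
    x = Activation("relu", name="res" + str(index) + "_relu2")(x)
    return x
```

Each block keeps the tensor shape fixed at (`cnn_filter_num`, 8, 8), so the skip connection can be a plain element-wise add.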
Example 3
def compile_and_train(model: training.Model,
                      num_epochs: int) -> Tuple[History, str]:

    model.compile(loss=categorical_crossentropy,
                  optimizer=Adam(),
                  metrics=['acc'])
    filepath = 'weights/' + model.name + '.hdf5'
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='loss',
                                 verbose=0,
                                 save_weights_only=True,
                                 save_best_only=True,
                                 mode='auto',
                                 save_freq='epoch')  # 'period' is deprecated and conflicts with save_freq
    tensor_board = TensorBoard(log_dir='logs/', histogram_freq=0)  # TF2's TensorBoard has no batch_size argument
    history = model.fit(x=x_train,
                        y=y_train,
                        batch_size=32,
                        epochs=num_epochs,
                        verbose=1,
                        callbacks=[checkpoint, tensor_board],
                        validation_split=0.2)
    weight_files = glob.glob(os.path.join(os.getcwd(), 'weights/*'))
    weight_file = max(weight_files, key=os.path.getctime)  # most recent file
    return history, weight_file
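`compile_and_train` reads `x_train` and `y_train` from module scope. A minimal sketch of how they might be prepared; CIFAR-10 is an assumption here, any image dataset with one-hot training labels works:

```python
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical

(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
# One-hot labels to match categorical_crossentropy; y_test is left as
# integer labels of shape (n, 1), which is what the error computation
# in Example 12 compares against.
y_train = to_categorical(y_train, 10)
```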
Example 4
def build_compile_3_classes_model():
    """Build and compile a Unet model to predict 3 classes from nucleus or
    cell images: background, edge and foreground.

    Returns
    -------
    model_3_classes : tensorflow.keras.model object
        Compiled Unet model.

    """
    # define inputs
    inputs_image = Input(shape=(None, None, 1), dtype="float32", name="image")

    # define model
    outputs = _get_3_classes_model(inputs_image)
    model_3_classes = Model(inputs_image, outputs, name="3ClassesModel")

    # losses
    loss = tf.keras.losses.SparseCategoricalCrossentropy()

    # metrics
    accuracy = tf.metrics.SparseCategoricalAccuracy(name="accuracy")

    # compile model
    model_3_classes.compile(optimizer='adam', loss=loss, metrics=accuracy)

    return model_3_classes
Example 5
def model_predict(model, embedded_ip, raw_text, label_layer_name):
    total_items = embedded_ip.shape[0]
    label_model = Model(inputs=model.input,
                        outputs=model.get_layer(label_layer_name).output)
    y_prob = label_model.predict(embedded_ip).reshape(-1)
    y_pred = y_prob > 0.5
    return y_prob, y_pred
Example 6
def sr_gan(hr_shape, upscale_num, gen_loss='vgg'):
    if gen_loss == 'vgg':
        loss_fun = vgg_loss(hr_shape)
    elif gen_loss == 'mse':
        loss_fun = 'mse'
    else:
        raise ValueError("gen_loss must be 'vgg' or 'mse', got %r" % gen_loss)
    downscale_times = int(math.pow(2, upscale_num))
    lr_shape = (hr_shape[0] // downscale_times, hr_shape[1] // downscale_times,
                hr_shape[2])

    g = generator(lr_shape, upscale_num)
    d = discriminator(hr_shape)

    optimizer = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    g.compile(loss=loss_fun, optimizer=optimizer)
    d.compile(loss="binary_crossentropy", optimizer=optimizer)

    gan_input = Input(shape=lr_shape)

    x = g(gan_input)
    gan_output = d(x)
    gan = Model(inputs=gan_input, outputs=[x, gan_output])
    gan.compile(loss=[loss_fun, "binary_crossentropy"],
                loss_weights=[1., 1e-3],
                optimizer=optimizer)

    return g, d, gan
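The `vgg_loss` factory called above is not included in the snippet. A plausible sketch, assuming the usual SRGAN perceptual loss (MSE between VGG19 feature maps of the real and generated images):

```python
import keras.backend as K
from keras.applications.vgg19 import VGG19
from keras.models import Model

def vgg_loss(hr_shape):
    # block5_conv4 features are a common choice; the exact layer is an
    # assumption, not taken from the original code.
    vgg = VGG19(include_top=False, weights='imagenet', input_shape=hr_shape)
    vgg.trainable = False
    features = Model(vgg.input, vgg.get_layer('block5_conv4').output)
    features.trainable = False

    def loss(y_true, y_pred):
        return K.mean(K.square(features(y_true) - features(y_pred)))

    return loss
```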
Example 7
def get_embedded_input(model, encoded_text):
    """
    Get embedding layer output from a CNN model as the input for CNN_DCNN model
    """
    embedding_layer_model = Model(
        inputs=model.input, outputs=model.get_layer('word_embedding').output)
    return embedding_layer_model.predict(encoded_text)
Example 8
    def test_model(self):
        base = self.base_fun(self.img_input)
        rpn = self.rpn(base)
        classifier = self.classifier(base)

        model_rpn = Model(self.img_input, rpn)
        model_classifier_only = Model([self.feature_input, self.roi_input],
                                      classifier)
        return model_rpn, model_classifier_only
Example 9
 def _ctc_init(self):
     self.labels = Input(name='the_labels', shape=[None], dtype='float32')
     self.input_length = Input(name='input_length',
                               shape=[1],
                               dtype='int64')
     self.label_length = Input(name='label_length',
                               shape=[1],
                               dtype='int64')
     self.loss_out = Lambda(ctc_lambda, output_shape=(1,), name='ctc')\
         ([self.labels, self.outputs, self.input_length, self.label_length])
     self.ctc_model = Model(inputs=[
         self.labels, self.inputs, self.input_length, self.label_length
     ],
                            outputs=self.loss_out)
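`ctc_lambda` is not defined in this snippet. The conventional implementation (an assumption here) simply wraps `K.ctc_batch_cost`, with the unpacking order matching the list passed to the `Lambda` layer above:

```python
import keras.backend as K

def ctc_lambda(args):
    labels, y_pred, input_length, label_length = args
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
```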
Example 10
 def build_model(self, input_shape: Tuple[int, int, int]) -> Model:
     x = Input(shape=input_shape, name="input")
     sub_sampling_out, sub_sampling_stack = self.__build_sub_sampling_stack(
         x=x)
     filter_extraction_conv = Conv2D(filters=1024,
                                     kernel_size=(3, 3),
                                     padding="same",
                                     name="filter_extraction_conv",
                                     activation="relu")(sub_sampling_out)
     filter_extraction_conv_bn = BatchNormalization(
         name="filter_extraction_conv_bn")(filter_extraction_conv)
     up_sampling_input = Conv2D(
         filters=512,
         kernel_size=(3, 3),
         padding="same",
         name="up_sampling_input",
         activation="relu")(filter_extraction_conv_bn)
     up_sampling_output = self.__build_up_sampling_stack(
         x=up_sampling_input, sub_sampling_stack=sub_sampling_stack)
     out = Conv2D(filters=self._num_classes,
                  kernel_size=(1, 1),
                  padding="same",
                  name="output")(up_sampling_output)
     flatten_out = Softmax(name="output_soft_max")(out)
     return Model(x, flatten_out)
Example 11
    def _model_init(self):
        self.inputs = Input(name='the_inputs', shape=(None, 200, 1))
        self.h1 = cnn_cell(32, self.inputs)
        self.h2 = cnn_cell(64, self.h1)
        self.h3 = cnn_cell(128, self.h2)
        self.h4 = cnn_cell(128, self.h3, pool=False)
        self.h5 = cnn_cell(128, self.h4, pool=False)
        # 200 / 8 * 128 = 3200
        self.h6 = Reshape((-1, 3200))(self.h5)
        self.h6 = Dropout(0.2)(self.h6)
        self.h7 = dense(256)(self.h6)
        self.h7 = Dropout(0.2)(self.h7)
        self.outputs = dense(self.vocab_size, activation='softmax')(self.h7)

        self.model = Model(inputs=self.inputs, outputs=self.outputs)
        self.model.summary()
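`cnn_cell` and `dense` are project helpers the snippet does not show. A plausible sketch consistent with the `200 / 8` comment above (three pooling cells halve the feature axis three times); the exact layer hyperparameters are assumptions:

```python
from keras.layers import (Activation, BatchNormalization, Conv2D, Dense,
                          MaxPooling2D)

def conv2d(filters):
    return Conv2D(filters, (3, 3), padding='same',
                  kernel_initializer='he_normal')

def norm(x):
    return BatchNormalization(axis=-1)(x)

def cnn_cell(size, x, pool=True):
    # Two conv + batch-norm + relu stages, optionally followed by 2x2 pooling.
    x = Activation('relu')(norm(conv2d(size)(x)))
    x = Activation('relu')(norm(conv2d(size)(x)))
    if pool:
        x = MaxPooling2D(pool_size=(2, 2))(x)
    return x

def dense(units, activation='relu'):
    return Dense(units, activation=activation,
                 kernel_initializer='he_normal')
```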
Example 12
def evaluate_error(model: training.Model) -> np.float64:
    pred = model.predict(x_test, batch_size=32)
    pred = np.argmax(pred, axis=1)
    pred = np.expand_dims(pred, axis=1)  # make same shape as y_test
    error = np.sum(np.not_equal(pred, y_test)) / y_test.shape[0]

    return error
Example 13
def build_compile_double_distance_model():
    """Build and compile a Unet model to predict foreground and a distance map
    from nucleus and cell images.

    This model version takes two images as input (for nucleus and cell).

    Returns
    -------
    model_distance : Tensorflow model
        Compiled Unet model.

    """
    # define inputs
    inputs_nuc = Input(shape=(None, None, 1), dtype="float32", name="nuc")
    inputs_cell = Input(shape=(None, None, 1), dtype="float32", name="cell")
    inputs = [inputs_nuc, inputs_cell]

    # define model
    (output_distance_nuc, output_surface_cell,
     output_distance_cell) = _get_double_distance_model(inputs)
    outputs = [output_distance_nuc, output_surface_cell, output_distance_cell]
    model_distance = Model(inputs, outputs, name="DoubleDistanceModel")

    # losses
    loss_distance_nuc = tf.keras.losses.MeanAbsoluteError()
    loss_surface_cell = tf.keras.losses.BinaryCrossentropy()
    loss_distance_cell = tf.keras.losses.MeanAbsoluteError()
    losses = [[loss_distance_nuc], [loss_surface_cell], [loss_distance_cell]]
    losses_weight = [[1.0], [1.0], [1.0]]

    # metrics
    metric_distance_nuc = tf.metrics.MeanAbsoluteError(name="mae")
    metric_surface_cell = tf.metrics.BinaryAccuracy(name="accuracy")
    metric_distance_cell = tf.metrics.MeanAbsoluteError(name="mae")
    metrics = [[metric_distance_nuc], [metric_surface_cell],
               [metric_distance_cell]]

    # compile model
    model_distance.compile(optimizer='adam',
                           loss=losses,
                           loss_weights=losses_weight,
                           metrics=metrics)

    return model_distance
Example 14
 def test_raise_dimension_specified(self):
     with self.assertRaises(ValueError):
         inputs = Input(shape=(32, 32, None))
         outputs = OctaveConv2D(13, kernel_size=3, ratio_out=0.0)(inputs)
         model = Model(inputs=inputs, outputs=outputs)
         model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
     with self.assertRaises(ValueError):
         inputs_high = Input(shape=(32, 32, 3))
         inputs_low = Input(shape=(32, 32, None))
         outputs = OctaveConv2D(13, kernel_size=3, ratio_out=0.0)([inputs_high, inputs_low])
         model = Model(inputs=[inputs_high, inputs_low], outputs=outputs)
         model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
Example 15
class DeepAgent:
    """
    A DQN agent that teaches itself to play, given only a Gym environment.
    After quite some trouble with various error messages, this now at least
    runs and trains; it does not yet achieve good results.

    Best result: ???
    """
    def __init__(self, shape, action_count: int):
        super().__init__()

        inp = Input(shape=shape)
        flat = Flatten()(inp)

        # Activation: relu, sigmoid, ...
        hidden1 = Dense(256, activation='relu')(flat)
        hidden2 = Dense(64, activation='relu')(hidden1)
        hidden3 = Dense(16, activation='relu')(hidden2)
        output = Dense(action_count, activation='softmax')(hidden3)

        self.model = Model(inputs=inp, outputs=output)
        self.model.summary()  # summary() prints the table itself; print() around it would just print None

        self.memory = SequentialMemory(limit=50000,
                                       window_length=WINDOW_LENGTH)
        self.policy = LinearAnnealedPolicy(EpsGreedyQPolicy(),
                                           attr='eps',
                                           value_max=1.,
                                           value_min=.1,
                                           value_test=.05,
                                           nb_steps=1000)
        self.callbacks = self.build_callbacks("msnake")
        self.dqn = DQNAgent(model=self.model,
                            nb_actions=action_count,
                            memory=self.memory,
                            nb_steps_warmup=50,
                            target_model_update=1e-2,
                            policy=self.policy)

        # keras-rl expects the optimizer to have a _name attribute:
        # https://github.com/keras-rl/keras-rl/issues/345
        Adam._name = "fix_bug"
        # lr: learning rate; other metric options: mae, accuracy
        self.dqn.compile(Adam(lr=1e-5), metrics=['mse'])

    def build_callbacks(self, env_name):
        callbacks = []

        checkpoint_weights_filename = 'dqn_' + env_name + '_weights_{step}.h5f'
        callbacks += [
            ModelIntervalCheckpoint(checkpoint_weights_filename, interval=5000)
        ]

        log_filename = 'dqn_{}_log.json'.format(env_name)
        callbacks += [FileLogger(log_filename, interval=100)]
        return callbacks
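The class never starts training itself. A minimal usage sketch, assuming a Gym environment and that `WINDOW_LENGTH` (the module-level constant used by `SequentialMemory` above) is folded into the input shape the way keras-rl stacks observations:

```python
import gym

env = gym.make('CartPole-v1')  # any Gym environment; an assumption here
agent = DeepAgent(shape=(WINDOW_LENGTH,) + env.observation_space.shape,
                  action_count=env.action_space.n)
agent.dqn.fit(env, nb_steps=100000, callbacks=agent.callbacks,
              visualize=False, verbose=2)
agent.dqn.test(env, nb_episodes=5, visualize=True)
```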
Example 16
class Am():
    """docstring for Amodel."""
    def __init__(self, args):
        self.vocab_size = args.vocab_size
        self.gpu_nums = args.gpu_nums
        self.lr = args.lr
        self.is_training = args.is_training
        self._model_init()
        if self.is_training:
            self._ctc_init()
            self.opt_init()

    def _model_init(self):
        self.inputs = Input(name='the_inputs', shape=(None, 200, 1))
        self.h1 = cnn_cell(32, self.inputs)
        self.h2 = cnn_cell(64, self.h1)
        self.h3 = cnn_cell(128, self.h2)
        self.h4 = cnn_cell(128, self.h3, pool=False)
        self.h5 = cnn_cell(128, self.h4, pool=False)
        # 200 / 8 * 128 = 3200
        self.h6 = Reshape((-1, 3200))(self.h5)
        self.h6 = Dropout(0.2)(self.h6)
        self.h7 = dense(256)(self.h6)
        self.h7 = Dropout(0.2)(self.h7)
        self.outputs = dense(self.vocab_size, activation='softmax')(self.h7)

        self.model = Model(inputs=self.inputs, outputs=self.outputs)
        self.model.summary()

    def _ctc_init(self):
        self.labels = Input(name='the_labels', shape=[None], dtype='float32')
        self.input_length = Input(name='input_length',
                                  shape=[1],
                                  dtype='int64')
        self.label_length = Input(name='label_length',
                                  shape=[1],
                                  dtype='int64')
        self.loss_out = Lambda(ctc_lambda, output_shape=(1,), name='ctc')\
            ([self.labels, self.outputs, self.input_length, self.label_length])
        self.ctc_model = Model(inputs=[
            self.labels, self.inputs, self.input_length, self.label_length
        ],
                               outputs=self.loss_out)

    def opt_init(self):
        opt = Adam(lr=self.lr,
                   beta_1=0.9,
                   beta_2=0.999,
                   decay=0.01,
                   epsilon=10e-8)
        if self.gpu_nums > 1:
            self.ctc_model = multi_gpu_model(self.ctc_model,
                                             gpus=self.gpu_nums)
        self.ctc_model.compile(loss={
            'ctc': lambda y_true, output: output
        },
                               optimizer=opt,
                               metrics=['accuracy'])
        self.ctc_model.summary()
Example 17
def build_model(num_classes: int,
                input_shape: Tuple[int, int] = (224, 224),
                pooling: str = 'avg'):
    base_model = Xception(include_top=False,
                          input_shape=(*input_shape, 3),
                          pooling=pooling,
                          weights='imagenet')
    for layer in base_model.layers:
        layer.trainable = False
    x = base_model.output

    logits = Dense(num_classes, name='scores', activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=logits)
    return model
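The function only builds the frozen-base model. A minimal sketch of compiling and fitting it; the class count, optimizer, and data are assumptions:

```python
model = build_model(num_classes=10)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',  # matches the softmax head
              metrics=['accuracy'])
# model.fit(x_train, y_train, epochs=5, batch_size=32,
#           validation_split=0.1)  # x_train: (n, 224, 224, 3), y_train one-hot
```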
Example 18
def get_model(model_name='c2_net', train_mode=True):

    inputs = Input(shape=(RESNET_SIZE, RESNET_SIZE, 3))  # avoid shadowing the built-in input()
    if model_name == 'c1_net':
        x = get_custom_model(inputs, train_mode=train_mode)
    elif model_name == 'c2_net':
        x = get_custom_model2(inputs, train_mode=train_mode)
    else:
        x = get_resnet_transfer_model(inputs,
                                      train_mode=train_mode,
                                      freeze_reznet=True)

    if USE_6POSE is True:
        out = Dense(6,
                    activation='softmax',
                    name='pose_dense_output',
                    trainable=train_mode)(x)
    else:
        out = Dense(3,
                    activation=None,
                    name='pose_dense_output',
                    trainable=train_mode)(x)

    model = Model(inputs=inputs, outputs=out)

    if USE_ADAM_OPT is True:
        # alternative: Adam(lr=0.05)
        optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=0.0001)
    else:
        optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate=0.001,
                                                         momentum=0.3)

    if train_mode:
        model.compile(optimizer,
                      loss='mse',  # mse -> mean squared error
                      metrics=['accuracy', 'mae', custom_acc])
    else:
        model.compile(optimizer,
                      loss='mae',  # mae -> mean absolute error
                      metrics=['accuracy', 'mae', custom_acc])

    model.summary()

    return model
Example 19
def build_compile_distance_model():
    """Build and compile a Unet model to predict foreground and a distance map
    from nucleus or cell images.

    Returns
    -------
    model_distance : Tensorflow model
        Compiled Unet model.

    """
    # define inputs
    inputs_image = Input(shape=(None, None, 1), dtype="float32", name="image")

    # define model
    output_surface, output_distance = _get_distance_model(inputs_image)
    outputs = [output_surface, output_distance]
    model_distance = Model(inputs_image, outputs, name="DistanceModel")

    # losses
    loss_surface = tf.keras.losses.BinaryCrossentropy()
    loss_distance = tf.keras.losses.MeanAbsoluteError()
    losses = [[loss_surface], [loss_distance]]
    losses_weight = [[1.0], [1.0]]

    # metrics
    metric_surface = tf.metrics.BinaryAccuracy(name="accuracy")
    metric_distance = tf.metrics.MeanAbsoluteError(name="mae")
    metrics = [[metric_surface], [metric_distance]]

    # compile model
    model_distance.compile(optimizer='adam',
                           loss=losses,
                           loss_weights=losses_weight,
                           metrics=metrics)

    return model_distance
Example 20
 def test_fit_octave(self):
     inputs = Input(shape=(32, 3))
     high, low = OctaveConv1D(13, kernel_size=3, octave=4)(inputs)
     high, low = MaxPool1D()(high), MaxPool1D()(low)
     conv = OctaveConv1D(5, kernel_size=3, octave=4, ratio_out=0.0)([high, low])
     flatten = Flatten()(conv)
     outputs = Dense(units=2, activation='softmax')(flatten)
     model = Model(inputs=inputs, outputs=outputs)
     model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
     model.summary(line_length=200)
     self._test_fit(model)
Example 21
    def create_model(self, softmax: bool = True) -> Model:
        inputs = []
        tensors = []

        if self.use_words_feature:
            inp, tensor = self.cnn_tensor_builder.create_tensor()
            inputs.append(inp)
            tensors.append(tensor)

        if self.use_context_feature:
            inp_prev, tensor_prev = self.cnn_tensor_builder.create_tensor()
            inp_next, tensor_next = self.cnn_tensor_builder.create_tensor()

            inputs.extend([inp_prev, inp_next])
            tensors.extend([tensor_prev, tensor_next])

        if self.use_syntactic_feature:
            inp, tensor = self.deep_tensor_builder.create_tensor(
                input_shape=(self.syntactic_features_num, ),
                layers=[32, 16],
                dropout=0.2)
            inputs.append(inp)
            tensors.append(tensor)

        if len(tensors) > 1:
            tensor = Concatenate()(tensors)
        elif len(tensors) == 1:
            tensor = tensors[0]
        else:
            raise ValueError('At least one feature type must be enabled')

        _, tensor = self.deep_tensor_builder.create_tensor(layers=[32, 8],
                                                           dropout=0.2,
                                                           input_tensor=tensor)

        if softmax:
            tensor = Dense(2, activation='softmax')(tensor)
        else:
            tensor = Dense(1, activation='sigmoid')(tensor)

        model = Model(inputs=inputs, outputs=[tensor])

        if softmax:
            model.compile(optimizer='adam',
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        else:
            model.compile(optimizer='adam',
                          loss='binary_crossentropy',
                          metrics=['accuracy'])

        return model
Example 22
 def test_fit_channels_first(self):
     inputs = Input(shape=(3, 32, 32))
     high, low = OctaveConv2D(13, kernel_size=3, data_format='channels_first')(inputs)
     high, low = MaxPool2D(data_format='channels_first')(high), MaxPool2D(data_format='channels_first')(low)
     high, low = OctaveConv2D(7, kernel_size=3, data_format='channels_first')([high, low])
     high, low = MaxPool2D(data_format='channels_first')(high), MaxPool2D(data_format='channels_first')(low)
     conv = OctaveConv2D(5, kernel_size=3, ratio_out=0.0, data_format='channels_first')([high, low])
     flatten = Flatten()(conv)
     outputs = Dense(units=2, activation='softmax')(flatten)
     model = Model(inputs=inputs, outputs=outputs)
     model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
     model.summary(line_length=200)
     self._test_fit(model, data_format='channels_first')
Example 23
 def test_make_dual_lambda(self):
     inputs = Input(shape=(32, 32, 3))
     conv = OctaveConv2D(13, kernel_size=3)(inputs)
     pool = OctaveConvDual()(conv, lambda: MaxPool2D())
     conv = OctaveConv2D(7, kernel_size=3)(pool)
     pool = OctaveConvDual()(conv, lambda: MaxPool2D())
     conv = OctaveConv2D(5, kernel_size=3, ratio_out=0.0)(pool)
     flatten = Flatten()(conv)
     outputs = Dense(units=2, activation='softmax')(flatten)
     model = Model(inputs=inputs, outputs=outputs)
     model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
     model.summary(line_length=200)
     self._test_fit(model)
Example 24
 def build_model(self, input_shape: Tuple[int, int, int]) -> Model:
     x = Input(shape=input_shape, name="input")
     big_branch_out = self.__build_big_output_head(x=x)
     sub_sampled_branches_out = self.__build_sub_sampled_branches_out(x=x)
     medium_branch_up = UpSampling2D(
         size=(2, 2), name="medium_branch_up")(sub_sampled_branches_out)
     medium_branch_up_refine = Conv2D(
         filters=32,
         kernel_size=(3, 3),
         padding="same",
         dilation_rate=(2, 2),
         activation="relu",
         name="medium_branch_up_refine")(medium_branch_up)
     fuse_add = Add(name="big_medium_fuse_add")(
         [big_branch_out, medium_branch_up_refine])
      fuse_add_bn = BatchNormalization(
          name="big_medium_fuse_add_bn")(fuse_add)
     cls_conv = Conv2D(filters=self._num_classes,
                       kernel_size=(1, 1),
                       padding="same",
                       name="output")(fuse_add_bn)
     flatten_out = Softmax(name="output_soft_max")(cls_conv)
     return Model(x, flatten_out)
Example 25
    def test_stateful_metrics(self):
        with self.cached_session():
            np.random.seed(1334)

            class BinaryTruePositives(layers.Layer):
                """Stateful Metric to count the total true positives over all batches.

                Assumes predictions and targets of shape `(samples, 1)`.

                Arguments:
                    threshold: Float, lower limit on prediction value that counts as a
                        positive class prediction.
                    name: String, name for the metric.
                """
                def __init__(self, name='true_positives', **kwargs):
                    super(BinaryTruePositives, self).__init__(name=name,
                                                              **kwargs)
                    self.true_positives = K.variable(value=0, dtype='int32')
                    self.stateful = True

                def reset_states(self):
                    K.set_value(self.true_positives, 0)

                def __call__(self, y_true, y_pred):
                    """Computes the number of true positives in a batch.

                    Args:
                        y_true: Tensor, batch_wise labels
                        y_pred: Tensor, batch_wise predictions

                    Returns:
                        The total number of true positives seen this epoch at the
                            completion of the batch.
                    """
                    y_true = math_ops.cast(y_true, 'int32')
                    y_pred = math_ops.cast(math_ops.round(y_pred), 'int32')
                    correct_preds = math_ops.cast(
                        math_ops.equal(y_pred, y_true), 'int32')
                    true_pos = math_ops.cast(
                        math_ops.reduce_sum(correct_preds * y_true), 'int32')
                    current_true_pos = self.true_positives * 1
                    self.add_update(state_ops.assign_add(
                        self.true_positives, true_pos),
                                    inputs=[y_true, y_pred])
                    return current_true_pos + true_pos

            metric_fn = BinaryTruePositives()
            config = metrics.serialize(metric_fn)
            metric_fn = metrics.deserialize(
                config,
                custom_objects={'BinaryTruePositives': BinaryTruePositives})

            # Test on simple model
            inputs = layers.Input(shape=(2, ))
            outputs = layers.Dense(1, activation='sigmoid')(inputs)
            model = Model(inputs, outputs)
            model.compile(optimizer='sgd',
                          loss='binary_crossentropy',
                          metrics=['acc', metric_fn])

            # Test fit, evaluate
            samples = 100
            x = np.random.random((samples, 2))
            y = np.random.randint(2, size=(samples, 1))
            val_samples = 10
            val_x = np.random.random((val_samples, 2))
            val_y = np.random.randint(2, size=(val_samples, 1))

            history = model.fit(x,
                                y,
                                epochs=1,
                                batch_size=10,
                                validation_data=(val_x, val_y))
            outs = model.evaluate(x, y, batch_size=10)
            preds = model.predict(x)

            def ref_true_pos(y_true, y_pred):
                return np.sum(np.logical_and(y_pred > 0.5, y_true == 1))

            # Test correctness (e.g. updates should have been run)
            self.assertAllClose(outs[2], ref_true_pos(y, preds), atol=1e-5)

            # Test correctness of the validation metric computation
            val_preds = model.predict(val_x)
            val_outs = model.evaluate(val_x, val_y, batch_size=10)
            self.assertAllClose(val_outs[2],
                                ref_true_pos(val_y, val_preds),
                                atol=1e-5)
            self.assertAllClose(val_outs[2],
                                history.history['val_true_positives'][-1],
                                atol=1e-5)

            # Test with generators
            gen = [(np.array([x0]), np.array([y0])) for x0, y0 in zip(x, y)]
            val_gen = [(np.array([x0]), np.array([y0]))
                       for x0, y0 in zip(val_x, val_y)]
            history = model.fit_generator(iter(gen),
                                          epochs=1,
                                          steps_per_epoch=samples,
                                          validation_data=iter(val_gen),
                                          validation_steps=val_samples)
            outs = model.evaluate_generator(iter(gen), steps=samples)
            preds = model.predict_generator(iter(gen), steps=samples)

            # Test correctness of the metric results
            self.assertAllClose(outs[2], ref_true_pos(y, preds), atol=1e-5)

            # Test correctness of the validation metric computation
            val_preds = model.predict_generator(iter(val_gen),
                                                steps=val_samples)
            val_outs = model.evaluate_generator(iter(val_gen),
                                                steps=val_samples)
            self.assertAllClose(val_outs[2],
                                ref_true_pos(val_y, val_preds),
                                atol=1e-5)
            self.assertAllClose(val_outs[2],
                                history.history['val_true_positives'][-1],
                                atol=1e-5)
Example 26
  def test_stateful_metrics(self):
    with self.test_session():
      np.random.seed(1334)

      class BinaryTruePositives(layers.Layer):
        """Stateful Metric to count the total true positives over all batches.

        Assumes predictions and targets of shape `(samples, 1)`.

        Arguments:
            threshold: Float, lower limit on prediction value that counts as a
                positive class prediction.
            name: String, name for the metric.
        """

        def __init__(self, name='true_positives', **kwargs):
          super(BinaryTruePositives, self).__init__(name=name, **kwargs)
          self.true_positives = K.variable(value=0, dtype='int32')
          self.stateful = True

        def reset_states(self):
          K.set_value(self.true_positives, 0)

        def __call__(self, y_true, y_pred):
          """Computes the number of true positives in a batch.

          Args:
              y_true: Tensor, batch_wise labels
              y_pred: Tensor, batch_wise predictions

          Returns:
              The total number of true positives seen this epoch at the
                  completion of the batch.
          """
          y_true = math_ops.cast(y_true, 'int32')
          y_pred = math_ops.cast(math_ops.round(y_pred), 'int32')
          correct_preds = math_ops.cast(math_ops.equal(y_pred, y_true), 'int32')
          true_pos = math_ops.cast(
              math_ops.reduce_sum(correct_preds * y_true), 'int32')
          current_true_pos = self.true_positives * 1
          self.add_update(
              state_ops.assign_add(self.true_positives, true_pos),
              inputs=[y_true, y_pred])
          return current_true_pos + true_pos

      metric_fn = BinaryTruePositives()
      config = metrics.serialize(metric_fn)
      metric_fn = metrics.deserialize(
          config, custom_objects={'BinaryTruePositives': BinaryTruePositives})

      # Test on simple model
      inputs = layers.Input(shape=(2,))
      outputs = layers.Dense(1, activation='sigmoid')(inputs)
      model = Model(inputs, outputs)
      model.compile(optimizer='sgd',
                    loss='binary_crossentropy',
                    metrics=['acc', metric_fn])

      # Test fit, evaluate
      samples = 100
      x = np.random.random((samples, 2))
      y = np.random.randint(2, size=(samples, 1))
      val_samples = 10
      val_x = np.random.random((val_samples, 2))
      val_y = np.random.randint(2, size=(val_samples, 1))

      history = model.fit(x, y,
                          epochs=1,
                          batch_size=10,
                          validation_data=(val_x, val_y))
      outs = model.evaluate(x, y, batch_size=10)
      preds = model.predict(x)

      def ref_true_pos(y_true, y_pred):
        return np.sum(np.logical_and(y_pred > 0.5, y_true == 1))

      # Test correctness (e.g. updates should have been run)
      self.assertAllClose(outs[2], ref_true_pos(y, preds), atol=1e-5)

      # Test correctness of the validation metric computation
      val_preds = model.predict(val_x)
      val_outs = model.evaluate(val_x, val_y, batch_size=10)
      self.assertAllClose(
          val_outs[2], ref_true_pos(val_y, val_preds), atol=1e-5)
      self.assertAllClose(
          val_outs[2], history.history['val_true_positives'][-1], atol=1e-5)

      # Test with generators
      gen = [(np.array([x0]), np.array([y0])) for x0, y0 in zip(x, y)]
      val_gen = [(np.array([x0]), np.array([y0]))
                 for x0, y0 in zip(val_x, val_y)]
      history = model.fit_generator(iter(gen),
                                    epochs=1,
                                    steps_per_epoch=samples,
                                    validation_data=iter(val_gen),
                                    validation_steps=val_samples)
      outs = model.evaluate_generator(iter(gen), steps=samples)
      preds = model.predict_generator(iter(gen), steps=samples)

      # Test correctness of the metric results
      self.assertAllClose(outs[2], ref_true_pos(y, preds), atol=1e-5)

      # Test correctness of the validation metric computation
      val_preds = model.predict_generator(iter(val_gen), steps=val_samples)
      val_outs = model.evaluate_generator(iter(val_gen), steps=val_samples)
      self.assertAllClose(
          val_outs[2], ref_true_pos(val_y, val_preds), atol=1e-5)
      self.assertAllClose(
          val_outs[2], history.history['val_true_positives'][-1], atol=1e-5)
Example 27
y_train = np_utils.to_categorical(y_train, NUM_CLASSES)
y_test = np_utils.to_categorical(y_test, NUM_CLASSES)

X_train = X_train.astype("float") / 255.0
X_test = X_test.astype("float") / 255.0

# Define the model
model = VGG16(weights='imagenet',
              include_top=False,
              input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))

top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(NUM_CLASSES, activation='softmax'))

model = Model(inputs=model.input, outputs=top_model(model.output))

for layer in model.layers[:15]:
    layer.trainable = False

opt = Adam(lr=0.0001)
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=16, epochs=5)
results = model.evaluate(X_test, y_test, batch_size=16)
print("test loss, test acc:", results)
model.save("./AnimalJudgmentModel.h5")
Example 28
def data_pre(data):
    # Build the labels: one class index per sample (the original's
    # trailing [0] kept only the first class's labels, a bug)
    label = [i for i, samples in enumerate(data) for _ in samples]
    label = to_categorical(label)
    # Word segmentation
    context = []
    for i in data:
        for j in i:
            context.append(jieba.lcut(j))

    # Build the vocabulary
    tokenizer = Tokenizer(num_words=20000)
    tokenizer.fit_on_texts(context)

    train_tags_title = tokenizer.texts_to_sequences(context)
    train_tags_title_preprocessed = pad_sequences(train_tags_title,
                                                  maxlen=45,
                                                  padding='post')

    # Pre-trained word vectors (left disabled in the original)
    # embedding_matrix = np.zeros((278028, 30), dtype=np.float32)
    # f = open('wiki.zh.text.vector', encoding='utf-8')
    # f = f.readlines()
    # for text in f:
    #     text = text.split()
    #     if text[0] in context:
    #         embedding_matrix[context[text[0]]] = text[1:]

    # Model
    x_1 = Input(shape=(45, ))  # padded sequence length
    # Embedding maps word indices to dense vectors and must be the first
    # layer; input_dim must cover the vocabulary (num_words above): the
    # original's input_dim=45 would fail for any word index >= 45
    embed_1 = Embedding(input_dim=20000,
                        output_dim=45)(x_1)
    L_1 = (LSTM(64))(embed_1)  # 64 is the output dimensionality
    L_1 = Dropout(0.5)(L_1)  # drop half the inputs to fight overfitting
    L_1 = Dense(9, activation='softmax')(L_1)  # 9 output classes
    model_one = Model(x_1, L_1)  # x_1 is the input, L_1 the output
    model_one.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['acc'])
    history = model_one.fit(train_tags_title_preprocessed,
                            label,
                            batch_size=512,
                            epochs=20,
                            validation_split=0.1,
                            shuffle=True)
    # Plot accuracy history
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('model acc')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc='upper left')
    plt.show()
    # Plot loss history
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc='upper left')
    plt.show()
Example 29
def run(model):
    # Download kitti dataset
    build_data.maybe_download_training_img(DATA_DIRECTORY)

    x, y = build_data.get_data(TRAINING_DATA_DIRECTORY, IMAGE_SHAPE)

    if model is None:
        inputs = Input(shape=(IMAGE_SHAPE[0], IMAGE_SHAPE[1], 3))

        # Block 1
        block1_conv1 = Conv2D(64, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block1_conv1')(inputs)
        block1_conv2 = Conv2D(64, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block1_conv2')(block1_conv1)
        block1_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block1_pool')(block1_conv2)

        # Block 2
        block2_conv1 = Conv2D(128, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block2_conv1')(block1_pool)
        block2_conv2 = Conv2D(128, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block2_conv2')(block2_conv1)
        block2_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block2_pool')(block2_conv2)

        # Block 3
        block3_conv1 = Conv2D(256, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block3_conv1')(block2_pool)
        block3_conv2 = Conv2D(256, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block3_conv2')(block3_conv1)
        block3_conv3 = Conv2D(256, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block3_conv3')(block3_conv2)
        block3_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block3_pool')(block3_conv3)

        # Block 4
        block4_conv1 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block4_conv1')(block3_pool)
        block4_conv2 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block4_conv2')(block4_conv1)
        block4_conv3 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block4_conv3')(block4_conv2)
        block4_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block4_pool')(block4_conv3)

        # Block 5
        block5_conv1 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block5_conv1')(block4_pool)
        block5_conv2 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block5_conv2')(block5_conv1)
        block5_conv3 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block5_conv3')(block5_conv2)
        block5_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block5_pool')(block5_conv3)

        pool5_conv1x1 = Conv2D(2, (1, 1), activation='relu',
                               padding='same')(block5_pool)
        upsample_1 = Conv2DTranspose(2,
                                     kernel_size=(4, 4),
                                     strides=(2, 2),
                                     padding="same")(pool5_conv1x1)

        pool4_conv1x1 = Conv2D(2, (1, 1), activation='relu',
                               padding='same')(block4_pool)
        add_1 = Add()([upsample_1, pool4_conv1x1])

        upsample_2 = Conv2DTranspose(2,
                                     kernel_size=(4, 4),
                                     strides=(2, 2),
                                     padding="same")(add_1)
        pool3_conv1x1 = Conv2D(2, (1, 1), activation='relu',
                               padding='same')(block3_pool)
        add_2 = Add()([upsample_2, pool3_conv1x1])

        upsample_3 = Conv2DTranspose(2,
                                     kernel_size=(16, 16),
                                     strides=(8, 8),
                                     padding="same")(add_2)
        output = Dense(2, activation='softmax')(upsample_3)

        model = Model(inputs, output, name='multinet_seg')

        adam = Adam(lr=LEARNING_RATE)
        model.compile(loss='categorical_crossentropy',
                      optimizer=adam,
                      metrics=['accuracy'])

    model.fit(x, y, batch_size=BATCH_SIZE, epochs=EPOCHS)
    model.save('trained_model/trained_model' + str(time.time()) + '.h5')
Example 30
    def train_model(self, model_weight=None, optimizers=None, lr=None):

        base = self.base_fun(self.img_input)
        rpn = self.rpn(base)
        classifier = self.classifier(base)

        if lr is None:
            lr = 1e-4
        if optimizers == 'SGD':
            optimizer_rpn = SGD(lr=lr)
            optimizer_classifier = SGD(lr=lr)
        else:
            optimizer_rpn = Adam(lr=lr)
            optimizer_classifier = Adam(lr=lr)
        model_rpn = Model(self.img_input, rpn[:2])
        model_classifier = Model([self.img_input, self.roi_input], classifier)
        model_all = Model([self.img_input, self.roi_input],
                          rpn[:2] + classifier)

        if model_weight is not None:
            model_rpn.load_weights(model_weight, by_name=True)
            model_classifier.load_weights(model_weight, by_name=True)
            model_all.load_weights(model_weight, by_name=True)

        model_rpn.compile(optimizer=optimizer_rpn,
                          loss=[
                              losses.rpn_loss_cls(self.num_anchors),
                              losses.rpn_loss_reg(self.num_anchors)
                          ])
        model_classifier.compile(
            optimizer=optimizer_classifier,
            loss=[
                losses.class_loss_cls,
                losses.class_loss_reg(self.num_cls - 1)
            ],
            metrics={'dense_class_{}'.format(self.num_cls): 'accuracy'})
        model_all.compile(optimizer='sgd', loss='mae')

        return model_rpn, model_classifier, model_all
Example 31
def multi_gpu_model(model, gpus, cpu_merge=True, cpu_relocation=False):
  """Replicates a model on different GPUs.

  Specifically, this function implements single-machine
  multi-GPU data parallelism. It works in the following way:

  - Divide the model's input(s) into multiple sub-batches.
  - Apply a model copy on each sub-batch. Every model copy
      is executed on a dedicated GPU.
  - Concatenate the results (on CPU) into one big batch.

  E.g. if your `batch_size` is 64 and you use `gpus=2`,
  then we will divide the input into 2 sub-batches of 32 samples,
  process each sub-batch on one GPU, then return the full
  batch of 64 processed samples.

  This induces quasi-linear speedup on up to 8 GPUs.

  This function is only available with the TensorFlow backend
  for the time being.

  Args:
      model: A Keras model instance. To avoid OOM errors,
          this model could have been built on CPU, for instance
          (see usage example below).
      gpus: Integer >= 2, number of GPUs on which to create
          model replicas.
      cpu_merge: A boolean value to identify whether to force
          merging model weights under the scope of the CPU or not.
      cpu_relocation: A boolean value to identify whether to
          create the model's weights under the scope of the CPU.
          If the model is not defined under any preceding device
          scope, you can still rescue it by activating this option.

  Returns:
      A Keras `Model` instance which can be used just like the initial
      `model` argument, but which distributes its workload on multiple GPUs.

  Example 1: Training models with weights merge on CPU

  ```python
      import tensorflow as tf
      from keras.applications import Xception
      from keras.utils import multi_gpu_model
      import numpy as np

      num_samples = 1000
      height = 224
      width = 224
      num_classes = 1000

      # Instantiate the base model (or "template" model).
      # We recommend doing this under a CPU device scope,
      # so that the model's weights are hosted on CPU memory.
      # Otherwise they may end up hosted on a GPU, which would
      # complicate weight sharing.
      with tf.device('/cpu:0'):
          model = Xception(weights=None,
                           input_shape=(height, width, 3),
                           classes=num_classes)

      # Replicates the model on 8 GPUs.
      # This assumes that your machine has 8 available GPUs.
      parallel_model = multi_gpu_model(model, gpus=8)
      parallel_model.compile(loss='categorical_crossentropy',
                             optimizer='rmsprop')

      # Generate dummy data.
      x = np.random.random((num_samples, height, width, 3))
      y = np.random.random((num_samples, num_classes))

      # This `fit` call will be distributed on 8 GPUs.
      # Since the batch size is 256, each GPU will process 32 samples.
      parallel_model.fit(x, y, epochs=20, batch_size=256)

      # Save model via the template model (which shares the same weights):
      model.save('my_model.h5')
  ```

  Example 2: Training models with weights merge on CPU using cpu_relocation

  ```python
       ..
       # Not needed to change the device scope for model definition:
       model = Xception(weights=None, ..)

       try:
           model = multi_gpu_model(model, cpu_relocation=True)
           print("Training using multiple GPUs..")
       except:
           print("Training using single GPU or CPU..")

       model.compile(..)
       ..
  ```

  Example 3: Training models with weights merge on GPU (recommended for NV-link)

  ```python
       ..
       # Not needed to change the device scope for model definition:
       model = Xception(weights=None, ..)

       try:
           model = multi_gpu_model(model, cpu_merge=False)
           print("Training using multiple GPUs..")
       except:
           print("Training using single GPU or CPU..")
       model.compile(..)
       ..
  ```

  Raises:
    ValueError: if the `gpus` argument does not match available devices.
  """
  if isinstance(gpus, (list, tuple)):
    if len(gpus) <= 1:
      raise ValueError('For multi-gpu usage to be effective, '
                       'call `multi_gpu_model` with `len(gpus) >= 2`. '
                       'Received: `gpus=%s`' % gpus)
    num_gpus = len(gpus)
    target_gpu_ids = gpus
  else:
    if gpus <= 1:
      raise ValueError('For multi-gpu usage to be effective, '
                       'call `multi_gpu_model` with `gpus >= 2`. '
                       'Received: `gpus=%s`' % gpus)
    num_gpus = gpus
    target_gpu_ids = range(num_gpus)

  target_devices = ['/cpu:0'] + ['/gpu:%d' % i for i in target_gpu_ids]
  available_devices = _get_available_devices()
  available_devices = [
      _normalize_device_name(name) for name in available_devices
  ]
  for device in target_devices:
    if device not in available_devices:
      raise ValueError('To call `multi_gpu_model` with `gpus=%s`, '
                       'we expect the following devices to be available: %s. '
                       'However this machine only has: %s. '
                       'Try reducing `gpus`.' % (gpus, target_devices,
                                                 available_devices))

  def get_slice(data, i, parts):
    """Slice an array into `parts` slices and return slice `i`.

    Args:
      data: array to slice.
      i: index of slice to return.
      parts: number of slices to make.

    Returns:
      Slice `i` of `data`.
    """
    shape = array_ops.shape(data)
    batch_size = shape[:1]
    input_shape = shape[1:]
    step = batch_size // parts
    if i == parts - 1:
      size = batch_size - step * i
    else:
      size = step
    size = array_ops.concat([size, input_shape], axis=0)
    stride = array_ops.concat([step, input_shape * 0], axis=0)
    start = stride * i
    return array_ops.slice(data, start, size)

  # Relocate the model definition under CPU device scope if needed
  if cpu_relocation:
    from tensorflow.python.keras.models import clone_model  # pylint: disable=g-import-not-at-top
    with ops.device('/cpu:0'):
      model = clone_model(model)

  all_outputs = [[] for _ in range(len(model.outputs))]

  # Place a copy of the model on each GPU,
  # each getting a slice of the inputs.
  for i, gpu_id in enumerate(target_gpu_ids):
    with ops.device('/gpu:%d' % gpu_id):
      with backend.name_scope('replica_%d' % gpu_id):
        inputs = []
        # Retrieve a slice of the input.
        for x in model.inputs:
          input_shape = tuple(x.shape.as_list())[1:]
          slice_i = Lambda(
              get_slice,
              output_shape=input_shape,
              arguments={
                  'i': i,
                  'parts': num_gpus
              })(
                  x)
          inputs.append(slice_i)

        # Apply model on slice
        # (creating a model replica on the target device).
        outputs = model(inputs)
        if not isinstance(outputs, list):
          outputs = [outputs]

        # Save the outputs for merging back together later.
        for o, output in enumerate(outputs):
          all_outputs[o].append(output)

  # Deduplicate output names to handle Siamese networks.
  occurrences = {}
  for n in model.output_names:
    if n not in occurrences:
      occurrences[n] = 1
    else:
      occurrences[n] += 1
  conflict_counter = {n: 0 for n, count in occurrences.items() if count > 1}
  output_names = []
  for n in model.output_names:
    if n in conflict_counter:
      conflict_counter[n] += 1
      n += '_%d' % conflict_counter[n]
    output_names.append(n)

  # Merge outputs under expected scope.
  with ops.device('/cpu:0' if cpu_merge else '/gpu:%d' % target_gpu_ids[0]):
    merged = []
    for name, outputs in zip(output_names, all_outputs):
      merged.append(concatenate(outputs, axis=0, name=name))
    return Model(model.inputs, merged)