Example #1
def eval_it():
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    import argparse
    parser = argparse.ArgumentParser(description='Evaluate the model on your dataset')
    parser.add_argument('--test_file',
                        default='test_pair.txt',
                        help='your test file',
                        type=str)

    parser.add_argument('--model_weights',
                        default='result_PFA_FLoss/PFA_00500.h5',
                        help='your model weights',
                        type=str)
    batch_size = 32

    args = parser.parse_args()
    model_name = args.model_weights
    test_path = args.test_file
    HOME = os.path.expanduser('~')
    test_folder = os.path.join(
        HOME,
        '../ads-creative-image-algorithm/public_data/datasets/SalientDataset/DUTS/DUTS-TE'
    )
    if not os.path.exists(test_path):
        ge_train_pair(test_path, test_folder, "DUTS-TE-Image", "DUTS-TE-Mask")
    target_size = (256, 256)
    with open(test_path, 'r') as f:
        testlist = f.readlines()
    steps_per_epoch = len(testlist) // batch_size  # integer step count for evaluate_generator
    optimizer = optimizers.SGD(lr=1e-2, momentum=0.9, decay=0)
    loss = EdgeHoldLoss
    metrics = [acc, pre, rec, F_value, MAE]
    with_crf = False
    draw_bound = False
    draw_poly = False
    draw_cutout = False
    dropout = False
    with_CPFE = True
    with_CA = True
    with_SA = True

    if target_size[0] % 32 != 0 or target_size[1] % 32 != 0:
        raise ValueError('Image height and width must be a multiple of 32')
    testgen = getTestGenerator(test_path, target_size, batch_size)
    model_input = Input(shape=(target_size[0], target_size[1], 3))
    model = VGG16(model_input,
                  dropout=dropout,
                  with_CPFE=with_CPFE,
                  with_CA=with_CA,
                  with_SA=with_SA)
    model.load_weights(model_name, by_name=True)

    for layer in model.layers:
        layer.trainable = False
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    evalSal = model.evaluate_generator(testgen, steps_per_epoch - 1, verbose=1)
    print(evalSal)
Example #2
def create_sequential_model():
    model = models.Sequential()  # define a Sequential model
    # Add a convolution layer: a 2D layer with a (3, 3) kernel and the
    # rectified linear unit (ReLU) activation. It detects the presence of
    # features in the input image and outputs feature maps, each of which
    # preserves the position of the features it detects, as input for the
    # next layer.
    model.add(
        layers.Conv2D(23, (3, 3), activation="relu", input_shape=(28, 28, 1)))

    # Add a max-pooling layer after the convolution layer. Pooling keeps the
    # same number of feature maps, but each pooled map is smaller than its
    # input (downsampling).
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Flatten layer: keeps the number of pooled feature maps unchanged and
    # converts them into a single 1D vector.
    model.add(layers.Flatten())

    # Fully connected hidden layer over the flattened features.
    model.add(layers.Dense(100, activation='relu'))

    # adding the output layer with 10 nodes (0-9 classes)
    model.add(layers.Dense(10, activation='softmax'))

    # create a stochastic gradient descent optimizer with learning rate 0.003
    opt = optimizers.SGD(lr=0.003)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
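A minimal usage sketch for this builder, assuming the tensorflow.keras namespace and MNIST data shaped to match the (28, 28, 1) input above:

from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical

# Load MNIST and reshape to (samples, 28, 28, 1) for the Conv2D layer.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255.0
x_test = x_test.reshape(-1, 28, 28, 1).astype('float32') / 255.0

# One-hot encode the labels to match categorical_crossentropy.
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

model = create_sequential_model()
model.fit(x_train, y_train, epochs=5, batch_size=64, validation_split=0.1)
print(model.evaluate(x_test, y_test))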
Example #3
    def __init__(self, gui=0, training=1):
        self.gui = gui

        if gui:
            self.gui = widgets.VBox(children=[
                self.tab, self.progress_box, self.out_initial, self.out,
                self.out_stats
            ])
            display(self.init_gui())

        # Images
        self.test_class_indices = []
        self.test_dir = None
        self.init_images()

        self.sgd = optimizers.SGD(lr=0.01,
                                  decay=1e-6,
                                  momentum=0.9,
                                  nesterov=True)
        self.predicted_class_indices_init = []
        self.wrong_guesses = []

        self.train_button.disabled = False
        self.fine_tune_button.disabled = False

        self.init_model()

        if training:
            self.train_model(self.train_button)
Example #4
    def instantiate_model(self):
        self.model = create_model_resnet(
            self.input_shape,
            n_output=self.n_output,
            normalize=self.normalize,
            kernel_shape=self.kernel_shape,
            size_blocks=self.size_blocks,
            resnet=self.resnet,
            dropout=self.dropout,
            n_channels_by_block=self.n_channels_by_block,
            size_dense=self.size_dense,
            average_pooling=self.average_pooling,
            separable_conv=self.separable_conv)
        print(self.model.summary())

        self.optimizer = optimizers.Adamax(lr=self.lr) if self.optimizer == 'adamax' \
                    else optimizers.RMSprop(lr=self.lr) if self.optimizer == 'rmsprop' \
                    else optimizers.SGD(lr=self.lr, momentum=.9) if self.optimizer == 'sgd' \
                    else optimizers.Adam(lr=self.lr) if self.optimizer == 'adam' else None

        if self.zoom:
            self.datagen = ImageDataGenerator(rotation_range=10,
                                              width_shift_range=0.1,
                                              height_shift_range=0.1,
                                              zoom_range=0.1,
                                              horizontal_flip=True,
                                              fill_mode='nearest')
        elif self.shift:
            self.datagen = ImageDataGenerator(width_shift_range=0.1,
                                              height_shift_range=0.1,
                                              fill_mode='nearest')
        elif self.flip:
            self.datagen = ImageDataGenerator(horizontal_flip=bool(self.flip))
        else:
            self.datagen = None
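Once instantiated, such a generator would typically feed training through flow; a sketch of that pattern, where model, x_train, and y_train are assumed to exist:

# Stream augmented batches when a generator was configured; otherwise
# fall back to plain in-memory training.
if datagen is not None:
    model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
                        steps_per_epoch=len(x_train) // 32,
                        epochs=10)
else:
    model.fit(x_train, y_train, batch_size=32, epochs=10)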
Example #5
    def __init__(self, verbose=0, test=0):
        self.verbose = verbose
        self.test = test

        if verbose:
            self.gui = widgets.VBox(children=[
                self.tab, self.progress_box, self.out_initial, self.out,
                self.out_stats
            ])

        # Images
        self.test_class_indices = []
        self.test_dir = None
        self.init_images()

        self.sgd = optimizers.SGD(lr=0.01,
                                  decay=1e-6,
                                  momentum=0.9,
                                  nesterov=True)
        self.predicted_class_indices_init = []
        self.wrong_guesses = []

        if not test:
            display(self.init_gui())

        self.train_button.disabled = False
        self.fine_tune_button.disabled = False

        self.init_model()
Example #6
def create_model():
    conv_base = ResNet50(weights='imagenet',
                         include_top=False,
                         input_shape=INPUT_SHAPE)
    conv_base.trainable = False
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.GlobalAveragePooling2D())
    model.add(
        layers.Dense(512,
                     activation='relu',
                     kernel_initializer='uniform',
                     kernel_regularizer=reg))
    model.add(layers.Dropout(0.5))
    model.add(
        layers.Dense(512,
                     activation='relu',
                     kernel_initializer='uniform',
                     kernel_regularizer=reg))
    model.add(layers.Dropout(0.5))
    model.add(
        layers.Dense(2,
                     activation='softmax',
                     kernel_initializer='uniform',
                     kernel_regularizer=reg))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.SGD(lr=LEARN_RATE, momentum=0.9),
                  metrics=['accuracy'])
    model.summary()
    return model
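create_model references INPUT_SHAPE, LEARN_RATE, and reg, which are defined elsewhere in the source file; plausible definitions consistent with how they are used (the concrete values here are assumptions):

from tensorflow.keras import regularizers

INPUT_SHAPE = (224, 224, 3)   # ResNet50's default input size
LEARN_RATE = 1e-3             # passed to optimizers.SGD above
reg = regularizers.l2(1e-4)   # L2 penalty applied to the Dense layers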
Example #7
def main(args):
    # (train_x, train_y), (test_x, test_y) = iris_data.load_data()
    data = load_iris()
    # print(type(data))
    # print(data.keys())
    train_x, test_x, train_y, test_y = train_test_split(data["data"],
                                                        data["target"],
                                                        train_size=0.7)

    # Iris has 3 classes (labels 0-2), so one-hot encode with num_classes=3.
    train_y = tf.keras.utils.to_categorical(train_y, num_classes=3)
    test_y = tf.keras.utils.to_categorical(test_y, num_classes=3)

    model = models.Sequential()
    model.add(
        layers.Dense(units=32, activation=activations.relu, input_shape=(4, )))
    model.add(layers.Dense(units=32, activation=activations.relu))
    model.add(layers.Dense(units=3, activation=activations.softmax))

    model.compile(optimizer=optimizers.SGD(),
                  loss=losses.categorical_crossentropy,
                  metrics=[metrics.categorical_accuracy])
    model.summary()

    iris_est = tf.keras.estimator.model_to_estimator(model,
                                                     model_dir="./models/")

    iris_est.train(input_fn=lambda: my_train_input_fn(train_x, train_y, 30),
                   steps=100)

    result = iris_est.evaluate(
        input_fn=lambda: my_eval_input_fn(test_x, test_y, 30))

    print(result)
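The my_train_input_fn and my_eval_input_fn helpers are not shown above; a minimal sketch of what such Estimator input functions could look like, assuming the feature key matches the Keras input layer's name (here hypothetically 'dense_input'):

def my_train_input_fn(features, labels, batch_size):
    # The dict key must match the Keras model's input name.
    dataset = tf.data.Dataset.from_tensor_slices(
        ({'dense_input': features}, labels))
    return dataset.shuffle(1000).repeat().batch(batch_size)


def my_eval_input_fn(features, labels, batch_size):
    dataset = tf.data.Dataset.from_tensor_slices(
        ({'dense_input': features}, labels))
    return dataset.batch(batch_size)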
Example #8
def kerasModel(train_x, test_x, train_y, test_y):
    model = Sequential()

    model.add(Conv2D(24, (5, 5), strides=(2, 2), activation="relu", input_shape=(128, 64, 3)))

    model.add(Conv2D(36, (5, 5), strides=(2, 2), activation="relu"))

    model.add(Conv2D(48, (5, 5), strides=(2, 2), activation="relu"))

    model.add(Conv2D(64, (3, 3), strides=(2, 2), activation="relu"))

    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(64, activation="relu"))

    model.add(Dense(32, activation="relu"))

    # A softmax over a single unit always outputs 1.0, so use a sigmoid
    # for the single binary output instead.
    model.add(Dense(1, activation="sigmoid"))
    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='mse', optimizer=sgd, metrics=['accuracy'])

    model.fit(train_x, train_y, epochs=5)

    # model.compile(loss='mse', optimizer=Adam(lr=0.01, decay=1e-6),metrics=[get_categorical_accuracy_keras])

    # model.fit(train_x,train_y, shuffle=True, nb_epoch=5)

    score = model.evaluate(test_x, test_y, verbose=0)

    print("Accuracy: %.2f%%" % (score[1] * 100))
    return score[1] * 100
Example #9
  def testNumericEquivalenceForNesterovMomentum(self):
    if testing_utils.should_run_tf_function() or context.executing_eagerly():
      self.skipTest(
          'v1 optimizer does not run in experimental_run_tf_function mode or '
          'eager mode')
    np.random.seed(1331)
    with self.cached_session():
      train_samples = 20
      input_dim = 3
      num_classes = 2
      (x, y), _ = testing_utils.get_test_data(
          train_samples=train_samples,
          test_samples=10,
          input_shape=(input_dim,),
          num_classes=num_classes)
      y = keras.utils.to_categorical(y)

      num_hidden = 5
      model_k_v1 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2.set_weights(model_k_v1.get_weights())
      model_tf = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_tf.set_weights(model_k_v2.get_weights())

      opt_k_v1 = optimizers.SGD(momentum=0.9, nesterov=True)
      opt_k_v2 = gradient_descent.SGD(momentum=0.9, nesterov=True)
      opt_tf = momentum.MomentumOptimizer(
          learning_rate=0.01, momentum=0.9, use_nesterov=True)

      model_k_v1.compile(
          opt_k_v1,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly(),
          experimental_run_tf_function=testing_utils.should_run_tf_function())
      model_k_v2.compile(
          opt_k_v2,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly(),
          experimental_run_tf_function=testing_utils.should_run_tf_function())
      model_tf.compile(
          opt_tf,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly(),
          experimental_run_tf_function=testing_utils.should_run_tf_function())

      hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False)
      hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False)
      hist_tf = model_tf.fit(x, y, batch_size=5, epochs=10, shuffle=False)

      self.assertAllClose(model_k_v1.get_weights(), model_tf.get_weights())
      self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights())
      self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
      self.assertAllClose(hist_k_v1.history['loss'], hist_tf.history['loss'])
      self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss'])
Example #10
def compile_keras_model(model, optimizer, learning_rate):
    '''
    Compiles the given model with the specified optimizer, using
    sparse_categorical_crossentropy as the loss function.
    :param model: the tensorflow.keras model to compile.
    :param optimizer: one of "sgd", "adam" (used principally for training)
        or "adadelta".
    :param learning_rate: float learning rate to use with the optimizer.
    :return: the compiled model.
    '''
    if optimizer.lower() == "sgd":
        opt = optimizers.SGD(lr=learning_rate,
                             clipnorm=0.1,
                             momentum=0.95,
                             nesterov=True)
    elif optimizer.lower() == "adam":
        opt = optimizers.Adam(lr=learning_rate)
    elif optimizer.lower() == "adadelta":
        opt = optimizers.Adadelta(lr=learning_rate)
    else:
        raise ValueError('Unsupported optimizer: {}'.format(optimizer))
    sess = K.get_session()
    init = tf.global_variables_initializer()
    sess.run(init)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=opt,
                  metrics=["acc"])
    return model
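A short usage sketch for this helper, assuming the tensorflow.keras namespace and a hypothetical two-layer classifier:

from tensorflow.keras import models, layers

net = models.Sequential([
    layers.Dense(64, activation='relu', input_shape=(20,)),
    layers.Dense(5, activation='softmax'),  # sparse integer labels in [0, 5)
])
net = compile_keras_model(net, 'adam', learning_rate=1e-3)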
Example #11
 def compiling(self, model):
     
     sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
     model.compile(loss=categorical_crossentropy, optimizer=sgd, metrics=['accuracy'])
     #model.compile(optimizer='adam', loss=categorical_crossentropy, metrics=['accuracy'])
     #model.compile(optimizer='adam', loss=categorical_crossentropy, metrics=[self.top_2_categorical_accuracy]) 
     #kCA- K.mean(K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)))         
     return model
Example #12
 def test_pass_invalid_optimizer_with_loss_scaling(self):
     with policy.policy_scope(policy.Policy('float32', loss_scale=10.)):
         x = layers.Input(shape=(1, ))
         y = AddLayer()(x)
         model = models.Model(x, y)
         with self.assertRaisesRegexp(ValueError,
                                      'optimizer" must be an instance of '):
             model.compile(optimizers.SGD(1.), 'mse')
Example #13
    def testOptimizersCompatibility(self, opt_str, test_weights, test_numeric):
        np.random.seed(1331)
        with self.cached_session():
            train_samples = 20
            input_dim = 3
            num_classes = 2
            (x,
             y), _ = testing_utils.get_test_data(train_samples=train_samples,
                                                 test_samples=10,
                                                 input_shape=(input_dim, ),
                                                 num_classes=num_classes)
            y = keras.utils.to_categorical(y)

            num_hidden = 5
            model = testing_utils.get_small_sequential_mlp(
                num_hidden=num_hidden,
                num_classes=num_classes,
                input_dim=input_dim)

            old_mode = os.environ.get('TF2_BEHAVIOR', None)
            # Disable tf2 to create V1 optimizer.
            disable_tf2()
            if opt_str == 'momentum':
                opt_v1 = optimizers.SGD(momentum=0.9)
            else:
                opt_v1 = optimizers.get(opt_str)

            # Test compile and fit with v1 optimizer.
            model.compile(opt_v1, loss='categorical_crossentropy', metrics=[])
            model.fit(x, y, batch_size=5, epochs=1)
            model_dir = tempfile.mkdtemp()
            gfile.MakeDirs(model_dir)
            file_name = os.path.join(model_dir, 'model.h5')
            model.save(file_name)

            enable_tf2()
            # Test load and fit with v2 optimizer.
            model_2 = saving.load_model(file_name)
            opt_v2 = model_2.optimizer
            self.assertIsInstance(opt_v2, optimizer_v2.OptimizerV2)
            # set_weights is called inside load_model but exception is swallowed,
            # this call checks the weights can be set correctly.
            if test_weights:
                opt_v2.set_weights(opt_v1.get_weights())
            if test_numeric:
                hist_1 = model.fit(x, y, batch_size=5, epochs=1, shuffle=False)
                hist_2 = model_2.fit(x,
                                     y,
                                     batch_size=5,
                                     epochs=1,
                                     shuffle=False)
                self.assertAllClose(model.get_weights(), model_2.get_weights())
                self.assertAllClose(hist_1.history['loss'],
                                    hist_2.history['loss'])

            if old_mode is not None:
                os.environ['TF2_BEHAVIOR'] = old_mode
Example #14
def compile(keras_model,
            lr,
            momentum,
            clipnorm,
            weight_decay,
            loss_weights={}):
    """
    编译模型,增加损失函数,L2正则化以
    :param keras_model:
    :param lr:
    :param momentum:
    :param clipnorm:
    :param weight_decay
    :param loss_weights:
    :return:
    """
    # 优化目标
    optimizer = optimizers.SGD(lr=lr, momentum=momentum, clipnorm=clipnorm)
    # 增加损失函数,首先清除之前的,防止重复
    keras_model._losses = []
    keras_model._per_input_losses = {}
    loss_names = loss_weights.keys()
    for name in loss_names:
        layer = keras_model.get_layer(name)
        if layer is None or layer.output in keras_model.losses:
            continue
        loss = (tf.reduce_mean(layer.output, keepdims=True) *
                loss_weights.get(name, 1.))
        keras_model.add_loss(loss)

    # Add L2 regularization
    # Skip the gamma and beta weights of batch-normalization layers
    reg_losses = [
        regularizers.l2(weight_decay)(w) for w in keras_model.trainable_weights
        if 'gamma' not in w.name and 'beta' not in w.name
    ]
    keras_model.add_loss(tf.add_n(reg_losses))

    # Compile
    keras_model.compile(optimizer=optimizer,
                        loss=[None] * len(keras_model.outputs))  # use dummy losses

    # Add a metric for each loss
    for name in loss_names:
        if name in keras_model.metrics_names:
            continue
        layer = keras_model.get_layer(name)
        if layer is None:
            continue
        loss = (tf.reduce_mean(layer.output, keepdims=True) *
                loss_weights.get(name, 1.))
        keras_model.add_metric(loss, aggregation='mean', name=name)
    # Add a metric for the regularization loss
    keras_model.add_metric(tf.add_n(reg_losses),
                           aggregation='mean',
                           name='regular_loss')
Example #15
def compile_model(model,
                  optimizer='adam',
                  loss='bce-dice',
                  threshold=0.5,
                  dice=False,
                  weight_decay=0.0,
                  exclude_bn=True,
                  deep_supervised=False):
    if loss == 'bce':
        _loss = weighted_binary_crossentropy
    elif loss == 'bce-dice':
        _loss = weighted_bce_dice_loss
    elif loss == 'lovasz':
        _loss = weighted_lovasz_hinge
    elif loss == 'lovasz-dice':
        _loss = weighted_lovasz_dice_loss
    elif loss == 'lovasz-inv':
        _loss = weighted_lovasz_hinge_inversed
    elif loss == 'lovasz-double':
        _loss = weighted_lovasz_hinge_double
    else:
        raise ValueError('Unknown loss: {}'.format(loss))

    if weight_decay != 0.0:
        _l2_loss = l2_loss(weight_decay, exclude_bn)
        loss = lambda true, pred: _loss(true, pred) + _l2_loss
    else:
        loss = _loss

    if optimizer == 'msgd':
        optimizer = optimizers.SGD(momentum=0.9)

    if not deep_supervised:
        model.compile(optimizer=optimizer,
                      loss=loss,
                      metrics=get_metrics(threshold))
    else:
        loss_pixel = loss_noempty(loss)
        losses = {
            'output_final': loss,
            'output_pixel': loss_pixel,
            'output_image': bce_with_logits
        }
        loss_weights = {
            'output_final': 1.0,
            'output_pixel': 0.5,
            'output_image': 0.1
        }
        metrics = {
            'output_final': get_metrics(threshold),
            'output_pixel': get_metrics(threshold),
            'output_image': accuracy_with_logits
        }
        model.compile(optimizer=optimizer,
                      loss=losses,
                      loss_weights=loss_weights,
                      metrics=metrics)
    return model
Example #16
 def test_pass_invalid_optimizer_with_loss_scaling(self):
     with policy.policy_scope(policy.Policy('float32', loss_scale=10.)):
         x = layers.Input(shape=(1, ))
         y = mp_test_util.MultiplyLayer()(x)
         model = models.Model(x, y)
         if context.executing_eagerly():
             error_msg = 'Use a `tf.keras` Optimizer instead'
         else:
             error_msg = 'optimizer" must be an instance of '
         with self.assertRaisesRegexp(ValueError, error_msg):
             model.compile(optimizers.SGD(1.), 'mse')
Example #17
def main(args):
    set_gpu_growth()
    dataset = VocDataset(cfg.voc_path, class_mapping=cfg.class_mapping)
    dataset.prepare()
    train_img_info = [info for info in dataset.get_image_info_list() if info.type == 'trainval']  # training set
    print("train_img_info:{}".format(len(train_img_info)))
    test_img_info = [info for info in dataset.get_image_info_list() if info.type == 'test']  # test set
    print("test_img_info:{}".format(len(test_img_info)))

    m = ssd_model(cfg.feature_fn, cfg.cls_head_fn, cfg.rgr_head_fn, cfg.input_shape,
                  cfg.num_classes, cfg.specs, cfg.max_gt_num,
                  cfg.positive_iou_threshold, cfg.negative_iou_threshold,
                  cfg.negatives_per_positive, cfg.min_negatives_per_image)

    # Load the pre-trained weights
    init_epoch = args.init_epoch
    if args.init_epoch > 0:
        text = '{}-{}-{}'.format(cfg.base_model_name, args.batch_size, args.lr)
        m.load_weights('/tmp/ssd-{}.{:03d}.h5'.format(text, init_epoch), by_name=True)
    else:
        m.load_weights(cfg.pretrained_weight_path, by_name=True)
    # Training data generator
    transforms = TrainAugmentation(cfg.image_size, cfg.mean_pixel, cfg.std)
    train_gen = Generator(train_img_info,
                          transforms,
                          cfg.input_shape,
                          args.batch_size,
                          cfg.max_gt_num)
    # Validation data generator
    val_trans = TrainAugmentation(cfg.image_size, cfg.mean_pixel, cfg.std)
    val_gen = Generator(test_img_info,
                        val_trans,
                        cfg.input_shape,
                        args.batch_size,
                        cfg.max_gt_num)
    optimizer = optimizers.SGD(
        lr=args.lr, momentum=args.momentum,
        clipnorm=args.clipnorm)
    m.compile(optimizer=optimizer,
              loss={"class_loss": lambda y_true, y_pred: y_pred,
                    "bbox_loss": lambda y_true, y_pred: y_pred})

    m.summary()

    # Train
    m.fit_generator(train_gen,
                    epochs=args.epochs,
                    verbose=1,
                    initial_epoch=init_epoch,
                    validation_data=val_gen,
                    use_multiprocessing=False,
                    workers=10,
                    callbacks=get_call_back(args.lr, args.batch_size))
Example #18
    def init_model(self,
                   input_shape,
                   num_classes,
                   **kwargs):
        model = Sequential()
        model.add(Conv2D(100, (3, 1), input_shape=input_shape, padding='same'))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(3, 1), strides=2, padding='same'))

        model.add(Conv2D(64, (3, 1), padding='same'))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 1), strides=2, padding='same'))

        model.add(Conv2D(128, (3, 1), padding='same'))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 1), strides=2, padding='same'))

        model.add(Conv2D(128, (3, 1), padding='same'))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 1), strides=2, padding='same'))

        model.add(Conv2D(128, (3, 1), padding='same'))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 1), strides=2, padding='same'))

        model.add(Conv2D(128, (3, 1), padding='same'))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 1), strides=2, padding='same'))

        model.add(Flatten())
        model.add(Dropout(rate=0.5))
        model.add(Dense(1024, activation='relu'))
        model.add(Dropout(rate=0.5))
        model.add(Dense(512, activation='relu'))
        model.add(Dense(num_classes))
        model.add(Activation('softmax'))
        optimizer = optimizers.SGD(
            lr=1e-4, decay=5e-5, momentum=0.9, clipnorm=4)
        model.compile(loss='sparse_categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])
        model.summary()

        self._model = model
        self.is_init = True
Example #19
    def analyze(self):
        # Convert the inputs into numpy arrays
        Inputs = np.asarray(self.Inputs)
        Targets = np.asarray(self.Targets)

        # TODO: build the layers in a loop instead of listing them explicitly
        # Create the model from layers
        model = Sequential([
            keras.layers.Dense(self.inputsize,
                               activation=tf.nn.sigmoid,
                               input_dim=self.inputsize),
            keras.layers.Dense(self.middlesize, activation=tf.nn.tanh),
            keras.layers.Dense(self.middlesize, activation=tf.nn.tanh),
            keras.layers.Dense(self.outputize, activation=tf.nn.sigmoid)
        ])

        sgd = optimizers.SGD(lr=self.lr,
                             decay=1e-6,
                             momentum=0.9,
                             nesterov=True)

        model.compile(optimizer=sgd,
                      loss='mean_squared_error',
                      metrics=['accuracy'])

        # Train the model
        history = model.fit(Inputs, Targets, epochs=self.epochs)

        test = np.asarray(self.test)
        predictions = model.predict(test)

        print("Estimaciones = ")
        print(predictions)
        print(np.round(predictions, 2))

        # summarize history for accuracy
        plt.plot(history.history['acc'])
        #plt.plot(history.history['val_acc'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()
        # summarize history for loss
        plt.plot(history.history['loss'])
        #plt.plot(history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()
Example #20
  def testInvalidConstructorArguments(self):
    with self.assertRaisesRegexp(ValueError,
                                 'must be an instance of OptimizerV2'):
      loss_scale_optimizer.LossScaleOptimizer(optimizers.SGD(), 10.)

    with self.assertRaisesRegexp(ValueError, 'does not support wrapping '
                                             'optimizers with a clipnorm'):
      loss_scale_optimizer.LossScaleOptimizer(
          gradient_descent.SGD(1.0, clipnorm=1.0), 10.)

    with self.assertRaisesRegexp(ValueError, 'does not support wrapping '
                                             'optimizers with a clipvalue'):
      loss_scale_optimizer.LossScaleOptimizer(
          gradient_descent.SGD(1.0, clipvalue=1.0), 10.)
Example #21
    def instantiate_model(self):

        self.model = Sequential()
        self.model.add(
            Dense(128 * 128 * 3,
                  activation="relu",
                  input_shape=self.input_shape))
        self.model.add(Dense(32, activation="relu"))
        self.model.add(Dense(self.n_output, activation="softmax"))
        self.optimizer = optimizers.SGD(self.lr,
                                        momentum=0.0,
                                        decay=0.0,
                                        nesterov=False)
        self.model.compile(self.optimizer, self.loss, self.metrics)
Example #22
def predictIncompleteFusion(x_train, y_train, x_test, y_test):

    classifier = Sequential()
    classifier.add(Dense(45, activation='sigmoid', kernel_initializer='random_normal', input_dim=13))
    classifier.add(Dropout(0.6))
    classifier.add(Dense(45, activation='sigmoid', kernel_initializer='random_normal'))
    classifier.add(Dropout(0.6))
    classifier.add(Dense(1, activation='sigmoid', kernel_initializer='random_normal'))
    optimizer=optimizers.SGD(lr=0.9, momentum=0.4)
    classifier.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])

    history = classifier.fit(x_train, y_train, validation_data=(x_test, y_test), batch_size=1, epochs=250)

    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title("IncomFus' accuracy graph")
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['accuracy', 'validation accuracy'], loc='upper left')
    plt.show()

    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title("IncomFus' loss graph")
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['loss', 'validation loss'], loc='upper left')
    plt.show()

    y_pred = classifier.predict(x_test)

    pd.DataFrame(y_pred).to_csv("C:\\Users\\nurizdau\\Desktop\\predicted_probs_incomfus.csv")

    y_pred[:] = (y_pred[:] > 0.5)

    # pd.DataFrame(y_pred).to_csv("C:\\Users\\nurizdau\\Desktop\\predicted_results_incomfus.csv")

    eval_model = classifier.evaluate(x_test, y_test)
    resultString = "Accuracy (incomplete fusion) is printed here: " + str(eval_model[1])

    new_test_y = y_test.values.flatten()
    new_pred_y = y_pred.astype(int).flatten()
    cm = confusion_matrix(new_test_y, new_pred_y)
    print(cm)
    print("accuracy of Incomplete Fusion is: ", ((cm[0][0] + cm[1][1]) / np.sum(cm)) * 100)
    print("sensitivity of Incomplete Fusion is: ", (cm[0][0] / np.sum(cm[0])) * 100)
    print("specificity of Incomplete Fusion is: ", (cm[1][1] / np.sum(cm[1])) * 100)

    return resultString
Example #23
def get_optimizer(args):
    if args.optimizer == 'Adam':
        kwargs = dict(
            lr=args.lr,
            beta_1=args.beta1,
            beta_2=args.beta2,
            epsilon=args.epsilon
        )
        optimizer = optimizers.Adam(**kwargs)
    elif args.optimizer == 'SGD':
        kwargs = dict(
            lr=args.lr,
            momentum=args.momentum
        )
        optimizer = optimizers.SGD(**kwargs)
    else:
        raise NotImplementedError('Unsupported Optimizer {}'.format(args.optimizer))
    return optimizer
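get_optimizer only needs an object with the matching attributes, so it can be exercised without a full command-line parser; a sketch using argparse.Namespace:

import argparse

sgd = get_optimizer(argparse.Namespace(optimizer='SGD', lr=0.01, momentum=0.9))
adam = get_optimizer(argparse.Namespace(optimizer='Adam', lr=1e-3,
                                        beta1=0.9, beta2=0.999, epsilon=1e-7))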
Example #24
def createModel(learn_rate=0.01,
                momentum=0,
                initialization_mode='random_normal',
                activation='sigmoid',
                dropout_rate=0.0,
                neuron=100,
                loss='mean_squared_error',
                optimizer='SGD'):
    # Wire the keyword hyperparameters into the model so they can be tuned.
    classifier = tf.keras.Sequential()
    classifier.add(
        Dense(neuron,
              activation=activation,
              kernel_initializer=initialization_mode,
              input_dim=13))
    classifier.add(Dropout(dropout_rate))
    classifier.add(
        Dense(1, activation='sigmoid', kernel_initializer=initialization_mode))
    optimizer = optimizers.SGD(lr=learn_rate, momentum=momentum)
    classifier.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])

    return classifier
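The keyword signature of createModel suggests it was written for scikit-learn hyperparameter search; a sketch of that pattern, assuming the KerasClassifier wrapper shipped with TF 1.x/2.x and 13-feature training data x_train/y_train:

from sklearn.model_selection import GridSearchCV
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier

# Wrap the builder so scikit-learn can rebuild it for each parameter set.
clf = KerasClassifier(build_fn=createModel, epochs=100, batch_size=10, verbose=0)
param_grid = {
    'learn_rate': [0.01, 0.1, 0.9],
    'momentum': [0.0, 0.4, 0.9],
}
grid = GridSearchCV(estimator=clf, param_grid=param_grid, cv=3)
# grid_result = grid.fit(x_train, y_train)
# print(grid_result.best_params_, grid_result.best_score_)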
Example #25
    def optimizer(self):
        if self._optimizer == 'rms':
            if not self._optim_config:
                self._optim_config = {
                    'lr': 1e-5,
                    'decay': 0.9,
                    'rho': 0.9,
                    'epsilon': 1e-10
                }
            self._optimizer = optimizers.RMSprop(**self._optim_config)
            #self._optimizer = tf.train.RMSPropOptimizer(self._optim_config)
        elif self._optimizer == 'adam':
            if not self._optim_config:
                self._optim_config = {
                    'lr': 1e-5,
                    'beta_1': 0.9,
                    'beta_2': 0.999,
                    'epsilon': 1e-08,
                    'decay': 0.0,
                    'amsgrad': False
                }
            self._optimizer = optimizers.Adam(**self._optim_config)
            #self._optimizer = tf.train.AdamOptimizer(self._optim_config)
        elif self._optimizer == 'sgd':
            if not self._optim_config:
                self._optim_config = {
                    'lr': 1e-5,
                    'momentum': 0.0,
                    'decay': 0.8,
                    'nesterov': False
                }
            self._optimizer = optimizers.SGD(**self._optim_config)
            #self._optimizer = tf.train.GradientDescentOptimizer(self._optim_config)

        elif type(self._optimizer) not in [
                optimizers.Adam, optimizers.SGD, optimizers.RMSprop
        ]:
            logging.error('Unrecognized optimizer type')

        return self._optimizer
Example #26
    def instantiate_model(self):
        self.model = create_model_resnet(
            self.input_shape,
            n_output=self.n_output,
            normalize=self.normalize,
            kernel_shape=self.kernel_shape,
            size_blocks=self.size_blocks,
            resnet=self.resnet,
            dropout=self.dropout,
            n_channels_by_block=self.n_channels_by_block,
            size_dense=self.size_dense,
            average_pooling=self.average_pooling,
            separable_conv=self.separable_conv)
        print(self.model.summary())

        self.optimizer = optimizers.Adamax(lr=self.lr) if self.optimizer == 'adamax' \
                    else optimizers.RMSprop(lr=self.lr) if self.optimizer == 'rmsprop' \
                    else optimizers.SGD(lr=self.lr, momentum=.9) if self.optimizer == 'sgd' \
                    else optimizers.Adam(lr=self.lr) if self.optimizer == 'adam' else None

        # TODO
        self.datagen = None
Example #27
  def test_pretrained_weights(self):
    keras_model, (_, _), (_, _), _, _ = get_resource_for_simple_model()
    keras_model.compile(
        loss='categorical_crossentropy',
        optimizer=tf.compat.v1.train.RMSPropOptimizer(1e-3),
        metrics=['mse', keras.metrics.CategoricalAccuracy()])
    keras_model.train_on_batch(
        np.random.random((10,) + _INPUT_SIZE), np.random.random(
            (10, _NUM_CLASS)))
    weights = keras_model.get_weights()
    keras_model, (_, _), (_, _), _, _ = get_resource_for_simple_model()
    keras_model.set_weights(weights)

    if tf.executing_eagerly():
      sgd_optimizer = optimizer_v2.SGD(lr=0.0001, momentum=0.9)
    else:
      sgd_optimizer = optimizer_v1.SGD(lr=0.0001, momentum=0.9)
    keras_model.compile(
        loss='categorical_crossentropy',
        optimizer=sgd_optimizer,
        metrics=['mse', keras.metrics.CategoricalAccuracy()])
    keras_lib.model_to_estimator(keras_model=keras_model, config=self._config)
Example #28
    def _model(self):
        model = Sequential()
        model.add(Flatten())
        model.add(Dense(units=64, input_dim=self.state_size,
                        activation="relu"))
        model.add(Dense(units=32, activation="relu"))
        model.add(Dense(units=8, activation="relu"))
        model.add(Dense(self.action_size, activation="softmax"))
        ## Define multiple optional optimizers
        sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        adam = optimizers.Adam(lr=0.01,
                               beta_1=0.9,
                               beta_2=0.999,
                               epsilon=1,
                               decay=0.0,
                               amsgrad=False)

        ## Compile model with metrics
        model.compile(optimizer=adam,
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        return model
Example #29
def create_nn_classifier():
    """
    Create neural network for image rendering approach

    Args:
        None
    Returns:
        Keras model (compiled) implementing corresponding interface
    """
    base_model = ResNet50(weights='imagenet', include_top=False)
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(4, activation='softmax')(x)

    model = Model(inputs=base_model.input, outputs=predictions)
    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
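As written, create_nn_classifier fine-tunes every ResNet50 layer; for pure feature extraction the base would be frozen before compiling. A sketch of that variation (not part of the original function):

base_model = ResNet50(weights='imagenet', include_top=False)
for layer in base_model.layers:
    layer.trainable = False  # keep the ImageNet features fixed

x = GlobalAveragePooling2D()(base_model.output)
x = Dense(1024, activation='relu')(x)
predictions = Dense(4, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(optimizer=optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),
              loss='categorical_crossentropy',
              metrics=['accuracy'])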
Example #30
    def __init__(self,
                 gui=1,
                 training=0,
                 model='ResNet50',
                 backend='INTERPRETER',
                 verbose=1):
        self.gui = gui
        self.verbose = verbose
        self.backend = backend
        ngraph_bridge.set_backend(self.backend)

        if gui:
            self.gui = widgets.VBox(children=[
                self.tab, self.progress_box, self.out_initial, self.out,
                self.out_stats
            ])
            display(self.init_gui())

        # Images
        self.test_class_indices = []
        self.test_dir = None
        self.init_images()

        self.sgd = optimizers.SGD(lr=0.01,
                                  decay=1e-6,
                                  momentum=0.9,
                                  nesterov=True)
        self.predicted_class_indices_init = []
        self.wrong_guesses = []

        self.train_button.disabled = False
        self.fine_tune_button.disabled = False

        self.init_model()

        if training:
            self.train_model(self.train_button, model=model)