Example #1
def decode_predictions(preds, top=5):
    LABELS = None
    if len(preds.shape) == 2:
        if preds.shape[1] == 2622:
            fpath = get_file('rcmalli_vggface_labels_v1.npy',
                             V1_LABELS_PATH,
                             cache_subdir=VGGFACE_DIR)
            # allow_pickle=True is required on NumPy >= 1.16.3, since the
            # label file stores an object array.
            LABELS = np.load(fpath, allow_pickle=True)
        elif preds.shape[1] == 8631:
            fpath = get_file('rcmalli_vggface_labels_v2.npy',
                             V2_LABELS_PATH,
                             cache_subdir=VGGFACE_DIR)
            LABELS = np.load(fpath, allow_pickle=True)
        else:
            raise ValueError(
                '`decode_predictions` expects '
                'a batch of predictions '
                '(i.e. a 2D array of shape (samples, 2622) for V1 or '
                '(samples, 8631) for V2). '
                'Found array with shape: ' + str(preds.shape))
    else:
        raise ValueError(
            '`decode_predictions` expects '
            'a batch of predictions '
            '(i.e. a 2D array of shape (samples, 2622) for V1 or '
            '(samples, 8631) for V2). '
            'Found array with shape: ' + str(preds.shape))
    results = []
    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [[str(LABELS[i].encode('utf8')), pred[i]]
                  for i in top_indices]
        result.sort(key=lambda x: x[1], reverse=True)
        results.append(result)
    return results
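
This helper comes from the keras_vggface library. A minimal smoke test, assuming that package and its downloadable weights are available (a random input will, of course, decode to arbitrary identities):

import numpy as np
from keras_vggface.vggface import VGGFace
from keras_vggface import utils

model = VGGFace(model='vgg16')            # VGGFace V1: 2622 identity classes
x = np.random.rand(1, 224, 224, 3).astype('float32')
preds = model.predict(x)                  # shape (1, 2622)
print(utils.decode_predictions(preds, top=3))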
Example #2
def mnist_makedf(url, datasetInfo):

    # `get_file` is exposed at tensorflow.keras.utils, not utils.data_utils.
    from tensorflow.keras.utils import get_file
    path = get_file("mnist.npz", origin=url)

    f = np.load(path)
    #datasetInfo["classwidth"] = f['y_train'].shape[1]
    x_train, y_train = f['x_train'], f['y_train']
    x_test, y_test = f['x_test'], f['y_test']
    f.close()
    flattentrain = flatten_mnist(x_train)
    flattentest = flatten_mnist(x_test)
    all_train = np.concatenate(
        (flattentrain, y_train.reshape((y_train.shape[0], 1))), axis=1)
    all_test = np.concatenate(
        (flattentest, y_test.reshape((y_test.shape[0], 1))), axis=1)
    all_data = np.concatenate((all_train, all_test), axis=0)

    feature_values = all_data[:, :-1].astype("float32")
    feature_values /= 255
    target_values = all_data[:, -1]
    target_values = target_values.reshape((target_values.shape[0], 1))
    all_normalized_data = np.concatenate((feature_values, target_values),
                                         axis=1)
    retdf = pd.DataFrame(data=all_normalized_data)

    return retdf
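
A hedged usage sketch: flatten_mnist is a helper the snippet assumes, so a minimal stand-in is included, and the URL below is the mirror used by keras.datasets.mnist.

import numpy as np

def flatten_mnist(images):
    # Stand-in for the snippet's helper: (n, 28, 28) -> (n, 784).
    return images.reshape((images.shape[0], -1))

url = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz'
df = mnist_makedf(url, datasetInfo={})
print(df.shape)  # (70000, 785): 784 normalized pixels plus one label column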
Example #3
def vgg19(input_shape, include_top=False, weights='imagenet', **kwargs):
    """VGG 19-layer model (configuration "E")

    Args:
        input_shape:   input shape (224x224x3)
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = VGG(make_layers(cfg['E'], input_shape), **kwargs)

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels.h5',
                                    model_urls['vgg19']['with_top'],
                                    cache_subdir='models',
                                    file_hash='cbe5617147190e668d6c5d5026f83318')
        else:
            weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',
                                    model_urls['vgg19']['with_notop'],
                                    cache_subdir='models',
                                    file_hash='253f8cb515780f3b799900260a226db6')
        model.load_weights(weights_path)

    return model
Example #4
def vgg16(input_shape, include_top=False, weights='imagenet', **kwargs):
    """VGG 16-layer model (configuration "D")

    Args:
        input_shape:   input shape (224x224x3)
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = VGG(make_layers(cfg['D'], input_shape), **kwargs)

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5',
                                    model_urls['vgg16']['with_top'],
                                    cache_subdir='models',
                                    file_hash='64373286793e3c8b2b4e3219cbf3544b')
        else:
            weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
                                    model_urls['vgg16']['with_notop'],
                                    cache_subdir='models',
                                    file_hash='6d6bbae143d832006294945121d1f1fc')
        model.load_weights(weights_path)

    return model
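
Both builders share one calling convention. A usage sketch, assuming the module-level VGG, make_layers, cfg, and model_urls names from the snippets above are in scope:

model = vgg16(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
model.summary()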
Example #5
def load_data(path='conll2000.zip', min_freq=2):
    path = get_file(path,
                    origin='https://raw.githubusercontent.com/nltk'
                    '/nltk_data/gh-pages/packages/corpora/conll2000.zip')
    print(path)
    archive = ZipFile(path, 'r')
    train = _parse_data(archive.open('conll2000/train.txt'))
    test = _parse_data(archive.open('conll2000/test.txt'))
    archive.close()

    word_counts = Counter(row[0].lower() for sample in train for row in sample)
    vocab = ['<pad>', '<unk>']
    vocab += [w for w, f in word_counts.items() if f >= min_freq]
    # in alphabetic order
    pos_tags = sorted(set(row[1] for sample in train + test for row in sample))
    # in alphabetic order
    chunk_tags = sorted(set(row[2] for sample in train + test for row in sample))

    train = _process_data(train, vocab, pos_tags, chunk_tags)
    test = _process_data(test, vocab, pos_tags, chunk_tags)
    return train, test, (vocab, pos_tags, chunk_tags)
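
A usage sketch; _parse_data and _process_data are private helpers of the module this snippet was taken from:

train, test, (vocab, pos_tags, chunk_tags) = load_data()
print(len(vocab), 'words |', len(pos_tags), 'POS tags |', len(chunk_tags), 'chunk tags')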
Example #6
def load_dataframe(dataset_name):
    dataset_path = get_file(
        f'{dataset_name}.zip',
        origin=f'{BASE_PATH}/{dataset_name}.zip',
        extract=True,
        cache_subdir=Path('datasets') / 'omniglot'
    )
    dataset_dir = os.path.splitext(dataset_path)[0]
    # pandas removed DataFrame.append in 2.0; collect the per-alphabet frames
    # and concatenate them once instead.
    frames = []
    for root, _, files in os.walk(dataset_dir):
        if files:
            alphabet, label = Path(root).relative_to(dataset_dir).parts
            root = Path(root)
            image_names = [root / file for file in files]
            frames.append(pd.DataFrame(
                {'image_name': image_names, 'alphabet': alphabet, 'label': label}))
    dataset = (pd.concat(frames, ignore_index=True) if frames
               else pd.DataFrame(columns=['image_name', 'alphabet', 'label']))

    return dataset
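
A hedged call: BASE_PATH must point at a mirror hosting the Omniglot archives, and 'images_background' is the standard Omniglot background (training) split:

df = load_dataframe('images_background')
print(df.groupby('alphabet').size().head())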
Example #7
def ResNet50(weights="imagenet",
             input_shape=(224, 224, 3),
             output_size=1000,
             softmax=False,
             norm_layer=StatsBatchNorm):
    def block(x_in, kernel, filters, strides, stage, block, shortcut=False):
        conv_name = "res" + str(stage) + block + "_branch"
        bn_name = "bn" + str(stage) + block + "_branch"

        x = Conv2D(filters[0], 1, strides=strides, name=conv_name + "2a")(x_in)
        x = norm_layer(name=bn_name + "2a")(x)
        x = Activation("relu")(x)
        x = Conv2D(filters[1], kernel, padding="same",
                   name=conv_name + "2b")(x)
        x = norm_layer(name=bn_name + "2b")(x)
        x = Activation("relu")(x)
        x = Conv2D(filters[2], 1, name=conv_name + "2c")(x)
        x = norm_layer(name=bn_name + "2c")(x)

        if shortcut:
            s = Conv2D(filters[2], 1, strides=strides,
                       name=conv_name + "1")(x_in)
            s = norm_layer(name=bn_name + "1")(s)
        else:
            s = x_in

        return Activation("relu")(add([x, s]))

    x_in = Input(shape=input_shape)
    x = Conv2D(64, 7, strides=2, padding="same", name="conv1")(x_in)
    x = norm_layer(name="bn_conv1")(x)
    x = Activation("relu")(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = block(x, 3, [64, 64, 256], 1, 2, "a", shortcut=True)
    x = block(x, 3, [64, 64, 256], 1, 2, "b")
    x = block(x, 3, [64, 64, 256], 1, 2, "c")

    x = block(x, 3, [128, 128, 512], 2, 3, "a", shortcut=True)
    x = block(x, 3, [128, 128, 512], 1, 3, "b")
    x = block(x, 3, [128, 128, 512], 1, 3, "c")
    x = block(x, 3, [128, 128, 512], 1, 3, "d")

    x = block(x, 3, [256, 256, 1024], 2, 4, "a", shortcut=True)
    x = block(x, 3, [256, 256, 1024], 1, 4, "b")
    x = block(x, 3, [256, 256, 1024], 1, 4, "c")
    x = block(x, 3, [256, 256, 1024], 1, 4, "d")
    x = block(x, 3, [256, 256, 1024], 1, 4, "e")
    x = block(x, 3, [256, 256, 1024], 1, 4, "f")

    x = block(x, 3, [512, 512, 2048], 2, 5, "a", shortcut=True)
    x = block(x, 3, [512, 512, 2048], 1, 5, "b")
    x = block(x, 3, [512, 512, 2048], 1, 5, "c")

    x = AveragePooling2D((7, 7), name="avg_pool")(x)
    x = Flatten()(x)
    x = Dense(output_size, name="fc" + str(output_size))(x)
    if softmax:
        x = Activation("softmax")(x)

    model = Model(x_in, x, name="resnet50")

    if weights == "imagenet":
        weights_path = get_file(
            "resnet50_weights_tf_dim_ordering_tf_kernels.h5",
            RESNET50_WEIGHTS_PATH,
            cache_subdir="models",
            md5_hash="a7b3fe01876f51b976af0dea6bc144eb")
        model.load_weights(weights_path, by_name=True)

    return model
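
A sketch that skips the weight download; plain BatchNormalization is swapped in for the module's custom StatsBatchNorm layer, since any layer class with the same `name=` constructor argument works here:

from tensorflow.keras.layers import BatchNormalization

model = ResNet50(weights=None, output_size=10, softmax=True,
                 norm_layer=BatchNormalization)
model.compile(optimizer='sgd', loss='categorical_crossentropy')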
Example #8
def train(run_name, start_epoch, stop_epoch, img_w):
    # Input Parameters.
    img_h = 64
    words_per_epoch = 16000
    val_split = 0.2
    val_words = int(words_per_epoch * val_split)

    # Network parameters.
    conv_filters = 16
    kernel_size = (3, 3)
    pool_size = 2
    time_dense_size = 32
    rnn_size = 512
    minibatch_size = 32

    if K.image_data_format() == 'channels_first':
        input_shape = (1, img_w, img_h)
    else:
        input_shape = (img_w, img_h, 1)

    fdir = os.path.dirname(
        get_file('wordlists.tgz',
                 origin='http://www.mythic-ai.com/datasets/wordlists.tgz',
                 untar=True))

    img_gen = TextImageGenerator(
        monogram_file=os.path.join(fdir, 'wordlist_mono_clean.txt'),
        bigram_file=os.path.join(fdir, 'wordlist_bi_clean.txt'),
        minibatch_size=minibatch_size,
        img_w=img_w,
        img_h=img_h,
        downsample_factor=(pool_size**2),
        val_split=words_per_epoch - val_words)
    act = 'relu'
    input_data = Input(name='the_input', shape=input_shape, dtype='float32')
    inner = Conv2D(conv_filters,
                   kernel_size,
                   padding='same',
                   activation=act,
                   kernel_initializer='he_normal',
                   name='conv1')(input_data)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max1')(inner)
    inner = Conv2D(conv_filters,
                   kernel_size,
                   padding='same',
                   activation=act,
                   kernel_initializer='he_normal',
                   name='conv2')(inner)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max2')(inner)

    conv_to_rnn_dims = (img_w // (pool_size**2),
                        (img_h // (pool_size**2)) * conv_filters)
    inner = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(inner)

    # Cuts down input size going into RNN:
    inner = Dense(time_dense_size, activation=act, name='dense1')(inner)

    # Two layers of bidirectional GRUs.
    # GRU seems to work as well, if not better than LSTM:
    gru_1 = GRU(rnn_size,
                return_sequences=True,
                kernel_initializer='he_normal',
                name='gru1')(inner)
    gru_1b = GRU(rnn_size,
                 return_sequences=True,
                 go_backwards=True,
                 kernel_initializer='he_normal',
                 name='gru1_b')(inner)
    gru1_merged = add([gru_1, gru_1b])
    gru_2 = GRU(rnn_size,
                return_sequences=True,
                kernel_initializer='he_normal',
                name='gru2')(gru1_merged)
    gru_2b = GRU(rnn_size,
                 return_sequences=True,
                 go_backwards=True,
                 kernel_initializer='he_normal',
                 name='gru2_b')(gru1_merged)

    # Transforms RNN output to character activations:
    inner = Dense(img_gen.get_output_size(),
                  kernel_initializer='he_normal',
                  name='dense2')(concatenate([gru_2, gru_2b]))
    y_pred = Activation('softmax', name='softmax')(inner)
    Model(inputs=input_data, outputs=y_pred).summary()

    labels = Input(name='the_labels',
                   shape=[img_gen.absolute_max_string_len],
                   dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')
    # Keras doesn't currently support loss funcs with extra parameters
    # so CTC loss is implemented in a lambda layer.
    loss_out = Lambda(ctc_lambda_func, output_shape=(1, ),
                      name='ctc')([y_pred, labels, input_length, label_length])

    # Clipnorm seems to speed up convergence.
    sgd = SGD(learning_rate=0.02, decay=1e-6, momentum=0.9, nesterov=True,
              clipnorm=5)

    model = Model(inputs=[input_data, labels, input_length, label_length],
                  outputs=loss_out)

    # The loss calc occurs elsewhere, so use a dummy lambda func for the loss.
    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd)
    if start_epoch > 0:
        weight_file = os.path.join(
            OUTPUT_DIR,
            os.path.join(run_name, 'weights%02d.h5' % (start_epoch - 1)))
        model.load_weights(weight_file)
    # Captures output of softmax so we can decode the output during visualization.
    test_func = K.function([input_data], [y_pred])

    viz_cb = VizCallback(run_name, test_func, img_gen.next_val())

    # `fit_generator` is deprecated; `fit` accepts generators directly in TF 2.
    model.fit(img_gen.next_train(),
              steps_per_epoch=(words_per_epoch - val_words) // minibatch_size,
              epochs=stop_epoch,
              validation_data=img_gen.next_val(),
              validation_steps=val_words // minibatch_size,
              callbacks=[viz_cb, img_gen],
              initial_epoch=start_epoch)
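
A hypothetical driver call, mirroring how the original Keras image_ocr example invokes training (the run name is illustrative):

train(run_name='ocr_run', start_epoch=0, stop_epoch=20, img_w=128)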
Example #9
def WideResidualNetwork(depth=28,
                        width=8,
                        dropout_rate=0.0,
                        include_top=True,
                        weights='cifar10',
                        input_tensor=None,
                        input_shape=None,
                        classes=10,
                        activation='softmax'):
    """Instantiate the Wide Residual Network architecture,
        optionally loading weights pre-trained
        on CIFAR-10. Note that when using TensorFlow,
        for best performance you should set
        `image_dim_ordering="tf"` in your Keras config
        at ~/.keras/keras.json.

        The model and the weights are compatible with both
        TensorFlow and Theano. The dimension ordering
        convention used by the model is the one
        specified in your Keras config file.

        # Arguments
            depth: number of layers in the network
            width: multiplier to the ResNet width (number of filters)
            dropout_rate: dropout rate
            include_top: whether to include the fully-connected
                layer at the top of the network.
            weights: one of `None` (random initialization) or
                "cifar10" (pre-training on CIFAR-10)..
            input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
                to use as image input for the model.
            input_shape: optional shape tuple, only to be specified
                if `include_top` is False (otherwise the input shape
                has to be `(32, 32, 3)` (with `tf` dim ordering)
                or `(3, 32, 32)` (with `th` dim ordering).
                It should have exactly 3 inputs channels,
                and width and height should be no smaller than 8.
                E.g. `(200, 200, 3)` would be one valid value.
            classes: optional number of classes to classify images
                into, only to be specified if `include_top` is True, and
                if no `weights` argument is specified.

        # Returns
            A Keras model instance.
        """

    if weights not in {'cifar10', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `cifar10` '
                         '(pre-training on CIFAR-10).')

    if weights == 'cifar10' and include_top and classes != 10:
        raise ValueError('If using `weights` as CIFAR 10 with `include_top`'
                         ' as true, `classes` should be 10')

    if (depth - 4) % 6 != 0:
        raise ValueError('Depth of the network must be such that '
                         '(depth - 4) is divisible by 6.')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=32,
                                      min_size=8,
                                      data_format=K.image_dim_ordering(),
                                      require_flatten=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = __create_wide_residual_network(classes, img_input, include_top, depth,
                                       width, dropout_rate, activation)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='wide-resnet')

    # load weights
    if weights == 'cifar10':
        if (depth == 28) and (width == 8) and (dropout_rate == 0.0):
            # Default parameters match. Weights for this model exist:

            if K.image_dim_ordering() == 'th':
                if include_top:
                    h5_file = 'wide_resnet_28_8_th_dim_ordering_th_kernels.h5'
                    weights_path = get_file(h5_file,
                                            TH_WEIGHTS_PATH,
                                            cache_subdir='models')
                else:
                    h5_file = 'wide_resnet_28_8_th_dim_ordering_th_kernels_no_top.h5'
                    weights_path = get_file(h5_file,
                                            TH_WEIGHTS_PATH_NO_TOP,
                                            cache_subdir='models')

                model.load_weights(weights_path)

                if K.backend() == 'tensorflow':
                    warnings.warn(
                        'You are using the TensorFlow backend, yet you '
                        'are using the Theano '
                        'image dimension ordering convention '
                        '(`image_dim_ordering="th"`). '
                        'For best performance, set '
                        '`image_dim_ordering="tf"` in '
                        'your Keras config '
                        'at ~/.keras/keras.json.')
                    convert_all_kernels_in_model(model)
            else:
                if include_top:
                    h5_file = 'wide_resnet_28_8_tf_dim_ordering_tf_kernels.h5'
                    weights_path = get_file(h5_file,
                                            TF_WEIGHTS_PATH,
                                            cache_subdir='models')
                else:
                    h5_file = 'wide_resnet_28_8_tf_dim_ordering_tf_kernels_no_top.h5'
                    weights_path = get_file(h5_file,
                                            TF_WEIGHTS_PATH_NO_TOP,
                                            cache_subdir='models')

                model.load_weights(weights_path)

                if K.backend() == 'theano':
                    convert_all_kernels_in_model(model)

    return model
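
A usage sketch; depth=28, width=8 with dropout 0.0 is the only configuration for which this module publishes CIFAR-10 weights (TH_WEIGHTS_PATH and friends are module-level constants):

model = WideResidualNetwork(depth=28, width=8, weights='cifar10')
model.summary()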
Example #10
def NASNet(input_shape=None,
           penultimate_filters=4032,
           nb_blocks=6,
           stem_filters=96,
           initial_reduction=True,
           skip_reduction_layer_input=True,
           use_auxiliary_branch=False,
           filters_multiplier=2,
           dropout=0.5,
           weight_decay=5e-5,
           include_top=True,
           weights=None,
           input_tensor=None,
           pooling=None,
           classes=1000,
           default_size=None,
           activation='softmax'):
    """Instantiates a NASNet architecture.
    Note that only TensorFlow is supported for now,
    therefore it only works with the data format
    `image_data_format='channels_last'` in your Keras config
    at `~/.keras/keras.json`.

    # Arguments
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(331, 331, 3)` for NASNetLarge or
            `(224, 224, 3)` for NASNetMobile
            It should have exactly 3 inputs channels,
            and width and height should be no smaller than 32.
            E.g. `(224, 224, 3)` would be one valid value.
        penultimate_filters: number of filters in the penultimate layer.
            NASNet models use the notation `NASNet (N @ P)`, where:
                -   N is the number of blocks
                -   P is the number of penultimate filters
        nb_blocks: number of repeated blocks of the NASNet model.
            NASNet models use the notation `NASNet (N @ P)`, where:
                -   N is the number of blocks
                -   P is the number of penultimate filters
        stem_filters: number of filters in the initial stem block
        initial_reduction: Whether to perform the reduction step at the
            beginning of the network (the ImageNet/mobile stem). Set to
            `False` for CIFAR models.
        skip_reduction_layer_input: Determines whether to skip the reduction layers
            when calculating the previous layer to connect to.
        use_auxiliary_branch: Whether to use the auxiliary branch during
            training or evaluation.
        filters_multiplier: controls the width of the network.
            - If `filters_multiplier` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `filters_multiplier` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `filters_multiplier` = 1, default number of filters from the paper
                 are used at each layer.
        dropout: dropout rate
        weight_decay: l2 regularization weight
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: `None` (random initialization) or
            `imagenet` (ImageNet weights)
        input_tensor: optional Keras tensor (i.e. output of
            `layers.Input()`)
            to use as image input for the model.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model
                will be the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a
                2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
        default_size: specifies the default image size of the model
        activation: Type of activation at the top layer.
            Can be one of 'softmax' or 'sigmoid'.
    # Returns
        A Keras model instance.
    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
        RuntimeError: If attempting to run this model with a
            backend that does not support separable convolutions.
    """
    if K.backend() != 'tensorflow':
        raise RuntimeError('Only the TensorFlow backend is currently supported, '
                           'as other backends do not support '
                           'separable convolution.')

    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as ImageNet with `include_top` '
                         'as true, `classes` should be 1000')

    if default_size is None:
        default_size = 331

    # Determine proper input shape and default size.
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=default_size,
                                      min_size=32,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top or weights)

    if K.image_data_format() != 'channels_last':
        warnings.warn('The NASNet family of models is only available '
                      'for the input data format "channels_last" '
                      '(width, height, channels). '
                      'However your settings specify the default '
                      'data format "channels_first" (channels, width, height).'
                      ' You should set `image_data_format="channels_last"` '
                      'in your Keras config located at ~/.keras/keras.json. '
                      'The model being returned right now will expect inputs '
                      'to follow the "channels_last" data format.')
        K.set_image_data_format('channels_last')
        old_data_format = 'channels_first'
    else:
        old_data_format = None

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    assert penultimate_filters % 24 == 0, "`penultimate_filters` needs to be " \
                                          "divisible by 24."

    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
    filters = penultimate_filters // 24

    if initial_reduction:
        x = Conv2D(stem_filters, (3, 3), strides=(2, 2), padding='valid',
                   use_bias=False, name='stem_conv1', kernel_initializer='he_normal',
                   kernel_regularizer=l2(weight_decay))(img_input)
    else:
        x = Conv2D(stem_filters, (3, 3), strides=(1, 1), padding='same', use_bias=False,
                   name='stem_conv1', kernel_initializer='he_normal',
                   kernel_regularizer=l2(weight_decay))(img_input)

    x = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                           name='stem_bn1')(x)

    p = None
    if initial_reduction:  # imagenet / mobile mode
        x, p = _reduction_A(x, p, filters // (filters_multiplier ** 2), weight_decay,
                            id='stem_1')
        x, p = _reduction_A(x, p, filters // filters_multiplier, weight_decay,
                            id='stem_2')

    for i in range(nb_blocks):
        x, p = _normal_A(x, p, filters, weight_decay, id='%d' % i)

    x, p0 = _reduction_A(x, p, filters * filters_multiplier, weight_decay,
                         id='reduce_%d' % nb_blocks)

    p = p0 if not skip_reduction_layer_input else p

    for i in range(nb_blocks):
        x, p = _normal_A(x, p, filters * filters_multiplier, weight_decay,
                         id='%d' % (nb_blocks + i + 1))

    auxiliary_x = None
    if not initial_reduction:  # CIFAR mode
        if use_auxiliary_branch:
            auxiliary_x = _add_auxiliary_head(x, classes, weight_decay, pooling,
                                              include_top, activation)

    x, p0 = _reduction_A(x, p, filters * filters_multiplier ** 2, weight_decay,
                         id='reduce_%d' % (2 * nb_blocks))

    if initial_reduction:  # imagenet / mobile mode
        if use_auxiliary_branch:
            auxiliary_x = _add_auxiliary_head(x, classes, weight_decay, pooling,
                                              include_top, activation)

    p = p0 if not skip_reduction_layer_input else p

    for i in range(nb_blocks):
        x, p = _normal_A(x, p, filters * filters_multiplier ** 2, weight_decay,
                         id='%d' % (2 * nb_blocks + i + 1))

    x = Activation('relu')(x)

    if include_top:
        x = GlobalAveragePooling2D()(x)
        x = Dropout(dropout)(x)
        x = Dense(classes, activation=activation,
                  kernel_regularizer=l2(weight_decay), name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    if use_auxiliary_branch:
        model = Model(inputs, [x, auxiliary_x], name='NASNet_with_auxiliary')
    else:
        model = Model(inputs, x, name='NASNet')

    # load weights
    if weights == 'imagenet':
        if default_size == 224:  # mobile version
            if include_top:
                if use_auxiliary_branch:
                    weight_path = NASNET_MOBILE_WEIGHT_PATH_WITH_AUXULARY
                    model_name = 'nasnet_mobile_with_aux.h5'
                else:
                    weight_path = NASNET_MOBILE_WEIGHT_PATH
                    model_name = 'nasnet_mobile.h5'
            else:
                if use_auxiliary_branch:
                    weight_path = NASNET_MOBILE_WEIGHT_PATH_WITH_AUXULARY_NO_TOP
                    model_name = 'nasnet_mobile_with_aux_no_top.h5'
                else:
                    weight_path = NASNET_MOBILE_WEIGHT_PATH_NO_TOP
                    model_name = 'nasnet_mobile_no_top.h5'

            weights_file = get_file(model_name, weight_path, cache_subdir='models')
            model.load_weights(weights_file, by_name=True)

        elif default_size == 331:  # large version
            if include_top:
                if use_auxiliary_branch:
                    weight_path = NASNET_LARGE_WEIGHT_PATH_WITH_auxiliary
                    model_name = 'nasnet_large_with_aux.h5'
                else:
                    weight_path = NASNET_LARGE_WEIGHT_PATH
                    model_name = 'nasnet_large.h5'
            else:
                if use_auxiliary_branch:
                    weight_path = NASNET_LARGE_WEIGHT_PATH_WITH_auxiliary_NO_TOP
                    model_name = 'nasnet_large_with_aux_no_top.h5'
                else:
                    weight_path = NASNET_LARGE_WEIGHT_PATH_NO_TOP
                    model_name = 'nasnet_large_no_top.h5'

            weights_file = get_file(model_name, weight_path, cache_subdir='models')
            model.load_weights(weights_file, by_name=True)

        else:
            raise ValueError('ImageNet weights can only be loaded on NASNetLarge '
                             'or NASNetMobile')

    if old_data_format:
        K.set_image_data_format(old_data_format)

    return model
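
A sketch of the mobile configuration, NASNet (4 @ 1056), with weights=None to skip the download (the _normal_A/_reduction_A cell builders are module-level helpers):

model = NASNet(input_shape=(224, 224, 3), penultimate_filters=1056,
               nb_blocks=4, stem_filters=32, default_size=224, weights=None)
model.summary()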
Example #11
def SENET50(include_top=True, weights='vggface',
            input_tensor=None, input_shape=None,
            pooling=None,
            classes=8631):
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=197,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    bn_eps = 0.0001

    x = Conv2D(
        64, (7, 7), use_bias=False, strides=(2, 2), padding='same',
        name='conv1/7x7_s2')(img_input)
    x = BatchNormalization(axis=bn_axis, name='conv1/7x7_s2/bn', epsilon=bn_eps)(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = senet_conv_block(x, 3, [64, 64, 256], stage=2, block=1, strides=(1, 1))
    x = senet_identity_block(x, 3, [64, 64, 256], stage=2, block=2)
    x = senet_identity_block(x, 3, [64, 64, 256], stage=2, block=3)

    x = senet_conv_block(x, 3, [128, 128, 512], stage=3, block=1)
    x = senet_identity_block(x, 3, [128, 128, 512], stage=3, block=2)
    x = senet_identity_block(x, 3, [128, 128, 512], stage=3, block=3)
    x = senet_identity_block(x, 3, [128, 128, 512], stage=3, block=4)

    x = senet_conv_block(x, 3, [256, 256, 1024], stage=4, block=1)
    x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=2)
    x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=3)
    x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=4)
    x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=5)
    x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=6)

    x = senet_conv_block(x, 3, [512, 512, 2048], stage=5, block=1)
    x = senet_identity_block(x, 3, [512, 512, 2048], stage=5, block=2)
    x = senet_identity_block(x, 3, [512, 512, 2048], stage=5, block=3)

    x = AveragePooling2D((7, 7), name='avg_pool')(x)

    if include_top:
        x = Flatten()(x)
        x = Dense(classes, activation='softmax', name='classifier')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='vggface_senet50')

    # load weights
    if weights == 'vggface':
        if include_top:
            weights_path = get_file('rcmalli_vggface_tf_senet50.h5',
                                    utils.SENET50_WEIGHTS_PATH,
                                    cache_subdir=utils.VGGFACE_DIR)
        else:
            weights_path = get_file('rcmalli_vggface_tf_notop_senet50.h5',
                                    utils.SENET50_WEIGHTS_PATH_NO_TOP,
                                    cache_subdir=utils.VGGFACE_DIR)
        model.load_weights(weights_path)
        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)
            if include_top:
                avg_pool = model.get_layer(name='avg_pool')
                shape = avg_pool.output_shape[1:]
                dense = model.get_layer(name='classifier')
                layer_utils.convert_dense_weights_data_format(dense, shape,
                                                              'channels_first')

        if K.image_data_format() == 'channels_first' and K.backend() == 'tensorflow':
            warnings.warn('You are using the TensorFlow backend, yet you '
                          'are using the Theano '
                          'image data format convention '
                          '(`image_data_format="channels_first"`). '
                          'For best performance, set '
                          '`image_data_format="channels_last"` in '
                          'your Keras config '
                          'at ~/.keras/keras.json.')
    elif weights is not None:
        model.load_weights(weights)

    return model
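
A feature-extraction sketch: include_top=False with global average pooling yields one 2048-dimensional vector per image (weights download assumed available):

import numpy as np

model = SENET50(include_top=False, pooling='avg', input_shape=(224, 224, 3))
x = np.random.rand(2, 224, 224, 3).astype('float32')
print(model.predict(x).shape)  # (2, 2048)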
Example #12
def VGG16(include_top=True, weights='vggface',
          input_tensor=None, input_shape=None,
          pooling=None,
          classes=2622):
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=48,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='conv1_1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='conv1_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(x)

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(x)

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool3')(x)

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool4')(x)

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool5')(x)

    if include_top:
        # Classification block
        x = Flatten(name='flatten')(x)
        x = Dense(4096, name='fc6')(x)
        x = Activation('relu', name='fc6/relu')(x)
        x = Dense(4096, name='fc7')(x)
        x = Activation('relu', name='fc7/relu')(x)
        x = Dense(classes, name='fc8')(x)
        x = Activation('softmax', name='fc8/softmax')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = Model(inputs, x, name='vggface_vgg16')

    # load weights
    if weights == 'vggface':
        if include_top:
            weights_path = get_file('rcmalli_vggface_tf_vgg16.h5',
                                    utils.VGG16_WEIGHTS_PATH,
                                    cache_subdir=utils.VGGFACE_DIR)
        else:
            weights_path = get_file('rcmalli_vggface_tf_notop_vgg16.h5',
                                    utils.VGG16_WEIGHTS_PATH_NO_TOP,
                                    cache_subdir=utils.VGGFACE_DIR)
        model.load_weights(weights_path, by_name=True)
        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)

        if K.image_data_format() == 'channels_first':
            if include_top:
                maxpool = model.get_layer(name='pool5')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='fc6')
                layer_utils.convert_dense_weights_data_format(dense, shape,
                                                              'channels_first')

            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
    return model
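
An end-to-end sketch following keras_vggface's usual flow; utils.preprocess_input(..., version=1) matches this V1 (VGG16) model:

import numpy as np
from keras_vggface import utils

model = VGG16()
x = utils.preprocess_input(np.random.rand(1, 224, 224, 3) * 255, version=1)
print(utils.decode_predictions(model.predict(x), top=3))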
Example #13
def memn2n_babi_example():
	try:
		path = get_file('babi-tasks-v1-2.tar.gz',
						origin='https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz')
	except Exception:
		print('Error downloading dataset, please download it manually:\n'
			  '$ wget http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2'
			  '.tar.gz\n'
			  '$ mv tasks_1-20_v1-2.tar.gz ~/.keras/datasets/babi-tasks-v1-2.tar.gz')
		raise

	challenges = {
		# QA1 with 10,000 samples
		'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_'
									  'single-supporting-fact_{}.txt',
		# QA2 with 10,000 samples
		'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_'
									'two-supporting-facts_{}.txt',
	}
	challenge_type = 'single_supporting_fact_10k'
	challenge = challenges[challenge_type]

	print('Extracting stories for the challenge:', challenge_type)
	with tarfile.open(path) as tar:
		train_stories = get_stories(tar.extractfile(challenge.format('train')))
		test_stories = get_stories(tar.extractfile(challenge.format('test')))

	vocab = set()
	for story, q, answer in train_stories + test_stories:
		vocab |= set(story + q + [answer])
	vocab = sorted(vocab)

	# Reserve 0 for masking via pad_sequences.
	vocab_size = len(vocab) + 1
	story_maxlen = max(map(len, (x for x, _, _ in train_stories + test_stories)))
	query_maxlen = max(map(len, (x for _, x, _ in train_stories + test_stories)))

	print('-')
	print('Vocab size:', vocab_size, 'unique words')
	print('Story max length:', story_maxlen, 'words')
	print('Query max length:', query_maxlen, 'words')
	print('Number of training stories:', len(train_stories))
	print('Number of test stories:', len(test_stories))
	print('-')
	print('Here\'s what a "story" tuple looks like (input, query, answer):')
	print(train_stories[0])
	print('-')
	print('Vectorizing the word sequences...')

	word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
	inputs_train, queries_train, answers_train = vectorize_stories(train_stories)
	inputs_test, queries_test, answers_test = vectorize_stories(test_stories)

	print('-')
	print('inputs: integer tensor of shape (samples, max_length)')
	print('inputs_train shape:', inputs_train.shape)
	print('inputs_test shape:', inputs_test.shape)
	print('-')
	print('queries: integer tensor of shape (samples, max_length)')
	print('queries_train shape:', queries_train.shape)
	print('queries_test shape:', queries_test.shape)
	print('-')
	print('answers: integer tensor of shape (samples,)')
	print('answers_train shape:', answers_train.shape)
	print('answers_test shape:', answers_test.shape)
	print('-')
	print('Compiling...')

	# Placeholders.
	input_sequence = Input((story_maxlen,))
	question = Input((query_maxlen,))

	# Encoders.
	# Embed the input sequence into a sequence of vectors.
	input_encoder_m = Sequential()
	input_encoder_m.add(Embedding(input_dim=vocab_size,
								  output_dim=64))
	input_encoder_m.add(Dropout(0.3))
	# Output: (samples, story_maxlen, embedding_dim).

	# Embed the input into a sequence of vectors of size query_maxlen.
	input_encoder_c = Sequential()
	input_encoder_c.add(Embedding(input_dim=vocab_size,
								  output_dim=query_maxlen))
	input_encoder_c.add(Dropout(0.3))
	# Output: (samples, story_maxlen, query_maxlen).

	# Embed the question into a sequence of vectors.
	question_encoder = Sequential()
	question_encoder.add(Embedding(input_dim=vocab_size,
								   output_dim=64,
								   input_length=query_maxlen))
	question_encoder.add(Dropout(0.3))
	# Output: (samples, query_maxlen, embedding_dim).

	# Encode input sequence and questions (which are indices) to sequences of dense vectors.
	input_encoded_m = input_encoder_m(input_sequence)
	input_encoded_c = input_encoder_c(input_sequence)
	question_encoded = question_encoder(question)

	# Compute a 'match' between the first input vector sequence and the question vector sequence.
	# Shape: (samples, story_maxlen, query_maxlen).
	match = dot([input_encoded_m, question_encoded], axes=(2, 2))
	match = Activation('softmax')(match)

	# Add the match matrix with the second input vector sequence.
	response = add([match, input_encoded_c])  # (samples, story_maxlen, query_maxlen).
	response = Permute((2, 1))(response)  # (samples, query_maxlen, story_maxlen).

	# Concatenate the match matrix with the question vector sequence.
	answer = concatenate([response, question_encoded])

	# The original paper uses a matrix multiplication for this reduction step.
	# We choose to use an RNN instead.
	answer = LSTM(32)(answer)  # (samples, 32).

	# One regularization layer -- more would probably be needed.
	answer = Dropout(0.3)(answer)
	answer = Dense(vocab_size)(answer)  # (samples, vocab_size).
	# We output a probability distribution over the vocabulary.
	answer = Activation('softmax')(answer)

	# Build the final model.
	model = Model([input_sequence, question], answer)
	model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy',
				  metrics=['accuracy'])

	# Train.
	model.fit([inputs_train, queries_train], answers_train,
			  batch_size=32,
			  epochs=120,
			  validation_data=([inputs_test, queries_test], answers_test))
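
Everything else the function needs (get_stories, vectorize_stories) comes from the Keras bAbI memory-network example this snippet is based on; calling it downloads the corpus and trains for 120 epochs:

memn2n_babi_example()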