Code example #1
def conv2d(x,
           kernel,
           strides=(1, 1),
           padding='valid',
           data_format='channels_first',
           image_shape=None,
           filter_shape=None):
    """2D convolution.

    # Arguments
        x: Input tensor
        kernel: kernel tensor.
        strides: strides tuple.
        padding: string, "same" or "valid".
        data_format: 'channels_first' or 'channels_last'.
            Whether to use Theano or TensorFlow dimension
            ordering in inputs/kernels/outputs.
        image_shape: Optional, the input tensor shape.
        filter_shape: Optional, the kernel shape.

    # Returns
        x convolved with the kernel.

    # Raises
        Exception: In case of invalid border mode or data format.
    """
    if padding == 'same':
        padding = 'SAME'
    elif padding == 'valid':
        padding = 'VALID'
    else:
        raise Exception('Invalid border mode: ' + str(padding))

    strides = (1, ) + strides + (1, )

    if floatx() == 'float64':
        # tf conv2d only supports float32
        x = tf.cast(x, 'float32')
        kernel = tf.cast(kernel, 'float32')

    if data_format == 'channels_first':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols)
        # TF input shape: (samples, rows, cols, input_depth)
        # TH kernel shape: (depth, input_depth, rows, cols)
        # TF kernel shape: (rows, cols, input_depth, depth)
        x = tf.transpose(x, (0, 2, 3, 1))
        kernel = tf.transpose(kernel, (2, 3, 1, 0))
        x = tf.nn.conv2d(x, kernel, strides, padding=padding)
        x = tf.transpose(x, (0, 3, 1, 2))
    elif data_format == 'channels_last':
        x = tf.nn.conv2d(x, kernel, strides, padding=padding)
    else:
        raise Exception('Unknown data_format: ' + str(data_format))

    if floatx() == 'float64':
        x = tf.cast(x, 'float64')
    return x
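
A minimal usage sketch, not from the original source: a single-channel 8x8
image in the channels_first layout described in the docstring, convolved with
one 3x3 filter (TF 1.x-style placeholders assumed).

import tensorflow as tf

x = tf.placeholder('float32', shape=(1, 1, 8, 8))       # (samples, input_depth, rows, cols)
kernel = tf.placeholder('float32', shape=(1, 1, 3, 3))  # (depth, input_depth, rows, cols)
y = conv2d(x, kernel, strides=(1, 1), padding='same',
           data_format='channels_first')
# The result keeps the channels_first layout: (1, 1, 8, 8) under 'same' padding.
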
Code example #2
 def testMicroAddElementsFail(self):
     data = [m(3, 3), m(3, 3)]
     test_func = self.testAddElements
     args = list()
     results = []
     ###############
     x = [
         pkb.placeholder(shape=t.shape) for t in data
         if isinstance(t, np.ndarray)
     ]
     xv = [
         pkb.variable(t, dtype=floatx()) for t in data
         if isinstance(t, np.ndarray)
     ]
     par = [t for t in data if not isinstance(t, np.ndarray)]
     grad_funcs = test_func(pkb, *(x + par + list(args)))
     funcs = test_func(pkb, *(xv + par + list(args)))
     #for gf, f in zip(grad_funcs, funcs):
     gf = grad_funcs[0]
     f = funcs[0]
     df = pkb.gradients(pkb.mean(gf), x)
     gfn = pkb.function(x, df, updates=[])
     fr = f.eval()
     gr = gfn([t for t in data if isinstance(t, np.ndarray)])
     if verbose:
         print(pkb, fr, gr)
     results.append((fr, gr))
     return results
Code example #3
def m(*args, **kwargs):
    """Makes a test matrix whose dimensions are the supplied arguments."""
    dtype = kwargs.get('dtype', floatx())
    total = functools.reduce(operator.mul, args, 1)
    arr = np.array(range(-2, total - 2), dtype=dtype)
    arr = np.reshape(arr, args)
    return arr
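
For reference, a quick check of what this helper produces, assuming floatx()
returns 'float32': values count upward from -2 in row-major order.

print(m(3, 3))
# [[-2. -1.  0.]
#  [ 1.  2.  3.]
#  [ 4.  5.  6.]]
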
Code example #4
File: theano_backend.py Project: fsadannn/snn
def make_sparse(value, dtype=None):
    if dtype is None:
        dtype = floatx()
    assert hasattr(value, 'tocoo')
    _assert_sparse_module()
    var = th_sparse_module.as_sparse_variable(value)
    return var
Code example #5
 def run_one_backend(self, data, test_func, b, *args):
     tf_session = tensorflow.Session()
     tf.set_session(tf_session)
     results = []
     with tf_session.as_default():
         x = [
             b.placeholder(shape=t.shape) for t in data
             if hasattr(t, 'shape')
         ]
         xv = [
             b.variable(t, dtype=floatx()) for t in data
             if hasattr(t, 'shape')
         ]
         ps = [t for t in data if not hasattr(t, 'shape')]
         grad_funcs = test_func(self, b, *(x + ps + list(args)))
         funcs = test_func(self, b, *(xv + ps + list(args)))
         tf_session.run(tensorflow.global_variables_initializer())
         for gf, f in zip(grad_funcs, funcs):
             df = b.gradients(b.mean(gf), x)
             gfn = b.function(x, df, updates=[])
             fr = f.eval()
             gr = gfn([t for t in data if hasattr(t, 'shape')])
             if verbose:
                 print(b, fr, gr)
             results.append((fr, gr))
     tf_session.close()
     return results
Code example #6
File: __init__.py Project: whztt07/plaidml
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
    dtype = plaidml.DType.from_numpy(dtype or floatx())
    # TODO: Need to support empty shapes; once supported, convert below to `if _ is not None`
    if shape:
        return _KerasNode('placeholder', shape=edsl.LogicalShape(dtype, shape), name=name)
    if ndim:
        return _KerasNode('placeholder', shape=edsl.LogicalShape(dtype, [0] * ndim), name=name)
    raise ValueError('placeholder requires either a shape or an ndim')
Code example #7
File: numpy_backend.py Project: sallamander/ktorch
def constant(value, dtype=None, shape=None, name=None):
    if dtype is None:
        dtype = floatx()
    if shape is None:
        shape = ()
    np_value = value * np.ones(shape)
    # astype returns a new array, so the result must be assigned back
    np_value = np_value.astype(dtype)
    return np_value
Code example #8
File: __init__.py Project: whztt07/plaidml
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
    rng_state = _make_rng_state(seed)
    R = edsl.prng(rng_state.tensor, shape)
    dtype = dtype or floatx()
    if dtype != 'float32':
        R = edsl.cast(R, plaidml.DType.from_numpy(dtype))
    O = (maxval - minval) * R + minval
    return _KerasNode('random_uniform', tensor=O)
Code example #9
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    logger.debug('truncated_normal(shape: {}, mean: {}, stddev: {}, dtype: {}, seed: {})'.format(
        shape, mean, stddev, dtype, seed))
    if dtype is None:
        dtype = floatx()
    if seed:
        np.random.seed(seed)
    return variable(stddev * scipy.stats.truncnorm.rvs(-2.0, 2.0, size=shape) + mean, dtype)
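
scipy.stats.truncnorm.rvs(-2.0, 2.0, ...) samples a standard normal truncated
to [-2, 2], so after scaling every draw lies within two standard deviations of
the mean. A quick sanity check, not from the original source:

import numpy as np
import scipy.stats

samples = scipy.stats.truncnorm.rvs(-2.0, 2.0, size=10000)
assert np.all(np.abs(samples) <= 2.0)
# stddev * samples + mean then lies in [mean - 2*stddev, mean + 2*stddev].
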
Code example #10
File: __init__.py Project: whztt07/plaidml
def random_normal_variable(shape, mean, scale, dtype=None, name=None, seed=None):
    if dtype is None:
        dtype = floatx()
    elif isinstance(dtype, plaidml.DType):
        dtype = ptile.convert_pml_dtype_to_np(dtype)
    if seed:
        np.random.seed(seed)
    data = np.random.normal(mean, scale, shape).astype(dtype)
    return variable(data, dtype=dtype, name=name)
Code example #11
File: __init__.py Project: whztt07/plaidml
def constant(value, dtype=None, shape=None, name=None):
    if shape is None:
        if isinstance(value, np.ndarray):
            shape = value.shape
        elif isinstance(value, list) or isinstance(value, tuple):
            shape = (len(value),)
        else:
            shape = (1,)
    np_value = np.full(shape, value, dtype=dtype or floatx())
    return _KerasNode('constant', name=name, value=np_value)
Code example #12
File: __init__.py Project: whztt07/plaidml
def zeros_like(x, dtype=None, name=None):
    value = np.full((1), 0, dtype=dtype or floatx())
    zero = _create_var('a_zero', value)
    I = x.tensor
    ndim = I.shape.ndims
    dims = edsl.TensorDims(ndim)
    idxs = edsl.TensorIndexes(ndim)
    I.bind_dims(*dims)
    O = edsl.TensorOutput(*dims)
    O[idxs] = zero[0]
    return _KerasNode('zeros_like', name=name, tensor=O)
Code example #13
def make_sparse(value, dtype=None):
    if dtype is None:
        dtype = floatx()
    assert hasattr(value, 'tocoo')
    sparse_coo = value.tocoo()
    indices = np.concatenate(
        (np.expand_dims(sparse_coo.row, 1), np.expand_dims(sparse_coo.col, 1)),
        1)
    v = tf.SparseTensor(indices=indices,
                        values=sparse_coo.data,
                        dense_shape=sparse_coo.shape)
    return v
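
A hedged usage sketch, not from the original source: any SciPy sparse matrix
with a tocoo() method qualifies as input, e.g. a CSR matrix.

import numpy as np
import scipy.sparse

csr = scipy.sparse.csr_matrix(np.eye(3, 4))
sp = make_sparse(csr)  # tf.SparseTensor whose indices are the COO (row, col) pairs
# sp.dense_shape evaluates to [3, 4]; sp.values holds the three nonzero entries.
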
Code example #14
def n(*args):
    """Makes a test matrix whose dimensions are the supplied arguments.

    Differs from m only in what values it has."""
    total = functools.reduce(operator.mul, args, 1)
    arr = np.array(range(-11, total - 11), dtype=floatx())
    arr = np.reshape(arr, args)
    for i in range(5):
        if len(args) > i + 1:
            # np.swapaxes returns a view; assign it back so the swap takes effect
            arr = np.swapaxes(arr, 0, i + 1)
    arr = np.reshape(arr, args)
    return arr
Code example #15
File: __init__.py Project: whztt07/plaidml
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    if dtype is None:
        dtype = floatx()
    if seed:
        np.random.seed(seed)
    # TODO: We only use half of the Box-Muller here
    u1 = random_uniform(shape, dtype='float32')
    u2 = random_uniform(shape, dtype='float32')
    z0 = sqrt(-2.0 * log(u1 + (1.0 / (2**33)))) * cos(2.0 * math.pi * u2)
    z0 = stddev * z0
    z0 = z0 + mean
    if dtype != 'float32':
        z0 = cast(z0, dtype)
    return z0
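
The "half of the Box-Muller" comment refers to the transform
z0 = sqrt(-2 ln u1) * cos(2 pi u2), z1 = sqrt(-2 ln u1) * sin(2 pi u2), which
maps two independent uniforms to two independent standard normals; the code
keeps only the cosine branch, and the 1/(2**33) offset guards against log(0).
A NumPy sketch of the full transform, for reference only:

import math
import numpy as np

u1 = np.random.uniform(size=1000)
u2 = np.random.uniform(size=1000)
r = np.sqrt(-2.0 * np.log(u1 + 1.0 / 2**33))  # offset avoids log(0)
z0 = r * np.cos(2.0 * math.pi * u2)  # the branch the backend keeps
z1 = r * np.sin(2.0 * math.pi * u2)  # the discarded second normal
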
Code example #16
File: backend.py Project: evankos/MED
    def __init__(self, extend_labels=False, multilabel=False):
        self._multilabel = multilabel
        self._extend_labels = extend_labels
        self._labels = np.loadtxt(label_file(), dtype=floatx())
        self._class_index = open(class_index_file(), 'r').read().split('\n')
        if extend_labels:
            self._word_tree = Tree()
            self._extra_labels = {
                "layer_1":
                np.zeros((self._labels.shape[0],
                          len(self._word_tree.extra_activations["layer_1"]))),
                "layer_2":
                np.zeros((self._labels.shape[0],
                          len(self._word_tree.extra_activations["layer_2"]))),
                "layer_3":
                np.zeros((self._labels.shape[0],
                          len(self._word_tree.extra_activations["layer_3"])))
            }

            for sample in range(self._labels.shape[0]):
                # Collapse multi-label samples to a single active label
                if not multilabel:
                    search = np.where(self._labels[sample] == 1)
                    if search[0].shape[0] > 1:
                        self._labels[sample] = np.zeros(
                            (self._labels.shape[1]), dtype=floatx())
                        self._labels[sample, search[0][0]] = 1.
                for o_layer in list(
                        self._word_tree.coocurrence_indexes.keys()):
                    activations = np.where(self._labels[sample] == 1.)[0]
                    groups = self._word_tree.coocurrence_indexes[o_layer][
                        activations]
                    try:
                        self._extra_labels[o_layer][sample, groups] = 1.
                    except Exception as e:
                        print(e, o_layer, activations)
                        exit()
Code example #17
def _postprocess_conv2d_output(x, data_format):
    """Transpose and cast the output from conv2d if needed.
    # Arguments
        x: A tensor.
        data_format: string, `"channels_last"` or `"channels_first"`.
    # Returns
        A tensor.
    """

    if data_format == 'channels_first':
        x = tf.transpose(x, (0, 3, 1, 2))

    if floatx() == 'float64':
        x = tf.cast(x, 'float64')
    return x
Code example #18
File: filters.py Project: nebw/beras
def sobel(img, border_mode='zero'):
    filter = np.array([
        [1, 0, -1],
        [2, 0, -2],
        [1, 0, -1],
    ], dtype=floatx())
    img, conv_border = add_border(img, border=1, mode=border_mode)
    kernel_x = theano.shared(filter[np.newaxis, np.newaxis])
    kernel_y = theano.shared(np.transpose(filter)[np.newaxis, np.newaxis])
    conv_x = T.nnet.conv2d(img, kernel_x, border_mode=conv_border,
                           input_shape=(None, 1, None, None),
                           filter_shape=(1, 1, 3, 3))
    conv_y = T.nnet.conv2d(img, kernel_y, border_mode=conv_border,
                           input_shape=(None, 1, None, None),
                           filter_shape=(1, 1, 3, 3))
    return conv_x, conv_y
Code example #19
File: __init__.py Project: whztt07/plaidml
def variable(value, dtype=None, name=None, constraint=None):
    if name is None:
        name = 'anon'
    dtype = dtype or floatx()
    if isinstance(value, _KerasNode):
        value = value.eval()
    if isinstance(value, float) or isinstance(value, six.integer_types):
        value = np.array(value, dtype=dtype)
    if isinstance(value, list) or isinstance(value, tuple):
        value = np.array(value, dtype=dtype)
    if isinstance(value, np.ndarray):
        if dtype != value.dtype:
            logger.debug(
                'Casting to requested dtype in variable, received {} and requested {}'.format(
                    value.dtype, dtype))
            value = value.astype(dtype)
        return _KerasNode('variable', name=name, value=value)
    raise TypeError('Unknown type for variable: {}'.format(type(value)))
Code example #20
File: theano_backend.py Project: fsadannn/snn
def sparse_mean(x, axis=None):
    """Mean of a tensor, alongside the specified axis.
    """
    # bool is available since theano v0.9dev
    if 'int' in x.dtype or x.dtype == 'bool':
        dtype = floatx()
    else:
        dtype = x.dtype

    if isinstance(axis, (integer_types, np.integer)):
        if axis == -1:
            axis = max(x.ndim - 1, 0)
    s = th_sparse_module.sp_sum(x, axis, True)
    shp = shape(x)

    if s.dtype in ('float16', 'float32', 'complex64'):
        shp = cast(shp, 'float32')
    else:
        shp = cast(shp, 'float64')

    if axis is None:
        axis = list(range(len(x.data.shape)))
    elif isinstance(axis, (integer_types, np.integer)):
        axis = [axis]
    elif isinstance(axis, np.ndarray) and axis.ndim == 0:
        axis = [int(axis)]
    else:
        axis = [int(a) for a in axis]

    for i in axis:
        s = true_div(s, shp[i])

    if s.dtype != shp.dtype and s.dtype in discrete_dtypes:
        s = cast(s, shp.dtype)

    if dtype == 'float16' or (dtype is None and x.dtype == 'float16'):
        s = cast(s, 'float16')
    s.name = 'mean'
    return s
Code example #21
File: __init__.py Project: whztt07/plaidml
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    if dtype is None:
        dtype = floatx()
    if seed:
        np.random.seed(seed)
    return variable(stddev * scipy.stats.truncnorm.rvs(-2.0, 2.0, size=shape) + mean, dtype)
Code example #22
def zeros(shape, dtype=None, name=None):
    logger.debug('zeros(shape: {}, dtype: {}, name: {})'.format(shape, dtype, name))
    value = np.full(shape, 0, dtype=dtype or floatx())
    return _KerasNode('zeros', name=name, value=value)
Code example #23
File: numpy_backend.py Project: sallamander/ktorch
def ones_like(x, dtype=floatx(), name=None):
    return np.ones_like(x, dtype=dtype)
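
Note that dtype=floatx() in this signature is evaluated once, at import time,
so a later change to the global float type is silently ignored; the other
backends above use the late-binding dtype=None idiom instead. A sketch of the
same function in that style:

def ones_like(x, dtype=None, name=None):
    # Resolve floatx() at call time rather than at import time.
    return np.ones_like(x, dtype=dtype or floatx())
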
Code example #24
def train(db, entity_db, vocab, word2vec, out_file, mode, text_len, dim_size,
          negative, epoch, batch_size, word_static, entity_static,
          include_title, optimizer, dev_size, patience, num_links,
          random_seed):
    np.random.seed(random_seed)

    click.echo('Initializing weights...')
    word_embedding = np.random.uniform(low=-0.05,
                                       high=0.05,
                                       size=(vocab.word_size, dim_size))
    word_embedding = np.vstack([np.zeros(dim_size), word_embedding])
    word_embedding = word_embedding.astype(floatx())

    entity_embedding = np.random.uniform(low=-0.05,
                                         high=0.05,
                                         size=(vocab.entity_size, dim_size))
    entity_embedding = entity_embedding.astype(floatx())

    if word2vec:
        for word in vocab.words():
            try:
                vec = word2vec.get_word_vector(word)
            except KeyError:
                continue

            if vec is not None:
                word_embedding[vocab.get_word_index(word) + 1] = vec

        for entity in vocab.entities():
            try:
                vec = word2vec.get_entity_vector(entity)
            except KeyError:
                continue
            if vec is not None:
                entity_embedding[vocab.get_entity_index(
                    entity)] = vec / np.linalg.norm(vec, 2)

    tokenizer = RegexpTokenizer()

    if mode == 'sentence':
        sentence_detector = OpenNLPSentenceDetector()

    def generate_data(keys, count_links=False, shuffle=True, loop=True):
        num_entities = entity_embedding.shape[0]
        labels = np.zeros((batch_size, negative + 1), dtype=int)
        labels[:, 0] = 1

        while True:
            word_batch = []
            entity_batch = []

            if shuffle:
                keys = np.random.permutation(keys)

            for key in keys:
                value = db[key]
                text = value['text']
                links = value['links']

                target_data = []
                if mode == 'paragraph':
                    target_data = [(text, links)]
                    if include_title:
                        target_data[0][1].append(
                            (None, key.decode('utf-8'), None))

                elif mode == 'sentence':
                    for (start,
                         end) in sentence_detector.sent_pos_detect(text):
                        target_data.append((text[start:end], [
                            (l[0], l[1], (l[2][0] - start, l[2][1] - start))
                            for l in links if start <= l[2][0] < end
                        ]))
                        if include_title:
                            target_data[-1][1].append(
                                (None, key.decode('utf-8'), None))
                else:
                    raise NotImplementedError()

                for (target_text, target_links) in target_data:
                    word_indices = []
                    word_offsets = []
                    for token in tokenizer.tokenize(target_text):
                        word_index = vocab.get_word_index(token.text.lower())
                        if word_index is not None:
                            word_indices.append(word_index + 1)
                            word_offsets.append(token.span[0])

                    positive_ids = [
                        vocab.get_entity_index(
                            entity_db.resolve_redirect(title))
                        for (_, title, _) in target_links
                    ]
                    positive_id_set = frozenset(
                        [o for o in positive_ids if o is not None])

                    for (positive_id,
                         (_, title, span)) in zip(positive_ids, target_links):
                        if positive_id is None:
                            continue

                        if not word_indices:
                            continue

                        if count_links:
                            yield 1
                            continue

                        negatives = []
                        while True:
                            negative_id = np.random.randint(0, num_entities)
                            if negative_id not in positive_id_set:
                                negatives.append(negative_id)
                            if len(negatives) == negative:
                                break

                        word_batch.append(word_indices)

                        entity_indices = [positive_id] + negatives
                        entity_batch.append(entity_indices)

                        if len(word_batch) == batch_size:
                            yield ([
                                pad_sequences(word_batch, maxlen=text_len),
                                np.vstack(entity_batch)
                            ], labels)

                            word_batch = []
                            entity_batch = []

            if word_batch:
                yield ([
                    pad_sequences(word_batch, maxlen=text_len),
                    np.vstack(entity_batch)
                ], labels[:len(word_batch)])

            if not loop or count_links:
                break

    (train_keys, dev_keys) = train_test_split(db.keys(),
                                              test_size=dev_size,
                                              random_state=random_seed)

    if num_links is None:
        click.echo('Counting links...')
        with click.progressbar(train_keys) as bar:
            num_links = sum(
                list(generate_data(bar, count_links=True, shuffle=False)))

        click.echo('The number of links: %d' % num_links)

    dev_data = list(generate_data(dev_keys, loop=False))
    dev_data = ([
        np.vstack([d[0][0] for d in dev_data]),
        np.vstack([d[0][1] for d in dev_data])
    ], np.vstack([d[1] for d in dev_data]))

    callbacks = []
    callbacks.append(
        ModelCheckpoint(out_file + '_checkpoint.h5',
                        monitor='val_acc',
                        save_best_only=True))
    if patience:
        callbacks.append(EarlyStopping(monitor='val_acc', patience=patience))

    model = build_model(
        text_len=text_len,
        negative_size=negative,
        optimizer=optimizer,
        word_size=word_embedding.shape[0],
        entity_size=entity_embedding.shape[0],
        dim_size=word_embedding.shape[1],
        word_static=word_static,
        entity_static=entity_static,
        word_embedding=word_embedding,
        entity_embedding=entity_embedding,
    )

    model.fit_generator(generate_data(train_keys),
                        samples_per_epoch=num_links,
                        nb_epoch=epoch,
                        validation_data=dev_data,
                        max_q_size=1000,
                        callbacks=callbacks)

    db.close()

    word_embedding = model.get_layer('word_embedding').get_weights()[0][1:]
    entity_embedding = model.get_layer('entity_embedding').get_weights()[0]

    ret = dict(
        word_embedding=word_embedding,
        entity_embedding=entity_embedding,
        vocab=vocab,
    )
    ret['W'] = model.get_layer('text_layer').get_weights()[0]
    ret['b'] = model.get_layer('text_layer').get_weights()[1]

    joblib.dump(ret, out_file + '.joblib', protocol=-1)
Code example #25
File: __init__.py Project: whztt07/plaidml
def eye(size, dtype=None, name=None):
    if dtype is None:
        dtype = floatx()
    elif isinstance(dtype, plaidml.DType):
        dtype = dtype.into_numpy()
    return variable(np.eye(size, dtype=dtype), name=name, dtype=dtype)
Code example #26
File: layers.py Project: studio-ousia/ntee
# -*- coding: utf-8 -*-

import numpy as np
import theano
from keras import backend as K
from keras.backend.common import floatx
from keras.engine.topology import Layer
from theano import tensor as T

FLOATX = floatx()
FLOAT_MIN = np.finfo('float32').min + K.epsilon()
FLOAT_MAX = np.finfo('float32').max - K.epsilon()


class TextRepresentationLayer(Layer):
    def __init__(self, W=None, b=None, *args, **kwargs):
        super(TextRepresentationLayer, self).__init__(*args, **kwargs)

        self.W = W
        self.b = b

    def build(self, input_shape):
        if self.W is None:
            self.W = K.variable(np.identity(input_shape[0][2]))
        elif isinstance(self.W, np.ndarray):
            self.W = K.variable(self.W)
        else:
            raise RuntimeError()

        if self.b is None:
            self.b = K.random_uniform_variable((input_shape[0][2],), -0.05, 0.05)
Code example #27
File: __init__.py Project: whztt07/plaidml
def zeros(shape, dtype=None, name=None):
    value = np.full(shape, 0, dtype=dtype or floatx())
    return _KerasNode('zeros', name=name, value=value)
Code example #28
File: vgg19.py Project: flaub/plaidbench
def build_model():
    import keras.applications as kapp
    from keras.layers import Input
    from keras.backend.common import floatx
    inputLayer = Input(shape=(224, 224, 3), dtype=floatx())
    return kapp.VGG19(input_tensor=inputLayer)
Code example #29
File: numpy_backend.py Project: sallamander/ktorch
def zeros_like(x, dtype=floatx(), name=None):
    return np.zeros_like(x, dtype=dtype)
Code example #30
File: __init__.py Project: whztt07/plaidml
def ones(shape, dtype=None, name=None):
    value = np.full(shape, 1, dtype=dtype or floatx())
    return _KerasNode('ones', name=name, value=value)
Code example #31
File: numpy_backend.py Project: sallamander/ktorch
def ones(shape, dtype=floatx(), name=None):
    return np.ones(shape, dtype=dtype)