Example #1
    def test_dimshuffle_false_get_output_for(self, DummyInputLayer):
        try:
            from lasagne.layers.cuda_convnet import Conv2DCCLayer
        except ImportError:
            pytest.skip("cuda_convnet not available")

        # this implementation is tested against FilterActs instead of
        # theano.tensor.nnet.conv.conv2d because using the latter leads to
        # numerical precision errors.
        from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
        filter_acts = FilterActs(stride=1, pad=0, partial_sum=1)

        input = theano.shared(floatX(np.random.random((4, 5, 5, 8))))
        kernel = theano.shared(floatX(np.random.random((4, 3, 3, 16))))

        input_layer = DummyInputLayer((4, 5, 5, 8))  # c01b instead of bc01
        layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),
                              dimshuffle=False, W=kernel, b=None,
                              nonlinearity=None)

        output = np.array(filter_acts(input, kernel).eval())

        actual = layer.get_output_for(input).eval()
        actual = np.array(actual)
        assert actual.shape == output.shape
        assert actual.shape == layer.output_shape
        assert np.allclose(actual, output)
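
Note on layouts: the test above feeds the layer a c01b tensor (channels, rows, columns, batch) rather than Lasagne's usual bc01 order, which is what dimshuffle=False means. A minimal NumPy-only sketch of that reordering, using the same shapes as the test:

import numpy as np

x_c01b = np.random.random((4, 5, 5, 8)).astype('float32')  # (channels, rows, cols, batch)
x_bc01 = np.transpose(x_c01b, (3, 0, 1, 2))                # back to (batch, channels, rows, cols)
assert x_bc01.shape == (8, 4, 5, 5)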
Example #2
def prep_for_vgg(url, i, dataset, datadir, width=224, filetype="jpg"):
    '''
    Check whether the image file has already been downloaded at the current size.
    If it has not, download and resize it. Saves the file to
    datadir/images/[dataset]_[idx]_w[width].[filetype],
    e.g. datadir/images/train_10001_w256.bmp

    args: same as those of dl.prep_image
        url: url of the image source
        i: image index
        dataset: string 'train' or 'test' or other identifier
        datadir: data directory
        width: desired width of the image; it is resized and cropped to width x width
    returns:
        im: preprocessed image array of shape (1, 3, width, width),
            zero-filled if the download fails
    '''
    rawim = dl.prep_image(url,i,dataset,datadir,width,filetype)
    if rawim is None:  # If the image fails to download, produce a zero-filled placeholder with the same shape
        im = floatX(np.tile(0, (1, 3, width, width)))
    else:
        # Shuffle axes to c01
        im = np.swapaxes(np.swapaxes(rawim, 1, 2), 0, 1)

        # Convert to BGR
        im = im[::-1, :, :]

        im = im - MEAN_IMAGE
        im=floatX(im[np.newaxis])
    return im
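
A hedged usage sketch: since prep_for_vgg returns arrays of shape (1, 3, width, width) (zero-filled when a download fails), a batch can be assembled by concatenating along the sample axis. The names urls and datadir below are placeholders, not part of the original module:

import numpy as np

batch = np.concatenate(
    [prep_for_vgg(url, i, 'train', datadir) for i, url in enumerate(urls)],
    axis=0)
# batch.shape == (len(urls), 3, 224, 224) with the default width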
Example #3
    def test_set_all_param_values(self):
        from lasagne.layers import (InputLayer, DenseLayer,
                                    set_all_param_values)
        from lasagne.utils import floatX

        l1 = InputLayer((10, 20))
        l2 = DenseLayer(l1, 30)
        l3 = DenseLayer(l2, 40)

        a2 = floatX(numpy.random.normal(0, 1, (20, 30)))
        b2 = floatX(numpy.random.normal(0, 1, (30,)))
        a3 = floatX(numpy.random.normal(0, 1, (30, 40)))
        b3 = floatX(numpy.random.normal(0, 1, (40,)))
        set_all_param_values(l3, [a2, b2, a3, b3])
        assert numpy.allclose(l3.W.get_value(), a3)
        assert numpy.allclose(l3.b.get_value(), b3)
        assert numpy.allclose(l2.W.get_value(), a2)
        assert numpy.allclose(l2.b.get_value(), b2)

        with pytest.raises(ValueError):
            set_all_param_values(l3, [a3, b3, a2])

        with pytest.raises(ValueError):
            a3_bad = floatX(numpy.random.normal(0, 1, (25, 40)))
            set_all_param_values(l3, [a2, b2, a3_bad, b3])
Example #4
 def __init__(self, incoming, dimension, params_init=(GlorotUniform(),
                                                      GlorotUniform(),
                                                      Uniform([0, 0.1])),
              addition_parameters=[False], **kwargs):
     '''
     Initialize the LISTA layer parameters.
     :param incoming: input to the LISTA layer
     :param dimension: list of two numbers;
      dimension[0] is dict_size, the length of the dictionary vectors in LISTA; dimension[1] is T, a.k.a. the depth
     :param params_init: init values or init methods for the LISTA parameters
     :param addition_parameters: addition_parameters[0] is transposed; True if the input dictionary D is the
      transpose of a theano.compile.SharedVariable V, in which case self.W = D^T = (V^T)^T = V
     :param kwargs: parameters of the super class
     '''
     super(LISTA, self).__init__(incoming, **kwargs)
     self.transposed = addition_parameters[0]
     num_inputs = incoming.output_shape[-1]
     self.dict_size = dimension[0]
     self.T = dimension[1]
     self.W = self.add_param(params_init[0], [num_inputs, self.dict_size], name='W',
                             lista=True, lista_weight_W=True, sparse_dictionary=True, regularizable=True)
     # self.S = self.add_param(params_init[1], [self.dict_size, self.dict_size], name='S',
     #                         lista=True, lista_weight_W=True, regularizable=True)
      if self.T > 0:
          self.S = T.eye(self.dict_size) - T.dot(self.get_dictionary(), self.get_dictionary().T)
         self.S = self.add_param(theano.shared(floatX(self.S.eval())), [self.dict_size, self.dict_size], name='S',
                                 lista=True, lista_weight_S=True, regularizable=True)
     self.theta = self.add_param(theano.shared(floatX(0.01 * np.ones([self.dict_size, ]))), [self.dict_size, ],
                                 name='theta',
                                 lista=True, lista_fun_param=True, regularizable=False)
     self.eps = 1e-6
     self.clipped_theta = T.clip(self.theta, self.eps, 10)
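
For context, the parameters created above (W, S and the soft-threshold parameter theta) are the ingredients of the standard LISTA recurrence; the layer's get_output_for is not shown here, but a hedged NumPy sketch of that recurrence, with made-up sizes, looks like this:

import numpy as np

def soft_threshold(v, theta):
    return np.sign(v) * np.maximum(np.abs(v) - theta, 0.0)

x = np.random.randn(2, 16)          # (batch, num_inputs), assumed sizes
W = 0.1 * np.random.randn(16, 32)   # (num_inputs, dict_size)
S = np.eye(32) - W.T.dot(W)         # mirrors I - dot(D, D^T) above
theta = 0.01
z = soft_threshold(x.dot(W), theta)
for _ in range(3):                  # T unrolled iterations
    z = soft_threshold(x.dot(W) + z.dot(S), theta)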
Example #5
def BatchNorm(layer, include=True, mean=0.0, std=0):
    if include:
        if std == 0:
            return batch_norm(layer)
        return batch_norm(
            layer,
            mean=floatX(np.zeros(layer.output_shape[1])+mean),
            inv_std=floatX(np.random.normal(0.0, std, layer.output_shape[1])))
    else:
        return layer
Example #6
def test_transform_identity():
    from lasagne.layers import InputLayer, TransformerLayer
    from lasagne.utils import floatX
    from theano.tensor import constant
    batchsize = 10
    l_in = InputLayer((batchsize, 3, 28, 28))
    l_loc = InputLayer((batchsize, 6))
    layer = TransformerLayer(l_in, l_loc)
    inputs = floatX(np.arange(np.prod(l_in.shape)).reshape(l_in.shape))
    thetas = floatX(np.tile([1, 0, 0, 0, 1, 0], (batchsize, 1)))
    outputs = layer.get_output_for([constant(inputs), constant(thetas)]).eval()
    np.testing.assert_allclose(inputs, outputs, rtol=1e-6)
Example #7
def gen_data():
    """
    Generate toy data, from Bishop p273.

    :returns:
        - X : np.ndarray, shape=(n_batch, 1)
            Input sequence
        - t : np.ndarray, shape=(n_batch, 1)
            Target sequence
    """
    t = np.random.uniform(low=0.1, high=0.9, size=SHAPE)
    noise = np.random.uniform(low=-0.1, high=0.1, size=SHAPE)
    X = t + (0.3 * np.sin(2 * np.pi * t)) + noise
    return floatX(X), floatX(t)
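
A hedged usage sketch, assuming SHAPE is defined at module level, e.g. SHAPE = (100, 1):

X, t = gen_data()
assert X.shape == t.shape == SHAPE
# X and t have dtype theano.config.floatX (float32 by default). Note that the
# map t -> X is non-monotonic, so the inverse problem X -> t is multi-valued,
# which is Bishop's motivation for mixture density outputs.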
Example #8
def vgg_prepare_image(im, image_mean, image_size=224):
        
    # Scale the image
    h, w, _ = im.shape
    if h < w:
        im = skimage.transform.resize(im, (image_size, w * image_size / h), preserve_range=True)
    else:
        im = skimage.transform.resize(im, (h * image_size / w, image_size), preserve_range=True)

    # Crop the central 224x224
    h, w, _ = im.shape
    im = im[h//2 - image_size // 2:h // 2 + image_size // 2, w // 2 - image_size // 2:w // 2 + image_size // 2]

    # Convert to uint8 type
    rawim = np.copy(im).astype('uint8')

    # (height, width, channel) to (channel, height, width)
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)

    # Images come in RGB channel order, while VGG net expects BGR:
    im = im[::-1, :, :]
    
    # If necessary, add 2 axes to the mean so that it will broadcast when we subtract
    # it from the image
    if len(image_mean.shape) == 1:
        image_mean = image_mean[:,None,None]

    # Subtract the mean
    im = im - image_mean
    
    # Add the sample axis 
    im = im[np.newaxis]
    
    return rawim, floatX(im)
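
A hedged sketch of the inverse transform, useful for checking what the network actually receives; it simply undoes the steps above (add the mean back, BGR to RGB, channel axis last). image_mean is assumed to be broadcastable to (3, 1, 1):

import numpy as np

def vgg_deprocess_image(im, image_mean):
    im = im[0] + image_mean                          # drop the sample axis, add the mean back
    im = im[::-1, :, :]                              # BGR -> RGB
    im = np.swapaxes(np.swapaxes(im, 0, 1), 1, 2)    # (channel, height, width) -> (height, width, channel)
    return np.clip(im, 0, 255).astype('uint8')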
Example #9
    def __init__(self, photo_string, art_string, content=0.001, style=0.2e6, total_var=0.1e-7):
        # load network
        self.net = build_model()
        # load layers
        layers = ['conv4_2', 'conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
        layers = {k: self.net[k] for k in layers}
        self.layers = layers
        # load images
        im = plt.imread(art_string)
        self.art_raw, self.art = prep_image(im)
        im = plt.imread(photo_string)
        self.photo_raw, self.photo = prep_image(im)
        # precompute layer activations for photo and artwork
        input_im_theano = T.tensor4()
        self._outputs = lasagne.layers.get_output(layers.values(), input_im_theano)
        self.photo_features = {k: theano.shared(output.eval({input_im_theano: self.photo}))
                          for k, output in zip(layers.keys(), self._outputs)}
        self.art_features = {k: theano.shared(output.eval({input_im_theano: self.art}))
                        for k, output in zip(layers.keys(), self._outputs)}
        # Get expressions for layer activations for generated image
        self.generated_image = theano.shared(floatX(np.random.uniform(-128, 128, (1, 3, IMAGE_W, IMAGE_W))))

        gen_features = lasagne.layers.get_output(layers.values(), self.generated_image)
        gen_features = {k: v for k, v in zip(layers.keys(), gen_features)}
        self.gen_features = gen_features

        # set the weights of the regularizers
        self._content, self._style, self._total_var = content, style, total_var
Example #10
def load_image(file_name):
    """
    Load and preprocess an image
    """
    MEAN_VALUE = numpy.array([103.939, 116.779, 123.68]).reshape((3,1,1))
    image = Image.open(file_name)
    im = numpy.array(image)

    # Resize so smallest dim = 256, preserving aspect ratio
    if len(im.shape) == 2:
        im = im[:, :, numpy.newaxis]
        im = numpy.repeat(im, 3, axis=2)
    h, w, _ = im.shape
    if h < w:
        im = skimage.transform.resize(im, (256, w*256/h), preserve_range=True)
    else:
        im = skimage.transform.resize(im, (h*256/w, 256), preserve_range=True)

    # Central crop to 224x224
    h, w, _ = im.shape
    im = im[h//2-112:h//2+112, w//2-112:w//2+112]

    rawim = numpy.copy(im).astype('uint8')

    # Shuffle axes to c01
    im = numpy.swapaxes(numpy.swapaxes(im, 1, 2), 0, 1)

    # Convert to BGR
    im = im[::-1, :, :]

    im = im - MEAN_VALUE
    return rawim, floatX(im[numpy.newaxis])
Example #11
def prep_image(url, mean_image):
    ext = url.split('.')[-1]
    try:
        im = plt.imread(io.BytesIO(urllib.urlopen(url).read()), ext)

        h, w, _ = im.shape
        if h < w:
            im = skimage.transform.resize(im, (256, w*256/h), preserve_range=True)
        else:
            im = skimage.transform.resize(im, (h*256/w, 256), preserve_range=True)
        # Central crop to 224x224
        h, w, _ = im.shape
        im = im[h//2-112:h//2+112, w//2-112:w//2+112]
        rawim = np.copy(im).astype('uint8')

        # Shuffle axes to c01
        im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)

        # Convert to BGR
        #im = im[::-1, :, :]
        im = im - mean_image[:,None,None]
        return rawim, floatX(im[np.newaxis])

    except:
        # Abort
        print "skipping url " + url
        return None, np.zeros((1,))
Example #12
    def test_transform_thin_plate_spline_variable_input(self):
        import lasagne
        from lasagne.utils import floatX
        from theano.tensor import constant

        x = np.random.random((10, 3, 28, 28)).astype('float32')
        x_sym = theano.tensor.tensor4()

        l_in = lasagne.layers.InputLayer((None, 3, None, 28))
        l_loc = lasagne.layers.DenseLayer(
                lasagne.layers.ReshapeLayer(l_in, ([0], 3*28*28)),
                num_units=32)
        l_trans = lasagne.layers.TPSTransformerLayer(
                l_in, l_loc, precompute_grid='auto')

        # check that shape propagation works
        assert l_trans.output_shape[0] is None
        assert l_trans.output_shape[1] == 3
        assert l_trans.output_shape[2] is None
        assert l_trans.output_shape[3] == 28

        # check that data propagation works
        dest_offset = np.zeros(shape=(10, 32))
        inputs = floatX(np.arange(np.prod(x.shape)).reshape(x.shape))
        outputs = l_trans.get_output_for([constant(inputs),
                                          constant(dest_offset)]).eval()
        np.testing.assert_allclose(inputs, outputs, atol=5e-4)
Example #13
def prep_image(url, mean_image):
    ext = url.split('.')[-1]
    im = plt.imread(io.BytesIO(urllib.urlopen(url).read()), ext)
    # Resize so smallest dim = 256, preserving aspect ratio
#     print url
    if im.ndim < 3:
        return None, None
    
    h, w, _ = im.shape
    if h < w:
        im = skimage.transform.resize(im, (256, w*256/h), preserve_range=True)
    else:
        im = skimage.transform.resize(im, (h*256/w, 256), preserve_range=True)

    # Central crop to 224x224
    h, w, _ = im.shape
    im = im[h//2-112:h//2+112, w//2-112:w//2+112]
    rawim = np.copy(im).astype('uint8')
    
    # Shuffle axes to c01
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)
    
    # Convert to BGR
    #im = im[::-1, :, :]
    im = im - mean_image[:,None,None]
    return rawim, floatX(im[np.newaxis])
Example #14
def prep_image(fn, image_mean):
    im = scipy.ndimage.imread(fn, mode='RGB')

    # Resize so smallest dim = 256, preserving aspect ratio

    h, w, _ = im.shape
    if h < w:
        im = skimage.transform.resize(im, (256, w*256/h), preserve_range=True)
    else:
        im = skimage.transform.resize(im, (h*256/w, 256), preserve_range=True)

    # Central crop to 224x224
    h, w, _ = im.shape
    im = im[h//2-112:h//2+112, w//2-112:w//2+112]

    rawim = np.copy(im).astype('uint8')

    # Shuffle axes to c01
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)

    # Convert to BGR
    im = im[::-1, :, :]

    im = im - image_mean

    return rawim, floatX(im[np.newaxis])
Example #15
def parallel_img_preprocess(url):
    '''
    Reshapes the picture and prepares it for passing to the neural net input.
    '''
    ext = url.split('.')[-1]
    im = plt.imread(io.BytesIO(urllib.urlopen(url).read()), ext)
    # Resize so smallest dim = 256, preserving aspect ratio
    h, w, _ = im.shape
    if h < w:
        im = skimage.transform.resize(im, (256, w*256/h), preserve_range=True)
    else:
        im = skimage.transform.resize(im, (h*256/w, 256), preserve_range=True)

    # Central crop to 224x224
    h, w, _ = im.shape
    im = im[h//2-112:h//2+112, w//2-112:w//2+112]

    # Shuffle axes to c01
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)

    # Convert to BGR
    im = im[::-1, :, :]
    
    # returns only preprocessed image
    return floatX(im[np.newaxis])
Example #16
def prep_image(im):
    if im.max() <= 1:		# pixel values must be float32 in [0,255]
        im *= 255.0
    
    if len(im.shape) == 2:
        im = im[:, :, np.newaxis]
        im = np.repeat(im, 3, axis=2)

    # height is span of head
    h, w, _ = im.shape
    im = skimage.transform.resize(im, (IMAGE_W, w*IMAGE_W/h), preserve_range=True)
    # average pixel values (RGB) in VGG training set
    MEAN_VALUES = np.array([129.1863,104.7624,93.5940]).reshape((1,1,3))
    # make square
    if im.shape[0] >= im.shape[1]:
        # print 'L', np.tile(MEAN_VALUES, (IMAGE_W, (IMAGE_W-im.shape[1])/2, 1)).shape
        # print 'C', im.shape
        # print 'R', np.tile(MEAN_VALUES, (IMAGE_W, IMAGE_W - (IMAGE_W-im.shape[1])/2 - im.shape[1], 1)).shape
        im = np.hstack((np.tile(MEAN_VALUES, (IMAGE_W, (IMAGE_W-im.shape[1])/2, 1)), im, np.tile(MEAN_VALUES, (IMAGE_W, IMAGE_W - (IMAGE_W-im.shape[1])/2 - im.shape[1], 1))))
    else:
        im = im[:,:IMAGE_W,:]
    
    rawim = np.copy(im).astype('uint8')
    
    # Shuffle axes to c01
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)
    MEAN_VALUES = np.array([129.1863,104.7624,93.5940]).reshape((3,1,1))
    
    # subtract RGB mean
    im = im - MEAN_VALUES

    # Convert RGB to BGR
    im = im[::-1, :, :]

    return rawim, floatX(im[np.newaxis])
Example #17
def preprocess_image(im_file):
    """
    Preprocess an image for the neural network: resize so the smallest side is 256, then center-crop to 224x224.
    """
    # ways to get image on the web
    # import io
    # ext = url.split('.')[-1]
    # im = plt.imread(io.BytesIO(urllib.urlopen(url).read()), ext)

    im = plt.imread(open(im_file, 'r'))

    # resize so the smallest dimension is 256 while preserving aspect ratio
    h, w, c = im.shape

    if h < w:
        im = skimage.transform.resize(im, (256, w/h*256), preserve_range=True)
    else:
        im = skimage.transform.resize(im, (h/w*256, 256), preserve_range=True)

    h, w, c = im.shape

    # central crop to 224x224
    im = im[h//2-112:h//2+112, w//2-112:w//2+112]

    rawim = np.copy(im).astype('uint8')

    # Shuffle axes to c01
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)

    # Convert to BGR
    im = im[::-1, :, :]

    im = im - MEAN_IMAGE
    return rawim, floatX(im[np.newaxis])
Example #18
    def transfer_style(self, init=None, saveplot=True):
        if init is None:
            self.generated_image.set_value(floatX(np.random.uniform(-128, 128, (1, 3, IMAGE_W, IMAGE_W))))
        else:
            im = plt.imread(init)
            _, tmp_im = prep_image(im)
            self.generated_image.set_value(tmp_im)

        x0 = self.generated_image.get_value().astype('float64')

        # Optimize
        print()
        print("Starting optimization.")
        scipy.optimize.fmin_l_bfgs_b(self.eval_loss, x0.flatten(), fprime=self.eval_grad, maxfun=40)
        print()
        print("Done")
        x0 = self.generated_image.get_value().astype('float64')
        im = deprocess(x0)

        if saveplot:
            plt.gca().xaxis.set_visible(False)
            plt.gca().yaxis.set_visible(False)
            plt.imshow(im)
            plt.savefig("style.jpg")
        return im
Example #19
def prep_image(im):
    if len(im.shape) == 2:
        im = im[:, :, np.newaxis]
        im = np.repeat(im, 3, axis=2)
    # Resize so smallest dim = 224, preserving aspect ratio
    h, w, _ = im.shape
    if h < w:
        im = skimage.transform.resize(im, (224, w*224/h), preserve_range=True)
    else:
        im = skimage.transform.resize(im, (h*224/w, 224), preserve_range=True)

    # Central crop to 224x224
    h, w, _ = im.shape
    im = im[h//2-112:h//2+112, w//2-112:w//2+112]
    
    rawim = np.copy(im).astype('uint8')
    
    # Shuffle axes to c01
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)
    
    # Convert to BGR
    im = im[::-1, :, :]

    im = im - MEAN_VALUES
    return rawim, floatX(im[np.newaxis])
Example #20
def basic(url):
    '''
    INPUT: String
    OUTPUT: Ndarray, Ndarray

    Takes the url of an image file and returns the raw cropped image
    and the preprocessed image array.
    '''
    # Download/load image
    ext = url.split('.')[-1]
    im = plt.imread(io.BytesIO(urllib.urlopen(url).read()), ext)

    # Resize to 256x256
    h, w, _ = im.shape
    if h < w:
        im = skimage.transform.resize(im, (256, w*256/h), preserve_range=True)
    else:
        im = skimage.transform.resize(im, (h*256/w, 256), preserve_range=True)

    # Resize to 224x224, taking center
    h, w, _ = im.shape
    im = im[h//2-112:h//2+112, w//2-112:w//2+112]

    rawim = np.copy(im).astype('uint8')
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)
    im = im[::-1, :, :]

    # Import mean image
    mean_image = joblib.load('/home/ubuntu/vintage-classifier/pkls/mean_image.pkl')
    im = im - mean_image
    return rawim, floatX(im[np.newaxis])
Example #21
def prep_image(im, IMAGE_W, IMAGE_H, BGR=BGR):
    if len(im.shape) == 2:
        im = im[:, :, np.newaxis]
        im = np.repeat(im, 3, axis=2)
    h, w, _ = im.shape
    if h*IMAGE_W < w*IMAGE_H:
        im = skimage.transform.resize(im, (IMAGE_H, w*IMAGE_H//h), preserve_range=True)
    else:
        im = skimage.transform.resize(im, (h*IMAGE_W//w, IMAGE_W), preserve_range=True)        

    # Central crop
    h, w, _ = im.shape
    im = im[h//2-IMAGE_H//2:h//2+IMAGE_H//2, w//2-IMAGE_W//2:w//2+IMAGE_W//2]
    
    rawim = im.astype('uint8')
    
    # Shuffle axes to c01
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)
    
    # Convert RGB to BGR
    if not BGR:
        im = im[::-1, :, :]

    im = im - MEAN_VALUES
    return rawim, floatX(im[np.newaxis])
Example #22
def eval_loss(x0, width):
    # Helper function to interface with scipy.optimize
    
    x0 = floatX(x0.reshape((1, 3, width, width)))
    generated.set_value(x0)
    
    return f_loss().astype('float64')
Example #23
 def sample(self, shape):
     # eg k^2 for conv2d
     receptive_field_size = np.prod(shape[2:])
     c = shape[1]  # input channels
     nl = c * receptive_field_size
     std = np.sqrt(2.0 / (nl))
     return floatX(np.random.normal(0, std, size=shape))
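
This is the fan-in ("He") initialisation: the variance is 2 divided by the number of inputs feeding each unit. A standalone NumPy sketch of the same computation for an assumed conv weight shape:

import numpy as np

shape = (32, 16, 3, 3)               # (out_channels, in_channels, k, k), assumed
nl = shape[1] * np.prod(shape[2:])   # fan-in: 16 * 3 * 3 = 144
std = np.sqrt(2.0 / nl)              # ~= 0.118
W = np.random.normal(0, std, size=shape).astype('float32')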
Example #24
def eval_grad(x0, width):
    # Helper function to interface with scipy.optimize
    
    x0 = floatX(x0.reshape((1, 3, width, width)))
    generated.set_value(x0)
    
    return np.array(f_grad()).flatten().astype('float64')
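
A hedged usage sketch showing how these two helpers plug into scipy.optimize: L-BFGS works on flat float64 vectors, so the helpers reshape and cast on the way in and out. The names generated, f_loss, f_grad and width are assumed to exist as in the surrounding style-transfer code:

import scipy.optimize

x0 = generated.get_value().astype('float64')
scipy.optimize.fmin_l_bfgs_b(lambda x: eval_loss(x, width), x0.flatten(),
                             fprime=lambda x: eval_grad(x, width), maxfun=40)
result = generated.get_value()   # the optimised image lives in the shared variable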
Example #25
def prepare_image(img, width, means):
    
    # if not RGB, force 3 channels
    if len(img.shape) == 2:
        img = img[:, :, np.newaxis]
        img = np.repeat(img, 3, axis=2)
    h, w, _ = img.shape
    if h < w:
        img = skimage.transform.resize(img, (width, w*width/h), preserve_range=True)
    else:
        img = skimage.transform.resize(img, (h*width/w, width), preserve_range=True)

    # crop the center
    h, w, _ = img.shape
    img = img[h//2 - width//2:h//2 + width//2, w//2 - width//2:w//2 + width//2]
    
    rawim = np.copy(img).astype('uint8')
    
    # shuffle axes to c01
    img = np.swapaxes(np.swapaxes(img, 1, 2), 0, 1)
    
    # convert RGB to BGR
    img = img[::-1, :, :]
    
    # zero mean scaling
    img = img - means
    
    return rawim, floatX(img[np.newaxis])
Example #26
def prep_image(img_path, nnet=nnet, local_img=True):
    ext = img_path.split('.')[-1]

    if local_img:
        im = plt.imread(img_path)
    else:
        im = plt.imread(io.BytesIO(urllib.urlopen(img_path).read()), ext)

    # Resize so smallest dim = 256, preserving aspect ratio
    h, w, _ = im.shape
    if h < w:
        im = skimage.transform.resize(im, (256, w*256/h), preserve_range=True)
    else:
        im = skimage.transform.resize(im, (h*256/w, 256), preserve_range=True)

    # Central crop to 224x224
    h, w, _ = im.shape
    im = im[h//2-112:h//2+112, w//2-112:w//2+112]

    rawim = np.copy(im).astype('uint8')

    # Shuffle axes to c01
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)

    # Convert to BGR
    im = im[::-1, :, :]

    im = im - MEAN_IMAGE
    return rawim, floatX(im[np.newaxis])
Example #27
    def test_get_all_params_with_unwrap_shared(self):
        from lasagne.layers import (InputLayer, DenseLayer, get_all_params)
        import theano.tensor as T
        from lasagne.utils import floatX

        l1 = InputLayer((10, 20))
        l2 = DenseLayer(l1, 30)

        W1 = theano.shared(floatX(numpy.zeros((30, 2))))
        W2 = theano.shared(floatX(numpy.zeros((2, 40))))
        W_expr = T.dot(W1, W2)
        l3 = DenseLayer(l2, 40, W=W_expr, b=None)

        l2_params = get_all_params(l2)
        assert get_all_params(l3) == l2_params + [W1, W2]
        assert get_all_params(l3, unwrap_shared=False) == l2_params + [W_expr]
Example #28
 def sample(self, shape):
     if len(shape) != 2:
         raise ValueError('The OneHot initializer '
                          'only works with 2D arrays.')
     M = np.min(shape)
     arr = np.zeros(shape)
     arr[:M, :M] += 1 * np.eye(M)
     return floatX(arr)
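
For illustration, on a non-square shape the initializer produces an identity block padded with zeros; a standalone replica of the computation above:

import numpy as np

shape = (3, 5)
M = np.min(shape)
arr = np.zeros(shape)
arr[:M, :M] += np.eye(M)
# [[1. 0. 0. 0. 0.]
#  [0. 1. 0. 0. 0.]
#  [0. 0. 1. 0. 0.]]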
Example #29
 def D(x):
     batch_size, seq_length, _ = self.input_layer.get_output_shape()
     shape = (seq_length, batch_size, 4*self.num_units)
     retain_prob = 1 - self.dropout_rate
     if self.dropout_rescale:
         x /= retain_prob
     return x * utils.floatX(
         _srng.binomial(shape, p=retain_prob, dtype='int32'))
Example #30
def load_data(siamese_dataset, siamese_dataset_valid):
    p = Preprocessing(resize_to=70, half_size=25)
    im1 = p.preprocess('../FinalCapstoneData/flat_data/' + siamese_dataset[0][0])
    im1 = floatX(im1[np.newaxis])
    X_train = im1.reshape(1, 3, 2*half_size, 2*half_size)
    y_train = [siamese_dataset[0][1]]
    c = 0
    for image in siamese_dataset[1:]:
            im = p.preprocess('../FinalCapstoneData/flat_data/' + image[0])
            im = floatX(im[np.newaxis])
            X_train = np.concatenate((X_train, im.reshape(1, 3, 2*half_size, 2*half_size)), axis=0)
            y_train.append(image[1])
            c+=1
            print c
    print "x :", X_train.shape
    print np.array(y_train).size
    y_train = np.array(y_train)

    p_val = Preprocessing(resize_to=70, half_size=25)
    im1_val = p_val.preprocess('../FinalCapstoneData/flat_data/' + siamese_dataset_valid[0][0])
    im1_val = floatX(im1_val[np.newaxis])
    X_valid = im1_val.reshape(1, 3, 2*half_size, 2*half_size)
    y_valid = [siamese_dataset_valid[0][1]]
    c = 0
    for image_val in siamese_dataset_valid[1:]:
            im_val = p_val.preprocess('../FinalCapstoneData/flat_data/' + image_val[0])
            im_val = floatX(im_val[np.newaxis])
            X_valid = np.concatenate((X_valid, im_val.reshape(1, 3, 2*half_size, 2*half_size)), axis=0)
            y_valid.append(image_val[1])
            c+=1
            print c
    print "x :", X_valid.shape
    print np.array(y_valid).size
    y_valid = np.array(y_valid)

    return dict(
        X_train=lasagne.utils.floatX(X_train),
        y_train=y_train.astype('int32'),
        X_valid=lasagne.utils.floatX(X_valid),
        y_valid=y_valid.astype('int32'),
        num_examples_train=X_train.shape[0],
        num_examples_valid=X_valid.shape[0],
        input_height=X_train.shape[2],
        input_width=X_train.shape[3],
        )
Example #31
    def test_get_output_for(self, pool_size, stride):
        try:
            input = floatX(np.random.randn(8, 16, 17, 13))
            input_layer = self.input_layer(input.shape)
            input_theano = theano.shared(input)
            result = self.layer(
                input_layer,
                (pool_size, pool_size),
                (stride, stride),
                ignore_border=False,
            ).get_output_for(input_theano)

            result_eval = result.eval()
            numpy_result = max_pool_2d(
                input, (pool_size, pool_size), (stride, stride))

            assert np.all(numpy_result.shape == result_eval.shape)
            assert np.allclose(result_eval, numpy_result)
        except NotImplementedError:
            pytest.skip()
Example #32
    def test_get_output_for_fast(self, pool_dims, fixed):
        try:
            input = floatX(np.random.randn(8, 16, 17, 13))
            if fixed:
                input_layer = self.input_layer(input.shape)
            else:
                input_layer = self.input_layer((None, None, None, None))
            input_theano = theano.shared(input)
            layer = self.layer(input_layer, pool_dims)

            result = layer.get_output_for(input_theano)

            result_eval = result.eval()
            numpy_result = spatial_pool(input, pool_dims)

            assert result_eval.shape == numpy_result.shape
            assert np.allclose(result_eval, numpy_result)
            assert result_eval.shape[2] == layer.output_shape[2]
        except NotImplementedError:
            pytest.skip()
Example #33
def prep_image(im):
    if im.max() <= 1:  # pixel values must be float32 in [0,255]
        im *= 255.0

    if len(im.shape) == 2:
        im = im[:, :, np.newaxis]
        im = np.repeat(im, 3, axis=2)

    # height is span of head
    h, w, _ = im.shape
    im = skimage.transform.resize(im, (IMAGE_W, w * IMAGE_W / h),
                                  preserve_range=True)
    # average pixel values (RGB) in VGG training set
    MEAN_VALUES = np.array([129.1863, 104.7624, 93.5940]).reshape((1, 1, 3))
    # make square
    if im.shape[0] >= im.shape[1]:
        # print 'L', np.tile(MEAN_VALUES, (IMAGE_W, (IMAGE_W-im.shape[1])/2, 1)).shape
        # print 'C', im.shape
        # print 'R', np.tile(MEAN_VALUES, (IMAGE_W, IMAGE_W - (IMAGE_W-im.shape[1])/2 - im.shape[1], 1)).shape
        im = np.hstack(
            (np.tile(MEAN_VALUES,
                     (IMAGE_W, (IMAGE_W - im.shape[1]) / 2, 1)), im,
             np.tile(MEAN_VALUES,
                     (IMAGE_W, IMAGE_W -
                      (IMAGE_W - im.shape[1]) / 2 - im.shape[1], 1))))
    else:
        im = im[:, :IMAGE_W, :]

    rawim = np.copy(im).astype('uint8')

    # Shuffle axes to c01
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)
    MEAN_VALUES = np.array([129.1863, 104.7624, 93.5940]).reshape((3, 1, 1))

    # subtract RGB mean
    im = im - MEAN_VALUES

    # Convert RGB to BGR
    im = im[::-1, :, :]

    return rawim, floatX(im[np.newaxis])
Example #34
def fast_gradient_perturbation(inputs,
                               logits,
                               labels=None,
                               epsilon=0.3,
                               ord=np.inf):
    epsilon = floatX(epsilon)
    if labels is None:
        raise ValueError
    nll = categorical_crossentropy(logits, labels)
    grad = T.grad(nll.sum(), inputs, consider_constant=[labels])
    if ord == np.inf:
        perturbation = T.sgn(grad)
    elif ord == 1:
        sum_ind = list(range(1, inputs.ndim))
        perturbation = grad / T.sum(T.abs_(grad), axis=sum_ind, keepdims=True)
    elif ord == 2:
        sum_ind = list(range(1, inputs.ndim))
        perturbation = grad / T.sqrt(
            T.sum(grad**2, axis=sum_ind, keepdims=True))
    perturbation *= epsilon
    return gradient.disconnected_grad(perturbation)
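
A hedged usage sketch (symbolic): the returned perturbation is added to the inputs to form FGSM-style adversarial examples. inputs, logits and labels are assumed to be existing Theano expressions from the surrounding model, and the clipping range is an assumption about the input scale:

perturbation = fast_gradient_perturbation(inputs, logits, labels, epsilon=0.3)
adv_inputs = T.clip(inputs + perturbation, 0., 1.)  # keep inputs in a valid range (assumed [0, 1])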
Example #35
def prep_image(url, avg_im):
    # Read URL
    ext = url.split('.')[-1]
    im = plt.imread(io.BytesIO(urllib.urlopen(url).read()), ext)

    raw_image = np.copy(misc.imresize(im, (224, 224, 3)))
    
    # Note that in MATLAB, bilinear interpolation (not the default) must be
    # used for imresize
    im = im.astype(np.float32)
    im = tns.resize(im, (224, 224, 3), preserve_range=True)

    # Subtract the average image
    im[:, :, 0] = im[:, :, 0] - avg_im[0]
    im[:, :, 1] = im[:, :, 1] - avg_im[1]
    im[:, :, 2] = im[:, :, 2] - avg_im[2]

    # Shuffle axes to c01
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)

    return raw_image, floatX(im[np.newaxis])
Example #36
    def test_get_output_for(self, scale_factor, mode):
        input = floatX(np.random.randn(8, 16, 17, 13, 15))
        input_layer = self.input_layer(input.shape)
        input_theano = theano.shared(input)
        result = self.layer(
            input_layer,
            (scale_factor, scale_factor, scale_factor),
            mode,
        ).get_output_for(input_theano)

        result_eval = result.eval()
        if mode in {'repeat', None}:
            numpy_result = upscale_3d(input, (scale_factor, scale_factor,
                                              scale_factor))
        elif mode == 'dilate':
            numpy_result = upscale_3d_dilate(input, (scale_factor,
                                                     scale_factor,
                                                     scale_factor))

        assert np.all(numpy_result.shape == result_eval.shape)
        assert np.allclose(result_eval, numpy_result)
Example #37
    def test_get_output_for_kaiming(self, pool_dims, fixed, mode):
        try:
            input = floatX(np.random.randn(8, 16, 17, 13))
            if fixed:
                input_layer = self.input_layer(input.shape)
            else:
                input_layer = self.input_layer((None, None, None, None))
            input_theano = theano.shared(input)
            layer = self.layer(input_layer, pool_dims,
                               mode=mode, implementation='kaiming')

            result = layer.get_output_for(input_theano)

            result_eval = result.eval()
            numpy_result = np_spatial_pool_kaiming(input, pool_dims, mode)

            assert result_eval.shape == numpy_result.shape
            assert np.allclose(result_eval, numpy_result, atol=1e-7)
            assert result_eval.shape[2] == layer.output_shape[2]
        except NotImplementedError:
            pytest.skip()
Example #38
    def test_get_output_for_ignoreborder(self, pool_size, stride, pad):
        try:
            input = floatX(np.random.randn(5, 8, 16, 17, 13))
            input_layer = self.input_layer(input.shape)
            input_theano = theano.shared(input)

            result = self.layer(
                input_layer,
                pool_size,
                stride,
                pad,
            ).get_output_for(input_theano)

            result_eval = result.eval()
            numpy_result = max_pool_3d_ignoreborder(input, [pool_size] * 3,
                                                    [stride] * 3, [pad] * 3)

            assert np.all(numpy_result.shape == result_eval.shape)
            assert np.allclose(result_eval, numpy_result)
        except NotImplementedError:
            pytest.skip()
Example #39
def prep_image(url, mean_image):
    '''
    Input: url of an image (typically on S3, but can be any url) and the mean image.
    Resize the image so the smallest dimension (h or w) is 256,
    then center-crop to 224x224.
    Output: raw 224x224x3 uint8 crop and the preprocessed BGR,
    mean-subtracted array of shape (1, 3, 224, 224).
    '''
    ext = url.split('.')[-1]
    print url
    im = plt.imread(io.BytesIO(urllib.urlopen(url).read()), ext)
    # Resize so smallest dim = 256, preserving aspect ratio
    if len(im.shape) < 3:
        im = np.array((im, im, im))
        print im.shape
        im = np.swapaxes(im, 0, 1)
        im = np.swapaxes(im, 1, 2)
        print 'new shape: ', im.shape
    h, w, _ = im.shape
    if h < w:
        im = skimage.transform.resize(im, (256, w * 256 / h),
                                      preserve_range=True)
    else:
        im = skimage.transform.resize(im, (h * 256 / w, 256),
                                      preserve_range=True)

    # Central crop to 224x224
    h, w, _ = im.shape
    im = im[h // 2 - 112:h // 2 + 112, w // 2 - 112:w // 2 + 112]

    rawim = np.copy(im).astype('uint8')

    # Shuffle axes to c01
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)

    # Convert to BGR
    im = im[::-1, :, :]

    im = im - mean_image
    return rawim, floatX(im[np.newaxis])
Example #40
def exp_a(name):
    source = RealApplianceSource(
        filename='/data/dk3810/ukdale.h5',
        appliances=[['fridge freezer', 'fridge', 'freezer'],
                    'hair straighteners', 'television'
                    # 'dish washer',
                    # ['washer dryer', 'washing machine']
                    ],
        max_appliance_powers=[300, 500, 200],  #, 2500, 2400],
        on_power_thresholds=[20, 20, 20],  #, 20, 20],
        max_input_power=1000,
        min_on_durations=[60, 60, 60],  #, 1800, 1800],
        window=("2013-06-01", "2014-07-01"),
        seq_length=1000,
        output_one_appliance=False,
        boolean_targets=False,
        min_off_duration=60,
        train_buildings=[1],
        validation_buildings=[1],
        skip_probability=0,
        n_seq_per_batch=50)

    net = Net(experiment_name=name,
              source=source,
              save_plot_interval=SAVE_PLOT_INTERVAL,
              loss_function=crossentropy,
              updates=partial(nesterov_momentum, learning_rate=1.0),
              layers_config=[{
                  'type': LSTMLayer,
                  'num_units': 50,
                  'W_in_to_cell': Constant(50),
                  'b_ingate': floatX(np.linspace(-49, 0, 50)),
                  'gradient_steps': GRADIENT_STEPS,
                  'peepholes': False
              }, {
                  'type': DenseLayer,
                  'num_units': source.n_outputs,
                  'nonlinearity': sigmoid
              }])
    return net
Example #41
def prep_image(filepath):
    # ext = url.split('.')[-1]
    # im = plt.imread(io.BytesIO(urllib.urlopen(url).read()), ext)
    im = plt.imread(filepath)
    # Resize so smallest dim = 256, preserving aspect ratio
    h, w, _ = im.shape
    if h < w:
        im = skimage.transform.resize(im, (256, w * 256 / h),
                                      preserve_range=True)
    else:
        im = skimage.transform.resize(im, (h * 256 / w, 256),
                                      preserve_range=True)
    # Central crop to 224x224
    h, w, _ = im.shape
    im = im[h // 2 - 112:h // 2 + 112, w // 2 - 112:w // 2 + 112]
    rawim = np.copy(im).astype('uint8')
    # Shuffle axes to c01
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)
    # Convert to BGR
    im = im[::-1, :, :]
    im = im - MEAN_IMAGE
    return rawim, floatX(im[np.newaxis])
Example #42
def download_images(mdl, synsets=SYNSETS):
    imgs = []
    base_url = 'http://www.image-net.org/api/text/imagenet.synset.geturls?wnid='
    for s in synsets:
        url = base_url + s
        rq = requests.get(url)
        if rq.status_code != 200:
            logger.debug("Request status: %d" % rq.status_code)
            raise ValueError
        links = [x.strip() for x in rq.text.split('\n')]
        for i in range(len(links)):
            url = links[i]
            rq = requests.get(url, stream=True)
            if rq.status_code != 200:
                if i == len(links) - 1:
                    raise ValueError
                else:
                    continue
            try:
                img = Image.open(StringIO(rq.content))
                img = np.array(img)
                img = img[:, :, ::-1]

                img = img.transpose((2, 0, 1))
                img = mdl.preprocess(img)

                # img = cv2.resize(img, (224, 224))
                # img = img.transpose((2, 0, 1))
                # img = img - MEAN_VALUE
                imgs.append(img)
                break
            except IndexError:
                logger.debug("Error reading image")
                if i == len(links) - 1:
                    raise ValueError
                else:
                    continue
    imgs = floatX(imgs)
    return imgs
Example #43
def adam(lr, tparams, grads, inp, cost, hard_attn_up):
    from lasagne import utils
    from theano import tensor
    gshared = [
        theano.shared(p.get_value() * utils.floatX(0.), name='%s_grad' % k)
        for k, p in tparams.iteritems()
    ]
    gsup = [(gs, g) for gs, g in zip(gshared, grads)]

    f_grad_shared = theano.function(inp, cost, updates=gsup + hard_attn_up)
    lr0 = 0.002
    b1 = 0.1
    b2 = 0.001
    e = 1e-8
    updates = []
    i = theano.shared(utils.floatX(0.))
    i_t = i + utils.floatX(1.)
    fix1 = utils.floatX(1.) - b1**(i_t)
    fix2 = utils.floatX(1.) - b2**(i_t)
    lr_t = lr0 * (tensor.sqrt(fix2) / fix1)

    for p, g in zip(tparams.values(), gshared):
        m = theano.shared(p.get_value() * utils.floatX(0.))
        v = theano.shared(p.get_value() * utils.floatX(0.))
        m_t = (b1 * g) + (1. - b1) * m
        # print "\n\ng: {}, m: {}".format(g.type, m.type)
        v_t = (b2 * tensor.sqr(g)) + (1. - b2) * v
        updates.append((m, m_t))
        updates.append((v, v_t))
        # print "\n\nm: {}, m_t: {}, v: {}, v_t: {}, g:{}".format(m.type, m_t.type, v.type, v_t.type,g.type)
        g_t = m_t / (tensor.sqrt(v_t) + e)
        p_t = p - (lr_t * g_t)
        updates.append((p, p_t))
    updates.append((i, i_t))
    print "\n\ni's type: {}, i_t's type: {}".format(i.type, i_t.type)

    f_update = theano.function([lr], [],
                               updates=updates,
                               on_unused_input='ignore')

    return f_grad_shared, f_update
Example #44
def adam(loss_or_grads,
         params,
         learning_rate=0.001,
         beta1=0.9,
         beta2=0.999,
         epsilon=1e-8,
         bias_correction=True):
    all_grads = get_or_compute_grads(loss_or_grads, params)
    t_prev = theano.shared(utils.floatX(0.))
    updates = OrderedDict()

    # Using theano constant to prevent upcasting of float32
    one = T.constant(1)

    t = t_prev + 1
    if bias_correction:
        a_t = learning_rate * T.sqrt(one - beta2**t) / (one - beta1**t)
    else:
        a_t = learning_rate

    for param, g_t in zip(params, all_grads):
        value = param.get_value(borrow=True)
        m_prev = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                               broadcastable=param.broadcastable)
        v_prev = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                               broadcastable=param.broadcastable)

        m_t = beta1 * m_prev + (one - beta1) * g_t
        v_t = beta2 * v_prev + (one - beta2) * g_t**2

        step = a_t * m_t / (T.sqrt(v_t) + epsilon)

        updates[m_prev] = m_t
        updates[v_prev] = v_t
        updates[param] = param - step

    updates[t_prev] = t
    return updates
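
A hedged usage sketch: like lasagne.updates.adam, the function maps a scalar loss and a list of shared parameters to an update dictionary for theano.function. The names loss, params, input_var and target_var are assumed to come from an existing network:

updates = adam(loss, params, learning_rate=1e-3, bias_correction=True)
train_fn = theano.function([input_var, target_var], loss, updates=updates)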
Example #45
    def adam(self,
             cost,
             params,
             learning_rate=0.001,
             beta1=0.9,
             beta2=0.999,
             epsilon=1e-8):

        all_grads = T.grad(cost=cost, wrt=params)
        all_grads = total_norm_constraint(all_grads, 10)

        grad_norm = T.sqrt(sum(map(lambda x: T.sqr(x).sum(), all_grads)))
        not_finite = T.or_(T.isnan(grad_norm), T.isinf(grad_norm))

        t_prev = theano.shared(utils.floatX(0.))
        updates = OrderedDict()

        t = t_prev + 1
        a_t = learning_rate * T.sqrt(1 - beta2**t) / (1 - beta1**t)

        for param, g_t in zip(params, all_grads):
            g_t = T.switch(not_finite, 0.1 * param, g_t)
            value = param.get_value(borrow=True)
            m_prev = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                                   broadcastable=param.broadcastable)
            v_prev = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                                   broadcastable=param.broadcastable)

            m_t = beta1 * m_prev + (1 - beta1) * g_t
            v_t = beta2 * v_prev + (1 - beta2) * g_t**2
            step = a_t * m_t / (T.sqrt(v_t) + epsilon)

            updates[m_prev] = m_t
            updates[v_prev] = v_t
            updates[param] = param - step

        updates[t_prev] = t
        return updates
Example #46
def step_adam(grads, params, t_prev, m_prev_list, v_prev_list,
              learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
    """ Modified from lasagne.updates.adam """
    updates = OrderedDict()

    # Using numpy constant to prevent upcasting of float32
    one = floatX(np.array(1))

    t = t_prev.get_value() + 1
    a_t = learning_rate*np.sqrt(one-beta2**t)/(one-beta1**t)

    for param, g_t, m_prev, v_prev in \
            zip(params, grads, m_prev_list, v_prev_list):
        m_t = beta1*m_prev.get_value() + (one-beta1)*g_t
        v_t = beta2*v_prev.get_value() + (one-beta2)*g_t**2
        step = a_t*m_t/(np.sqrt(v_t) + epsilon)

        updates[m_prev] = m_t
        updates[v_prev] = v_t
        updates[param] = param.get_value() - step

    updates[t_prev] = t
    return updates
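
A hedged usage sketch: because this variant works on NumPy gradients rather than symbolic ones, the returned dictionary maps shared variables to plain arrays, which can be written back with set_value. All names below are assumed to exist in the caller:

updates = step_adam(grads, params, t_prev, m_prev_list, v_prev_list)
for shared_var, new_value in updates.items():
    shared_var.set_value(floatX(new_value))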
Example #47
def prep_image(im):
    if len(im.shape) == 2:
        im = im[:, :, np.newaxis]
        im = np.repeat(im, 3, axis=2)
    # Resize so smallest dim = 224, preserving aspect ratio
    h, w, _ = im.shape
    if h < w:
        im = skimage.transform.resize(im, (224, w * 224 / h), preserve_range=True)
    else:
        im = skimage.transform.resize(im, (h * 224 / w, 224), preserve_range=True)

    # Central crop to 224x224
    h, w, _ = im.shape
    im = im[h // 2 - 112:h // 2 + 112, w // 2 - 112:w // 2 + 112]

    # Shuffle axes to c01
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)

    # Convert to BGR
    im = im[::-1, :, :]

    im = im - MEAN_VALUES
    return floatX(im[np.newaxis])
Example #48
def vgg_prepare_image(im, image_mean, image_size=224):

    # Scale the image
    h, w, _ = im.shape
    if h < w:
        im = skimage.transform.resize(im, (image_size, w * image_size / h),
                                      preserve_range=True)
    else:
        im = skimage.transform.resize(im, (h * image_size / w, image_size),
                                      preserve_range=True)

    # Crop the central 224x224
    h, w, _ = im.shape
    im = im[h // 2 - image_size // 2:h // 2 + image_size // 2,
            w // 2 - image_size // 2:w // 2 + image_size // 2]

    # Convert to uint8 type
    rawim = np.copy(im).astype('uint8')

    # (height, width, channel) to (channel, height, width)
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)

    # Images come in RGB channel order, while VGG net expects BGR:
    im = im[::-1, :, :]

    # If necessary, add 2 axes to the mean so that it will broadcast when we subtract
    # it from the image
    if len(image_mean.shape) == 1:
        image_mean = image_mean[:, None, None]

    # Subtract the mean
    im = im - image_mean

    # Add the sample axis
    im = im[np.newaxis]

    return rawim, floatX(im)
Example #49
def sgdWithLearningRateDecay(loss_or_grads, params, learningRate,
                             learningRateDecay):

    from lasagne.updates import get_or_compute_grads

    import theano.tensor as T
    import theano

    from collections import OrderedDict
    from lasagne import utils

    grads = get_or_compute_grads(loss_or_grads, params)
    updates = OrderedDict()

    t_prev = theano.shared(utils.floatX(0.))
    one = T.constant(1)

    t = t_prev + 1

    clr = learningRate / (1 + t * learningRateDecay)

    # http://leon.bottou.org/publications/pdf/tricks-2012.pdf
    # for example suggests (section 5.2)
    # "use learning rates of the form
    #  gamma_t = gamma_0 / (1 + gamma_0 * lambda * t)
    # determine the best gamma_0 using a small training
    # data sample"
    # (lambda / 2 is the coefficient of the weights norm
    #  of L2 regularization)

    for param, grad in zip(params, grads):
        updates[param] = param - clr * grad

    updates[t_prev] = t

    return updates
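
A worked example of the schedule clr = learningRate / (1 + t * learningRateDecay), with assumed values learningRate = 0.1 and learningRateDecay = 1e-4:

for t in (1, 1000, 10000):
    print(t, 0.1 / (1 + t * 1e-4))
# t = 1     -> 0.09999...
# t = 1000  -> 0.0909...
# t = 10000 -> 0.05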
Example #50
    def test_adaptivegaussian_layer(self, filter_size, init_std):
        input = floatX(np.ones((10, 1, 1000)))

        # test the case with one channel
        assert (input.shape[1] == 1)

        input_layer = self.input_layer(input.shape)
        input_theano = theano.shared(input)

        layer = self.adaptivegaussian_layer(input_layer, filter_size, init_std)
        layer_result = layer.get_output_for(input_theano).eval()

        # theano gaussian filter
        theano_gf = layer.W.eval()

        # numpy gaussian filter
        np_gf = self.make_numpy_gaussian_filter_v2(filter_size, init_std)

        numpy_result = self.convolve_numpy_array(input, np_gf)

        assert np.all(numpy_result.shape == layer.output_shape)
        assert np.all(numpy_result.shape == layer_result.shape)
        assert np.allclose(theano_gf[0, 0, :], np_gf[0, 0, :])
        assert np.allclose(numpy_result, layer_result)
Example #51
def prep_image(im):
    if len(im.shape) == 2:
        im = im[:, :, np.newaxis]
        im = np.repeat(im, 3, axis=2)
    h, w, _ = im.shape
    if h < w:
        im = skimage.transform.resize(im, (IMAGE_W, w*IMAGE_W/h), preserve_range=True)
    else:
        im = skimage.transform.resize(im, (h*IMAGE_W/w, IMAGE_W), preserve_range=True)

    # Central crop
    h, w, _ = im.shape
    im = im[h//2-IMAGE_W//2:h//2+IMAGE_W//2, w//2-IMAGE_W//2:w//2+IMAGE_W//2]
    
    rawim = np.copy(im).astype('uint8')
    
    # Shuffle axes to c01
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)
    
    # Convert RGB to BGR
    im = im[::-1, :, :]

    im = im - MEAN_VALUES
    return rawim, floatX(im[np.newaxis])
Example #52
        rampdown_value = model.rampdown(epoch)
        
        learning_rate = rampdown_value * model.learning_rate_max
        adam_beta1 = rampdown_value * model.adam_beta1 + (1.0 - rampdown_value) * model.rampdown_beta1_target

        #unsup_weight_var
        unsup_weight = rampup_value * scaled_unsup_weight_max
        if epoch == 0:
            unsup_weight = 0.0
        
        # Initialize epoch predictions for temporal ensembling.

        if model.network_type == 'tempens':
            epoch_predictions = np.zeros((len(training_word_pos_vec3D), model.num_classes))
            epoch_execmask = np.zeros(len(training_word_pos_vec3D)) # Which inputs were executed.
            training_targets = floatX(training_targets)    
        
        # In each epoch, we do a full pass over the training data:
        train_err = 0
        train_acc=0
        train_pi_loss=0
        train_batches = 0
        start_time = time.time()
        
        #Pi model:run batch
        if model.network_type=="pi":
            for batch in pro_data.iterate_minibatches_pi(training_word_pos_vec3D, \
              training_label_1hot, training_sen_length, model.batch_size, mask_train_input, shuffle=True):
                inputs, targets, mask_sen_length, mask_train = batch
                mark_input=model.mask(mask_sen_length,model.batch_size)
                err ,acc, pi_loss = train_fn(inputs, targets, mark_input, inputs, mask_train, unsup_weight, learning_rate, adam_beta1)
Example #53
def grad_scale(layer, scale):
    for param in layer.get_params(trainable=True):
        param.tag.grad_scale = floatX(scale)
    return layer
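
A hedged usage sketch: the grad_scale tag is presumably consumed later by a custom update rule that multiplies each parameter's gradient by its tag. Here layer is any existing Lasagne layer:

layer = grad_scale(layer, 0.1)
scales = [getattr(p.tag, 'grad_scale', floatX(1.0))
          for p in layer.get_params(trainable=True)]
# a custom update rule could then use scale * grad for each (param, grad) pair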
Example #54
def preproc_img(path):
    img = cv2.imread(path, cv2.IMREAD_COLOR)
    # img = cv2.resize(img, (480, 480), interpolation=cv2.INTER_AREA)
    img = np.transpose(img, (2, 0, 1))
    # img = img - MEAN_VALUE
    return floatX(img[np.newaxis])
Example #55
def lasagne_model(model_base, model_flavor, **params):
    import theano
    theano.config.floatX = 'float32'

    from theano import function as tfunction, shared as tshared
    from theano.tensor import tensor4, imatrix, nnet
    from theano.tensor import grad as Tgrad, mean as Tmean, reshape as Treshape

    from lasagne.utils import floatX
    from lasagne.updates import adam as lasagne_adam, total_norm_constraint
    from lasagne.layers import get_output as ll_output, \
        get_all_params as ll_all_params

    max_norm = 5.0

    verbose = params.get('verbose', False)
    overwrite = params.get('overwrite', True)

    sym_x = tensor4()  # [nbatch,imgchan,imgrows,imgcols] dims
    sym_y = imatrix()  # one-hot vector of [nb_class x 1] dims

    l_A_net = model_base['A_net']
    l_transform = model_base['transform']
    l_out = model_base['net_out']
    output_train = ll_output(l_out, sym_x, deterministic=False)
    output_shape = (-1, l_out.shape[1])  # nb_classes = l_out.shape[1]
    output_flat = Treshape(output_train, output_shape)
    output_loss = nnet.categorical_crossentropy
    output_cost = Tmean(output_loss(output_flat + tol, sym_y.flatten()))

    trainable_params = ll_all_params(l_out, trainable=True)

    all_grads = Tgrad(output_cost, trainable_params)
    updates, norm = total_norm_constraint(all_grads,
                                          max_norm=max_norm,
                                          return_norm=True)

    shared_lr = tshared(floatX(update_lr))
    updates = lasagne_adam(updates,
                           trainable_params,
                           learning_rate=shared_lr,
                           beta1=beta_1,
                           beta2=beta_2,
                           epsilon=tol)

    model_train = tfunction([sym_x, sym_y], [output_cost, output_train, norm],
                            updates=updates)

    output_eval, l_A_eval = ll_output([l_out, l_A_net],
                                      sym_x,
                                      deterministic=True)
    model_eval = tfunction(
        [sym_x],
        [output_eval.reshape(output_shape),
         l_A_eval.reshape(output_shape)])
    model_batch = lambda X, y: model_train(X, int32(y))[0]
    model_pred = lambda X: model_eval(X)[0]
    model_xform = lambda X: layer_output(X, l_transform)
    model_save = lambda outf: save_all_weights(
        l_out, outf, overwrite=overwrite)
    model_load = lambda weightf: load_all_weights(l_out, weightf)

    return Model(package='lasagne',
                 backend='theano',
                 flavor=model_flavor,
                 base=model_base,
                 batch=model_batch,
                 predict=model_pred,
                 transform=model_xform,
                 save=model_save,
                 load=model_load,
                 params=params)
Example #56
def transfer_process(output_url, num):
    IMAGE_W = 600

    # Note: tweaked to use average pooling instead of maxpooling
    def build_model():
        net = {}
        net['input'] = InputLayer((1, 3, IMAGE_W, IMAGE_W))
        net['conv1_1'] = ConvLayer(net['input'],
                                   64,
                                   3,
                                   pad=1,
                                   flip_filters=False)
        net['conv1_2'] = ConvLayer(net['conv1_1'],
                                   64,
                                   3,
                                   pad=1,
                                   flip_filters=False)
        net['pool1'] = PoolLayer(net['conv1_2'], 2, mode='average_exc_pad')
        net['conv2_1'] = ConvLayer(net['pool1'],
                                   128,
                                   3,
                                   pad=1,
                                   flip_filters=False)
        net['conv2_2'] = ConvLayer(net['conv2_1'],
                                   128,
                                   3,
                                   pad=1,
                                   flip_filters=False)
        net['pool2'] = PoolLayer(net['conv2_2'], 2, mode='average_exc_pad')
        net['conv3_1'] = ConvLayer(net['pool2'],
                                   256,
                                   3,
                                   pad=1,
                                   flip_filters=False)
        net['conv3_2'] = ConvLayer(net['conv3_1'],
                                   256,
                                   3,
                                   pad=1,
                                   flip_filters=False)
        net['conv3_3'] = ConvLayer(net['conv3_2'],
                                   256,
                                   3,
                                   pad=1,
                                   flip_filters=False)
        net['conv3_4'] = ConvLayer(net['conv3_3'],
                                   256,
                                   3,
                                   pad=1,
                                   flip_filters=False)
        net['pool3'] = PoolLayer(net['conv3_4'], 2, mode='average_exc_pad')
        net['conv4_1'] = ConvLayer(net['pool3'],
                                   512,
                                   3,
                                   pad=1,
                                   flip_filters=False)
        net['conv4_2'] = ConvLayer(net['conv4_1'],
                                   512,
                                   3,
                                   pad=1,
                                   flip_filters=False)
        net['conv4_3'] = ConvLayer(net['conv4_2'],
                                   512,
                                   3,
                                   pad=1,
                                   flip_filters=False)
        net['conv4_4'] = ConvLayer(net['conv4_3'],
                                   512,
                                   3,
                                   pad=1,
                                   flip_filters=False)
        net['pool4'] = PoolLayer(net['conv4_4'], 2, mode='average_exc_pad')
        net['conv5_1'] = ConvLayer(net['pool4'],
                                   512,
                                   3,
                                   pad=1,
                                   flip_filters=False)
        net['conv5_2'] = ConvLayer(net['conv5_1'],
                                   512,
                                   3,
                                   pad=1,
                                   flip_filters=False)
        net['conv5_3'] = ConvLayer(net['conv5_2'],
                                   512,
                                   3,
                                   pad=1,
                                   flip_filters=False)
        net['conv5_4'] = ConvLayer(net['conv5_3'],
                                   512,
                                   3,
                                   pad=1,
                                   flip_filters=False)
        net['pool5'] = PoolLayer(net['conv5_4'], 2, mode='average_exc_pad')

        return net

    # Download the normalized pretrained weights from:
    # https://s3.amazonaws.com/lasagne/recipes/pretrained/imagenet/vgg19_normalized.pkl
    # (original source: https://bethgelab.org/deepneuralart/)

    # build VGG net and load weights
    net = build_model()

    values = pickle.load(open('vgg19_normalized.pkl'))['param values']
    lasagne.layers.set_all_param_values(net['pool5'], values)

    MEAN_VALUES = np.array([104, 117, 123]).reshape((3, 1, 1))

    def prep_image(im):
        if len(im.shape) == 2:
            im = im[:, :, np.newaxis]
            im = np.repeat(im, 3, axis=2)
        h, w, _ = im.shape
        if h < w:
            im = skimage.transform.resize(im, (IMAGE_W, w * IMAGE_W / h),
                                          preserve_range=True)
        else:
            im = skimage.transform.resize(im, (h * IMAGE_W / w, IMAGE_W),
                                          preserve_range=True)

        # Central crop
        h, w, _ = im.shape
        im = im[h // 2 - IMAGE_W // 2:h // 2 + IMAGE_W // 2,
                w // 2 - IMAGE_W // 2:w // 2 + IMAGE_W // 2]

        rawim = np.copy(im).astype('uint8')

        # Shuffle axes to c01
        im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)

        # Convert RGB to BGR
        im = im[::-1, :, :]

        im = im - MEAN_VALUES
        return rawim, floatX(im[np.newaxis])

    photo = plt.imread("files/pic/pct.jpg")
    rawim, photo = prep_image(photo)
    #plt.imshow(rawim)

    art = plt.imread('files/style/style.jpg')
    rawim, art = prep_image(art)

    #plt.imshow(rawim)

    def gram_matrix(x):
        x = x.flatten(ndim=3)
        g = T.tensordot(x, x, axes=([2], [2]))
        return g

    def content_loss(P, X, layer):
        p = P[layer]
        x = X[layer]

        loss = 1. / 2 * ((x - p)**2).sum()
        return loss

    def style_loss(A, X, layer):
        a = A[layer]
        x = X[layer]

        A = gram_matrix(a)
        G = gram_matrix(x)

        N = a.shape[1]
        M = a.shape[2] * a.shape[3]

        loss = 1. / (4 * N**2 * M**2) * ((G - A)**2).sum()
        return loss

    def total_variation_loss(x):
        return (((x[:, :, :-1, :-1] - x[:, :, 1:, :-1])**2 +
                 (x[:, :, :-1, :-1] - x[:, :, :-1, 1:])**2)**1.25).sum()

    layers = ['conv4_2', 'conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
    layers = {k: net[k] for k in layers}

    # Precompute layer activations for photo and artwork
    input_im_theano = T.tensor4()
    outputs = lasagne.layers.get_output(layers.values(), input_im_theano)

    photo_features = {
        k: theano.shared(output.eval({input_im_theano: photo}))
        for k, output in zip(layers.keys(), outputs)
    }
    art_features = {
        k: theano.shared(output.eval({input_im_theano: art}))
        for k, output in zip(layers.keys(), outputs)
    }

    # Get expressions for layer activations for generated image
    generated_image = theano.shared(
        floatX(np.random.uniform(-128, 128, (1, 3, IMAGE_W, IMAGE_W))))

    gen_features = lasagne.layers.get_output(layers.values(), generated_image)
    gen_features = {k: v for k, v in zip(layers.keys(), gen_features)}

    # Define loss function
    losses = []

    # content loss
    losses.append(0.001 *
                  content_loss(photo_features, gen_features, 'conv4_2'))

    # style loss
    losses.append(0.2e6 * style_loss(art_features, gen_features, 'conv1_1'))
    losses.append(0.2e6 * style_loss(art_features, gen_features, 'conv2_1'))
    losses.append(0.2e6 * style_loss(art_features, gen_features, 'conv3_1'))
    losses.append(0.2e6 * style_loss(art_features, gen_features, 'conv4_1'))
    losses.append(0.2e6 * style_loss(art_features, gen_features, 'conv5_1'))

    # total variation penalty
    losses.append(0.1e-7 * total_variation_loss(generated_image))

    total_loss = sum(losses)

    grad = T.grad(total_loss, generated_image)

    # Theano functions to evaluate loss and gradient
    f_loss = theano.function([], total_loss)
    f_grad = theano.function([], grad)

    # Helper functions to interface with scipy.optimize
    def eval_loss(x0):
        x0 = floatX(x0.reshape((1, 3, IMAGE_W, IMAGE_W)))
        generated_image.set_value(x0)
        return f_loss().astype('float64')

    def eval_grad(x0):
        x0 = floatX(x0.reshape((1, 3, IMAGE_W, IMAGE_W)))
        generated_image.set_value(x0)
        return np.array(f_grad()).flatten().astype('float64')

    # Initialize with a noise image
    generated_image.set_value(
        floatX(np.random.uniform(-128, 128, (1, 3, IMAGE_W, IMAGE_W))))

    x0 = generated_image.get_value().astype('float64')
    xs = []
    xs.append(x0)

    # Optimize, saving the result periodically
    for i in range(num):
        print(i)
        scipy.optimize.fmin_l_bfgs_b(eval_loss,
                                     x0.flatten(),
                                     fprime=eval_grad,
                                     maxfun=40)
        x0 = generated_image.get_value().astype('float64')
        xs.append(x0)

    def deprocess(x):
        x = np.copy(x[0])
        x += MEAN_VALUES

        x = x[::-1]
        x = np.swapaxes(np.swapaxes(x, 0, 1), 1, 2)

        x = np.clip(x, 0, 255).astype('uint8')
        return x

    # plt.figure(figsize=(8, 8))
    for i in range(num):
        url = os.path.join(output_url, 'output%d.jpg' % i)
        plt.imsave(url, deprocess(xs[i]))
    """
Example #57
 def eval_grad(x0):
     x0 = floatX(x0.reshape((1, 3, IMAGE_W, IMAGE_W)))
     generated_image.set_value(x0)
     return np.array(f_grad()).flatten().astype('float64')
Example #58
 def update(lr, epoch):
     if epoch < start_at:
         return floatX(lr)
     else:
         k = ini_lr / decrease_epochs
         return floatX(np.max([0.0, lr - k]))
Example #59
 def test_loss(x0):
     x0 = floatX(x0.reshape((1, 3, IMAGE_W, IMAGE_W)))
     generated_image.set_value(x0)
     return f_test_loss().astype('float64')
Example #60
def l2_normalize(x, axis, epsilon=1e-12):
    epsilon = floatX(epsilon)
    x /= (epsilon + T.max(T.abs_(x), axis, keepdims=True))
    square_sum = T.sum(T.sqr(x), axis, keepdims=True)
    x /= T.sqrt(np.sqrt(epsilon) + square_sum)
    return x
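
A NumPy sketch of the intended effect for a single axis (the preceding division by the per-axis max in the Theano version is a numerical-stability step; only the core normalisation is shown):

import numpy as np

x = np.random.randn(4, 8).astype('float32')
x_n = x / np.sqrt(1e-12 + np.sum(np.square(x), axis=1, keepdims=True))
print(np.linalg.norm(x_n, axis=1))   # ~[1. 1. 1. 1.]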