Example 1
    def _create_iter_funcs(self, layers, objective, update, output_type):
        y_batch = output_type('y_batch')

        output_layer = layers[-1]
        objective_kw = self._get_params_for('objective')

        loss_train = objective(
            layers, target=y_batch, **objective_kw)
        loss_eval = objective(
            layers, target=y_batch, deterministic=True, **objective_kw)
        predict_proba = get_output(output_layer, None, deterministic=True)
        if not self.regression:
            predict = predict_proba.argmax(axis=1)
            accuracy = T.mean(T.eq(predict, y_batch))
        else:
            accuracy = loss_eval

        # Feature transform: output of the last pooling layer or, if there is
        # no pooling layer, of the second-to-last layer.
        try:
            transform = get_output([v for k, v in layers.items()
                                   if 'rmspool' in k or 'maxpool' in k][-1],
                                   None, deterministic=True)
        except IndexError:
            transform = get_output(list(layers.values())[-2], None,
                                   deterministic=True)

        all_params = self.get_all_params(trainable=True)
        update_params = self._get_params_for('update')
        updates = update(loss_train, all_params, **update_params)

        input_layers = [layer for layer in layers.values()
                        if isinstance(layer, InputLayer)]

        X_inputs = [theano.In(input_layer.input_var, name=input_layer.name)
                    for input_layer in input_layers]
        inputs = X_inputs + [theano.In(y_batch, name="y")]

        train_iter = theano.function(
            inputs=inputs,
            outputs=[loss_train],
            updates=updates,
            allow_input_downcast=True,
            )
        eval_iter = theano.function(
            inputs=inputs,
            outputs=[loss_eval, accuracy],
            allow_input_downcast=True,
            )
        predict_iter = theano.function(
            inputs=X_inputs,
            outputs=predict_proba,
            allow_input_downcast=True,
            )
        transform_iter = theano.function(
            inputs=X_inputs,
            outputs=transform,
            allow_input_downcast=True,
            )
        return train_iter, eval_iter, predict_iter, transform_iter
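
A minimal, hypothetical sketch of how the four compiled iterators might be driven, assuming a nolearn-style NeuralNet instance net with a single input layer; net.layers_, net.y_tensor_type, iterate_minibatches and the data arrays are assumptions, not part of the example:

# Sketch only: every name below except the four returned functions is assumed.
train_iter, eval_iter, predict_iter, transform_iter = net._create_iter_funcs(
    net.layers_, net.objective, net.update, net.y_tensor_type)

for epoch in range(10):
    for Xb, yb in iterate_minibatches(X_train, y_train, batch_size=128):
        (batch_loss,) = train_iter(Xb, yb)           # one update step, returns the training loss
    val_loss, val_acc = eval_iter(X_valid, y_valid)  # deterministic loss and accuracy

probas = predict_iter(X_test)      # class probabilities from the output layer
features = transform_iter(X_test)  # activations of the last pooling layer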
Example 2
    def _create_iter_funcs(self, layers, objective, update, output_type):
        y_batch = output_type('y_batch')

        output_layer = list(layers.values())[-1]
        objective_params = self._get_params_for('objective')
        obj = objective(output_layer, **objective_params)
        if not hasattr(obj, 'layers'):
            # XXX breaking the Lasagne interface a little:
            obj.layers = layers

        loss_train = obj.get_loss(None, y_batch)
        loss_eval = obj.get_loss(None, y_batch, deterministic=True)
        predict_proba = get_output(output_layer, None, deterministic=True)

        try:
            transform = get_output([v for k, v in layers.items() 
                                   if 'rmspool' in k or 'maxpool' in k][-1],
                                   None, deterministic=True)
        except IndexError:
            transform = get_output(list(layers.values())[-2], None,
                                   deterministic=True)

        if not self.regression:
            predict = predict_proba.argmax(axis=1)
            accuracy = T.mean(T.eq(predict, y_batch))
        else:
            accuracy = loss_eval

        all_params = self.get_all_params(trainable=True)
        update_params = self._get_params_for('update')
        updates = update(loss_train, all_params, **update_params)

        input_layers = [layer for layer in layers.values()
                        if isinstance(layer, InputLayer)]

        X_inputs = [theano.Param(input_layer.input_var, name=input_layer.name)
                    for input_layer in input_layers]
        inputs = X_inputs + [theano.Param(y_batch, name="y")]

        train_iter = theano.function(
            inputs=inputs,
            outputs=[loss_train],
            updates=updates,
            )
        eval_iter = theano.function(
            inputs=inputs,
            outputs=[loss_eval, accuracy],
            )
        predict_iter = theano.function(
            inputs=X_inputs,
            outputs=predict_proba,
            )
        transform_iter = theano.function(
            inputs=X_inputs,
            outputs=transform,
            )
        return train_iter, eval_iter, predict_iter, transform_iter
Example 3
# Pick the content layer according to the requested intensity (the mapping is defined elsewhere).
content_layer = switch_statement.get(argv.intensity)

cnn = build_cnn()
# Load the pretrained, normalized VGG-19 weights into the network up to pool5.
vgg19 = pickle.load(open('vgg19_normalized.pkl', 'rb'))['param values']
lasagne.layers.set_all_param_values(cnn['pool5'], vgg19)

# Preprocess the style (art) and content (photo) images.
art_im, photo_im = prep_image(argv.art, argv.photo)

# Keep only the layers needed for the losses: the chosen content layer plus
# the conv*_1 layers used for the style representation.
layers = {
    i: cnn[i]
    for i in
    [content_layer, 'conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
}

# Symbolic input image and the corresponding feature expressions at those layers.
input = T.tensor4()
outputs = lasagne.layers.get_output(layers.values(), input)

# Evaluate the art and photo feature maps once and store them as shared
# variables; they act as constants during the optimization.
art_features = {
    i: theano.shared(output.eval({input: art_im}))
    for i, output in zip(layers.keys(), outputs)
}
photo_features = {
    i: theano.shared(output.eval({input: photo_im}))
    for i, output in zip(layers.keys(), outputs)
}

# The generated image is a shared variable to be optimized (placeholder holds
# its initial value and is defined elsewhere); its features stay symbolic so
# that gradients can flow back to the pixels.
gen_image = theano.shared(floatX(np.copy(placeholder)))
gen_features = lasagne.layers.get_output(layers.values(), gen_image)
gen_features = {k: v for k, v in zip(layers.keys(), gen_features)}

losses = []
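
The snippet ends right after initializing losses. For context, a rough sketch of how the list would typically be filled in a Gatys-style recipe follows; gram_matrix, content_loss, style_loss and the loss weights are assumptions rather than part of the original code:

# Sketch only: helper functions and weights are assumed, not from the original source.
def gram_matrix(x):
    # Flatten the spatial dimensions and correlate the feature maps with each other.
    x = x.flatten(ndim=3)
    return T.tensordot(x, x, axes=([2], [2]))

def content_loss(P, X, layer):
    # Squared error between the photo's and the generated image's feature maps.
    return 0.5 * ((X[layer] - P[layer]) ** 2).sum()

def style_loss(A, X, layer):
    # Squared error between Gram matrices, normalized by the layer size.
    a, x = A[layer], X[layer]
    N = a.shape[1]
    M = a.shape[2] * a.shape[3]
    return ((gram_matrix(x) - gram_matrix(a)) ** 2).sum() / (4. * N ** 2 * M ** 2)

losses.append(1e-3 * content_loss(photo_features, gen_features, content_layer))
for style_layer in ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']:
    losses.append(2e5 * style_loss(art_features, gen_features, style_layer))
total_loss = sum(losses)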
Example 4
    def _create_iter_funcs(self, layers, objective, update, output_type):
        y_batch = output_type('y_batch')

        output_layer = list(layers.values())[-1]
        objective_params = self._get_params_for('objective')
        obj = objective(output_layer, **objective_params)
        if not hasattr(obj, 'layers'):
            # XXX breaking the Lasagne interface a little:
            obj.layers = layers

        loss_train = obj.get_loss(None, y_batch)
        loss_eval = obj.get_loss(None, y_batch, deterministic=True)
        predict_proba = get_output(output_layer, None, deterministic=True)

        try:
            transform = get_output([
                v for k, v in layers.items()
                if 'rmspool' in k or 'maxpool' in k
            ][-1],
                                   None,
                                   deterministic=True)
        except IndexError:
            transform = get_output(list(layers.values())[-2],
                                   None,
                                   deterministic=True)

        if not self.regression:
            predict = predict_proba.argmax(axis=1)
            accuracy = T.mean(T.eq(predict, y_batch))
        else:
            accuracy = loss_eval

        all_params = self.get_all_params(trainable=True)
        update_params = self._get_params_for('update')
        updates = update(loss_train, all_params, **update_params)

        input_layers = [
            layer for layer in layers.values()
            if isinstance(layer, InputLayer)
        ]

        X_inputs = [
            theano.Param(input_layer.input_var, name=input_layer.name)
            for input_layer in input_layers
        ]
        inputs = X_inputs + [theano.Param(y_batch, name="y")]

        train_iter = theano.function(
            inputs=inputs,
            outputs=[loss_train],
            updates=updates,
        )
        eval_iter = theano.function(
            inputs=inputs,
            outputs=[loss_eval, accuracy],
        )
        predict_iter = theano.function(
            inputs=X_inputs,
            outputs=predict_proba,
        )
        transform_iter = theano.function(
            inputs=X_inputs,
            outputs=transform,
        )
        return train_iter, eval_iter, predict_iter, transform_iter
Example 5
    def _create_iter_funcs(self, layers, objective, update, output_type):
        y_batch = output_type('y_batch')

        output_layer = layers[-1]
        objective_kw = self._get_params_for('objective')

        loss_train = objective(layers, target=y_batch, **objective_kw)
        loss_eval = objective(layers,
                              target=y_batch,
                              deterministic=True,
                              **objective_kw)
        predict_proba = get_output(output_layer, None, deterministic=True)
        if not self.regression:
            predict = predict_proba.argmax(axis=1)
            accuracy = T.mean(T.eq(predict, y_batch))
        else:
            accuracy = loss_eval

        try:
            transform = get_output(
                [
                    v for k, v in layers.items()
                    # Alternative: extract features from the maxout layer
                    # (unclear whether the maxout layer is really the last one here).
                    #if 'maxout' in k][-1],
                    #if 'rmspool' in k or 'maxpool' in k][-1],
                    if 'dense' in k
                ][-2],
                None,
                deterministic=True)
        except IndexError:
            transform = get_output(list(layers.values())[-2],
                                   None,
                                   deterministic=True)

        all_params = self.get_all_params(trainable=True)
        # Compute the gradients explicitly so they can be rescaled per parameter
        # via an optional grad_scale tag before being passed to the update rule.
        grads = theano.grad(loss_train, all_params)
        for idx, param in enumerate(all_params):
            grad_scale = getattr(param.tag, 'grad_scale', 1)
            if grad_scale != 1:
                grads[idx] *= grad_scale
        update_params = self._get_params_for('update')
        updates = update(grads, all_params, **update_params)

        input_layers = [
            layer for layer in layers.values()
            if isinstance(layer, InputLayer)
        ]

        X_inputs = [
            theano.Param(input_layer.input_var, name=input_layer.name)
            for input_layer in input_layers
        ]
        inputs = X_inputs + [theano.Param(y_batch, name="y")]

        train_iter = theano.function(
            inputs=inputs,
            outputs=[loss_train],
            updates=updates,
            allow_input_downcast=True,
        )
        eval_iter = theano.function(
            inputs=inputs,
            outputs=[loss_eval, accuracy],
            allow_input_downcast=True,
        )
        predict_iter = theano.function(
            inputs=X_inputs,
            outputs=predict_proba,
            allow_input_downcast=True,
        )
        transform_iter = theano.function(
            inputs=X_inputs,
            outputs=transform,
            allow_input_downcast=True,
        )
        return train_iter, eval_iter, predict_iter, transform_iter
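
The per-parameter grad_scale tag above only has an effect if such tags are actually set on the parameters before the functions are compiled. A small, hypothetical sketch of how a layer could be marked to learn more slowly (the layer name and the factor are assumptions):

# Sketch only: slow down updates of one layer by tagging its trainable parameters.
pretrained_layer = net.layers_['conv1']          # assumed layer name
for param in pretrained_layer.get_params(trainable=True):
    param.tag.grad_scale = 0.1                   # picked up by the gradient scaling above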