def test_binary_accuracy_with_threshold_(self):
        y_true = Input((2,))
        y_pred = Input((2,))
        threshold = K.placeholder((2,))
        acc = binary_accuracy_with_threshold(y_true, y_pred, threshold)
        self.assertEqual(K.ndim(acc), 0)
        binary_accuracy_with_threshold_func = K.function(inputs=[y_true, y_pred, threshold], outputs=[acc])
        acc_val = binary_accuracy_with_threshold_func(
            [np.array([[0, 1], [1, 0]]), np.array([[0.2, 0.6], [0.3, 0.1]]), np.array([0.25, 0.4])])[0]
        self.assertEqual(round(acc_val, 2), 1.00, "acc_val")

        # works on a single threshold
        threshold = K.placeholder(ndim=0)
        acc = binary_accuracy_with_threshold(y_true, y_pred, threshold)
        binary_accuracy_with_threshold_func = K.function(inputs=[y_true, y_pred, threshold], outputs=[acc])
        acc_val = binary_accuracy_with_threshold_func(
            [np.array([[0, 1], [1, 0]]), np.array([[0.2, 0.6], [0.3, 0.1]]), 0.5])[0]
        self.assertEqual(round(acc_val, 2), 0.75, "acc_val")

        # works on 3-dimensional inputs
        y_true = Input((None, 2))
        y_pred = Input((None, 2))
        threshold = K.placeholder((2,))
        acc = binary_accuracy_with_threshold(y_true, y_pred, threshold)
        self.assertEqual(K.ndim(acc), 0)
        binary_accuracy_with_threshold_func = K.function(inputs=[y_true, y_pred, threshold], outputs=[acc])
        acc_val = binary_accuracy_with_threshold_func(
            [np.array([[[0, 1]], [[1, 0]]]), np.array([[[0.2, 0.6]], [[0.3, 0.1]]]), np.array([0.25, 0.4])])[0]
        self.assertEqual(round(acc_val, 2), 1.00, "acc_val")
Example #2
File: test_call.py  Project: AI42/keras
def test_sequential_call():
    """Test keras.models.Sequential.__call__"""
    nb_samples, input_dim, output_dim = 3, 10, 5
    model = Sequential()
    model.add(Dense(output_dim=output_dim, input_dim=input_dim))
    model.compile('sgd', 'mse')

    # test flat model
    X = K.placeholder(ndim=2)
    Y = model(X)
    f = K.function([X], [Y])

    x = np.ones((nb_samples, input_dim)).astype(K.floatx())
    y1 = f([x])[0].astype(K.floatx())
    y2 = model.predict(x)
    # results of __call__ should match model.predict
    assert_allclose(y1, y2)

    # test nested model
    model2 = Sequential()
    model2.add(model)
    model2.compile('sgd', 'mse')

    Y2 = model2(X)
    f = K.function([X], [Y2])

    y1 = f([x])[0].astype(K.floatx())
    y2 = model2.predict(x)
    # results of __call__ should match model.predict
    assert_allclose(y1, y2)
Example #3
    def build_model(self, p):
        S = Input(p['input_shape'], name='input_state')
        A = Input((1,), name='input_action', dtype='int32')
        R = Input((1,), name='input_reward')
        T = Input((1,), name='input_terminate', dtype='int32')
        NS = Input(p['input_shape'], name='input_next_state')

        self.Q_model = self.build_cnn_model(p)
        self.Q_old_model = self.build_cnn_model(p, False)  # Q hat in paper
        self.Q_old_model.set_weights(self.Q_model.get_weights())  # Q' = Q

        Q_S = self.Q_model(S)  # batch * actions
        Q_NS = disconnected_grad(self.Q_old_model(NS))  # disconnected gradient is not necessary

        y = R + p['discount'] * (1-T) * K.max(Q_NS, axis=1, keepdims=True)  # batch * 1

        action_mask = K.equal(Tht.arange(p['num_actions']).reshape((1, -1)), A.reshape((-1, 1)))
        output = K.sum(Q_S * action_mask, axis=1).reshape((-1, 1))
        loss = K.sum((output - y) ** 2)  # sum could also be mean()

        optimizer = adam(p['learning_rate'])
        params = self.Q_model.trainable_weights
        update = optimizer.get_updates(params, [], loss)

        self.training_func = K.function([S, A, R, T, NS], loss, updates=update)
        self.Q_func = K.function([S], Q_S)
Example #4
    def compile(self, optimizer, loss):
        self.optimizer = optimizers.get(optimizer)

        self.loss = objectives.get(loss)

        # input of model
        self.X_train = self.get_input(train=True)
        self.X_test = self.get_input(train=False)

        train_loss = self.loss(self.X_train)
        test_loss = self.loss(self.X_test)

        train_loss.name = 'train_loss'
        test_loss.name = 'test_loss'

        for r in self.regularizers:
            train_loss = r(train_loss)
        updates = self.optimizer.get_updates(self.params, self.constraints, train_loss)
        updates += self.updates

        if type(self.X_train) == list:
            train_ins = self.X_train
            test_ins = self.X_test
        else:
            train_ins = [self.X_train]
            test_ins = [self.X_test]

        self._train = K.function(train_ins, train_loss, updates=updates)
        self._test = K.function(test_ins, test_loss)
Example #5
File: Trainer.py  Project: Nioy/faceswap
    def setup(self):
        distorted_A, fake_A, fake_sz64_A, mask_A, self.path_A, self.path_mask_A, self.path_abgr_A, self.path_bgr_A = self.cycle_variables(self.model.netGA)
        distorted_B, fake_B, fake_sz64_B, mask_B, self.path_B, self.path_mask_B, self.path_abgr_B, self.path_bgr_B = self.cycle_variables(self.model.netGB)
        real_A = Input(shape=self.model.img_shape)
        real_B = Input(shape=self.model.img_shape)

        if self.use_lsgan:
            self.loss_fn = lambda output, target : K.mean(K.abs(K.square(output-target)))
        else:
            self.loss_fn = lambda output, target : -K.mean(K.log(output+1e-12)*target+K.log(1-output+1e-12)*(1-target))

        # ========== Define Perceptual Loss Model==========
        if self.use_perceptual_loss:
            from keras.models import Model
            from keras_vggface.vggface import VGGFace
            vggface = VGGFace(include_top=False, model='resnet50', input_shape=(224, 224, 3))
            vggface.trainable = False
            out_size55 = vggface.layers[36].output
            out_size28 = vggface.layers[78].output
            out_size7 = vggface.layers[-2].output
            vggface_feat = Model(vggface.input, [out_size55, out_size28, out_size7])
            vggface_feat.trainable = False
        else:
            vggface_feat = None

        loss_DA, loss_GA = self.define_loss(self.model.netDA, real_A, fake_A, fake_sz64_A, distorted_A, vggface_feat)
        loss_DB, loss_GB = self.define_loss(self.model.netDB, real_B, fake_B, fake_sz64_B, distorted_B, vggface_feat)

        if self.use_mask_refinement:
            loss_GA += 1e-3 * K.mean(K.square(mask_A))
            loss_GB += 1e-3 * K.mean(K.square(mask_B))
        else:
            loss_GA += 3e-3 * K.mean(K.abs(mask_A))
            loss_GB += 3e-3 * K.mean(K.abs(mask_B))

        w_fo = 0.01
        loss_GA += w_fo * K.mean(self.first_order(mask_A, axis=1))
        loss_GA += w_fo * K.mean(self.first_order(mask_A, axis=2))
        loss_GB += w_fo * K.mean(self.first_order(mask_B, axis=1))
        loss_GB += w_fo * K.mean(self.first_order(mask_B, axis=2))

        weightsDA = self.model.netDA.trainable_weights
        weightsGA = self.model.netGA.trainable_weights
        weightsDB = self.model.netDB.trainable_weights
        weightsGB = self.model.netGB.trainable_weights

        # Adam(..).get_updates(...)
        training_updates = Adam(lr=self.lrD, beta_1=0.5).get_updates(weightsDA,[],loss_DA)
        self.netDA_train = K.function([distorted_A, real_A],[loss_DA], training_updates)
        training_updates = Adam(lr=self.lrG, beta_1=0.5).get_updates(weightsGA,[], loss_GA)
        self.netGA_train = K.function([distorted_A, real_A], [loss_GA], training_updates)

        training_updates = Adam(lr=self.lrD, beta_1=0.5).get_updates(weightsDB,[],loss_DB)
        self.netDB_train = K.function([distorted_B, real_B],[loss_DB], training_updates)
        training_updates = Adam(lr=self.lrG, beta_1=0.5).get_updates(weightsGB,[], loss_GB)
        self.netGB_train = K.function([distorted_B, real_B], [loss_GB], training_updates)
    def compile(self, multi_optimizer):
        updates = multi_optimizer.get_updates(
            self.params, self.constraints, self.objectives)

        ins = [self.inputs[name] for name in self.input_order]
        self._train_fn = K.function(
            ins, list(self.metrics.values()),
            updates=updates + self.additional_updates)
        if self.debug_map:
            self._debug_fn = K.function(ins, list(self.debug_map.values()))
Example #7
    def __init__(self, replay_filename, group_name, model_filename=''):
        # Set learning phase to TEST
        self.learning_phase = TEST_MODE

        # If not informed, defaults to '_model' suffix
        if model_filename == '':
            model_filename = '{}_model.h5'.format(group_name)

        # Loads Keras model
        self.model = load_model(model_filename)
        # Loads ReplayData file
        self.replay_data = h5py.File('{}'.format(replay_filename), 'r')
        self.group_name = group_name
        self.group = self.replay_data[self.group_name]

        # Retrieves some basic information from the replay data
        self.inputs = self.group['inputs'][:]
        self.targets = self.group['targets'][:]
        self.n_epochs = self.group.attrs['n_epochs']
        self.n_layers = self.group.attrs['n_layers']
        # Retrieves weights as a list, each element being one epoch
        self.weights = self._retrieve_weights()

        # Gets Tensors for the weights in the same order as the layers
        # Keras' model.weights returns the Tensors in a different order!
        self._model_weights = [w for layer in self.model.layers for w in layer.weights]

        ### Functions
        # Keras function to get the outputs, given inputs and weights
        self._get_output = K.function(inputs=[K.learning_phase()] + self.model.inputs + self._model_weights,
                                      outputs=[self.model.layers[-1].output])
        # Keras function to get the loss and metrics, given inputs, targets, weights and sample weights
        self._get_metrics = K.function(inputs=[K.learning_phase()] + self.model.inputs + self.model.targets +
                                              self._model_weights + self.model.sample_weights,
                                       outputs=[self.model.total_loss] + self.model.metrics_tensors)
        # Keras function to compute the binary cross entropy, given inputs, targets, weights and sample weights
        self._get_binary_crossentropy = K.function(inputs=[K.learning_phase()] + self.model.inputs +
                                                          self.model.targets + self._model_weights +
                                                          self.model.sample_weights,
                                                   outputs=[K.binary_crossentropy(self.model.targets[0],
                                                                                  self.model.outputs[0])])

        # Attributes for the visualizations - Data
        self._feature_space_data = None
        self._loss_hist_data = None
        self._loss_and_metric_data = None
        self._prob_hist_data = None
        self._decision_boundary_data = None
        # Attributes for the visualizations - Plot objects
        self._feature_space_plot = None
        self._loss_hist_plot = None
        self._loss_and_metric_plot = None
        self._prob_hist_plot = None
        self._decision_boundary_plot = None
Example #8
  def __init__(self, state_size, num_actuators, args):
    # remember parameters
    self.state_size = state_size
    self.num_actuators = num_actuators
    self.discount_rate = args.discount_rate
    self.target_rate = args.target_rate
    self.noise = args.noise
    self.noise_scale = args.noise_scale

    x, u, m, v, q, p, a = self._createLayers(args)

    # wrappers around computational graph
    fmu = K.function([K.learning_phase(), x], m)
    self.mu = lambda x: fmu([0, x])

    fP = K.function([K.learning_phase(), x], p)
    self.P = lambda x: fP([0, x])

    fA = K.function([K.learning_phase(), x, u], a)
    self.A = lambda x, u: fA([0, x, u])

    fQ = K.function([K.learning_phase(), x, u], q)
    self.Q = lambda x, u: fQ([0, x, u])

    # main model
    self.model = Model(input=[x,u], output=q)
    self.model.summary()

    if args.optimizer == 'adam':
      optimizer = Adam(args.learning_rate)
    elif args.optimizer == 'rmsprop':
      optimizer = RMSprop(args.learning_rate)
    else:
      assert False
    self.model.compile(optimizer=optimizer, loss='mse')

    # another set of layers for target model
    x, u, m, v, q, p, a = self._createLayers(args)

    # V() function uses target model weights
    fV = K.function([K.learning_phase(), x], v)
    self.V = lambda x: fV([0, x])

    # target model is initialized from main model
    self.target_model = Model(input=[x,u], output=q)
    self.target_model.set_weights(self.model.get_weights())
    def _deconv(self, X, lname, d_switch, feat_map=None):
        o_width, o_height = self[lname].output_shape[-2:]

        # Get filter size
        f_width = self[lname].W_shape[2]
        f_height = self[lname].W_shape[3]

        # Compute padding needed
        i_width, i_height = X.shape[-2:]
        pad_width = (o_width - i_width + f_width - 1) // 2
        pad_height = (o_height - i_height + f_height - 1) // 2

        assert (o_width - i_width + f_width - 1) % 2 == 0, \
            "Pad width size issue at layer %s" % lname
        assert (o_height - i_height + f_height - 1) % 2 == 0, \
            "Pad height size issue at layer %s" % lname

        # Set to zero based on switch values
        X[d_switch[lname]] = 0
        # Get activation function
        activation = self[lname].activation
        X = activation(X)
        if feat_map is not None:
            print "Setting other feat map to zero"
            for i in range(X.shape[1]):
                if i != feat_map:
                    X[:, i, :, :] = 0
            print "Setting non max activations to zero"
            for i in range(X.shape[0]):
                iw, ih = np.unravel_index(
                    X[i, feat_map, :, :].argmax(), X[i, feat_map, :, :].shape)
                m = np.max(X[i, feat_map, :, :])
                X[i, feat_map, :, :] = 0
                X[i, feat_map, iw, ih] = m
        # Get filters. No bias for now
        W = self[lname].W
        # Transpose filter
        W = W.transpose([1, 0, 2, 3])
        W = W[:, :, ::-1, ::-1]
        # CUDNN for conv2d ?
        conv_out = K.T.nnet.conv2d(
            input=self.x, filters=W, border_mode='valid')
        # Add padding to get correct size
        pad = K.function([self.x], K.spatial_2d_padding(
            self.x, padding=(pad_width, pad_height), dim_ordering="th"))
        X_pad = pad([X])
        # Get Deconv output
        deconv_func = K.function([self.x], conv_out)
        X_deconv = deconv_func([X_pad])
        assert X_deconv.shape[-2:] == (o_width, o_height),\
            "Deconv output at %s has wrong size" % lname
        return X_deconv
Example #10
File: Trainer.py  Project: Nioy/faceswap
    def cycle_variables(self, netG):
        distorted_input = netG.inputs[0]
        fake_output = netG.outputs[0]
        fake_output64 = netG.outputs[1]
        alpha = Lambda(lambda x: x[:,:,:, :1])(fake_output)
        rgb = Lambda(lambda x: x[:,:,:, 1:])(fake_output)

        masked_fake_output = alpha * rgb + (1-alpha) * distorted_input

        fn_generate = K.function([distorted_input], [masked_fake_output])
        fn_mask = K.function([distorted_input], [concatenate([alpha, alpha, alpha])])
        fn_abgr = K.function([distorted_input], [concatenate([alpha, rgb])])
        fn_bgr = K.function([distorted_input], [rgb])
        return distorted_input, fake_output, fake_output64, alpha, fn_generate, fn_mask, fn_abgr, fn_bgr
    def _backward_pass(self, X, target_layer, d_switch, feat_map):
        # Run deconv/maxunpooling until input pixel space
        layer_index = self.lnames.index(target_layer)
        # Get the output of the target_layer of interest
        layer_output = K.function(
            [self[self.lnames[0]].input], self[target_layer].output)
        X_outl = layer_output([X])
        # Special case for the starting layer where we may want
        # to switchoff somes maps/ activations
        print "Deconvolving %s..." % target_layer
        if "maxpooling2d" in target_layer:
            X_maxunp = K.pool.max_pool_2d_same_size(
                self[target_layer].input, self[target_layer].pool_size)
            unpool_func = K.function([self[self.lnames[0]].input], X_maxunp)
            X_outl = unpool_func([X])
            if feat_map is not None:
                for i in range(X_outl.shape[1]):
                    if i != feat_map:
                        X_outl[:, i, :, :] = 0
                for i in range(X_outl.shape[0]):
                    iw, ih = np.unravel_index(
                        X_outl[i, feat_map, :, :].argmax(), X_outl[i, feat_map, :, :].shape)
                    m = np.max(X_outl[i, feat_map, :, :])
                    X_outl[i, feat_map, :, :] = 0
                    X_outl[i, feat_map, iw, ih] = m
        elif "convolution2d" in target_layer:
            X_outl = self._deconv(X_outl, target_layer,
                                  d_switch, feat_map=feat_map)
        else:
            raise ValueError(
                "Invalid layer name: %s \n Can only handle maxpool and conv" % target_layer)
        # Iterate over layers (deepest to shallowest)
        for lname in self.lnames[:layer_index][::-1]:
            print "Deconvolving %s..." % lname
            # Unpool, Deconv or do nothing
            if "maxpooling2d" in lname:
                p1, p2 = self[lname].pool_size
                uppool = K.function(
                    [self.x], K.resize_images(self.x, p1, p2, "th"))
                X_outl = uppool([X_outl])

            elif "convolution2d" in lname:
                X_outl = self._deconv(X_outl, lname, d_switch)
            elif "padding" in lname:
                pass
            else:
                raise ValueError(
                    "Invalid layer name: %s \n Can only handle maxpool and conv" % lname)
        return X_outl
Example #12
    def set_batch_function(self, model, input_shape, batch_size, nb_actions, gamma):
        input_dim = np.prod(input_shape)
        samples = K.placeholder(shape=(batch_size, input_dim * 2 + 3))

        S = samples[:, 0 : input_dim]
        a = samples[:, input_dim]
        a = K.cast(a, 'int32')  # the action index must be an integer dtype
        r = samples[:, input_dim + 1]
        S_prime = samples[:, input_dim + 2 : 2 * input_dim + 2]
        game_over = samples[:, 2 * input_dim + 2 : 2 * input_dim + 3]

        r = K.reshape(r, (batch_size, 1))
        r = K.repeat(r, nb_actions)
        r = K.reshape(r, (batch_size, nb_actions))

        game_over = K.repeat(game_over, nb_actions)
        game_over = K.reshape(game_over, (batch_size, nb_actions))

        S = K.reshape(S, (batch_size, ) + input_shape)
        S_prime = K.reshape(S_prime, (batch_size, ) + input_shape)

        X = K.concatenate([S, S_prime], axis=0)
        Y = model(X)

        Qsa = K.max(Y[batch_size:], axis=1)
        Qsa = K.reshape(Qsa, (batch_size, 1))
        Qsa = K.repeat(Qsa, nb_actions)
        Qsa = K.reshape(Qsa, (batch_size, nb_actions))

        delta = K.reshape(self.one_hot(a, nb_actions), (batch_size, nb_actions))
        targets = (1 - delta) * Y[:batch_size] + delta * (r + gamma * (1 - game_over) * Qsa)

        self.batch_function = K.function(inputs=[samples], outputs=[S, targets])
Example #13
    def Visualize(self, img_path, output_path):
        original_img = cv2.imread(img_path, 1)
        height, width, _ = original_img.shape
        #Reshape to the network input shape (3, w, h).
        #img = np.array([np.transpose(np.float32(original_img), (2, 0, 1))])

        #Get the 512 input weights to the softmax.
        class_weights = self.model.layers[-1].get_weights()[0]
        final_conv_layer = self.get_output_layer(self.model, "conv2d_26")
        get_output = K.function([self.model.layers[0].input],
                                [final_conv_layer.output,
                                 self.model.layers[-1].output])
        [conv_outputs, predictions] = get_output([np.array([original_img])])
        conv_outputs = conv_outputs[0, :, :, :]
        print(predictions)
        #Create the class activation map.
        cam = np.ones(conv_outputs.shape[0:2], dtype=np.float32)
        target_class = 1

        for i, w in enumerate(class_weights[:, target_class]):
            cam += w * conv_outputs[:, :, i]

        print("predictions", predictions)
        cam /= np.max(cam)
        cam = cv2.resize(cam, (width, height))
        print(cam.shape)
        heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_JET)
        heatmap[np.where(cam < 0.2)] = 0
        img = heatmap * 0.5 + original_img
        cv2.imwrite(output_path, img)
 def get_features(self, x, layers):
     if not layers:
         return None
     f = K.function([self.net_input], [self.get_layer_output(layer_name) for layer_name in layers])
     feature_outputs = f([x])
     features = dict(zip(layers, feature_outputs))
     return features
Example #15
def test_relu():
    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.relu(x)])

    test_values = get_standard_values()
    result = f([test_values])[0]
    assert_allclose(result, test_values, rtol=1e-05)
  def __init__(self, model, outchannels=[], verbose=1):

    # Backend: either tensorflow or theano
    self.backend = K.backend()

    # load model supports keras.Model and keras.Sequential
    if isinstance(model, Sequential):
      self.model = model.model
    elif isinstance(model, Model):
      self.model = model
    else:
      print("Invalid input model")
      return -1

    # load input tensors
    self.input_tensors = []
    for i in self.model.inputs:
      self.input_tensors.append(i)
    # The learning phase flag is a bool tensor (0 = test, 1 = train)
    # to be passed as input to any Keras function that uses
    # a different behavior at train time and test time.
    self.input_tensors.append(K.learning_phase())

    # If outchannels is specified, use it.
    # Otherwise evaluate all outputs.
    self.outchannels = outchannels
    if len(self.outchannels) == 0:
      if verbose: print("Evaluated output channel (0-based index): All")
      if K.backend() == "tensorflow":
        self.outchannels = range(self.model.output.shape[1]._value)
      elif K.backend() == "theano":
        self.outchannels = range(self.model.output._keras_shape[1])
    else:
      if verbose:
        print("Evaluated output channels (0-based index):")
        print(','.join([str(i) for i in self.outchannels]))

    # Build gradient functions for desired output channels.
    self.get_gradients = {}
    if verbose: print("Building gradient functions")

    # Evaluate over all channels.
    for c in self.outchannels:
      # Get tensor that calculates gradient
      if K.backend() == "tensorflow":
        gradients = self.model.optimizer.get_gradients(self.model.output[:, c], self.model.input)
      if K.backend() == "theano":
        gradients = self.model.optimizer.get_gradients(self.model.output[:, c].sum(), self.model.input)

      # Build computational graph that calculates the tensor given inputs
      self.get_gradients[c] = K.function(inputs=self.input_tensors, outputs=gradients)

      # This takes a lot of time for a big model with many tasks.
      # So let's print the progress.
      if verbose:
        sys.stdout.write('\r')
        sys.stdout.write("Progress: " + str(int((c + 1) * 1.0 / len(self.outchannels) * 1000) * 1.0 / 10) + "%")
        sys.stdout.flush()
    # Done
    if verbose: print("\nDone.")
Example #17
def get_features(model, dataset, position, N, training_params, verbose, flip=False):

    intermediate_outputs = K.function([model.layers[0].input], [model.layers[position].get_output(train=False)])

    if N==0:
        raise Exception("Ntest = 0.")
    for i in range(N):
        if verbose:
            print("\rBatch %d over %d" % (i, N), end='')
        # Get next batch
        batch, targets = dataset.get_batch()
        # Eventually flip
        if flip:
            batch = np.fliplr(batch.transpose(1,2,3,0)).transpose(3,0,1,2)
        # Preprocess
        for mode in training_params.valid_preprocessing:
            batch = preprocess_dataset(batch, training_params, mode)
        # Predict
        pred = intermediate_outputs([np.array(batch.transpose(0,3,1,2), "float32")])[0]
        if pred.shape[1] != 256 and pred.shape[1] != 512:
            raise Exception("not the good layer. Dim = %d"%pred.shape[1])
        # Accumulate preds
        if i==0:
            predictions = np.copy(pred)
            labels = np.copy(convert_labels(targets))
        else:
            predictions = np.concatenate((predictions,pred))
            labels = np.concatenate((labels,convert_labels(targets)))

    return predictions, labels
Example #18
def get_mc_predictions(model, X, nb_iter=50, batch_size=256):
    """
    TODO
    :param model:
    :param X:
    :param nb_iter:
    :param batch_size:
    :return:
    """
    output_dim = model.layers[-1].output.shape[-1].value
    get_output = K.function(
        [model.layers[0].input, K.learning_phase()],
        [model.layers[-1].output]
    )

    def predict():
        n_batches = int(np.ceil(X.shape[0] / float(batch_size)))
        output = np.zeros(shape=(len(X), output_dim))
        for i in range(n_batches):
            output[i * batch_size:(i + 1) * batch_size] = \
                get_output([X[i * batch_size:(i + 1) * batch_size], 1])[0]
        return output

    preds_mc = []
    for i in tqdm(range(nb_iter)):
        preds_mc.append(predict())

    return np.asarray(preds_mc)
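# A hedged usage sketch (not part of the original snippet): the spread of the
# MC-dropout passes above estimates predictive uncertainty. `X_test` is an
# assumed input array.
preds = get_mc_predictions(model, X_test, nb_iter=50)
pred_mean = preds.mean(axis=0)  # (len(X_test), output_dim) predictive mean
pred_std = preds.std(axis=0)    # per-output uncertainty estimate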
Example #19
    def build_model(self):
        """Build a Critic (Value) model that maps (state, action)> Q_values."""
        # Input layers
        states = layers.Input(shape=(self.state_size,), name="states")
        actions = layers.Input(shape=(self.action_size,), name="actions")

        # Add some hidden layers to state pathway
        net_states = layers.Dense(units=32, activation="relu")(states)
        net_states = layers.Dense(units=64, activation="relu")(net_states)

        # Add some hidden layers to action pathway
        net_actions = layers.Dense(units=32, activation="relu")(actions)
        net_actions = layers.Dense(units=64, activation="relu")(net_actions)

        # Combine both pathways
        net = layers.Add()([net_states, net_actions])
        net = layers.Activation('relu')(net)

        Q_values = layers.Dense(units=1, name='q_values')(net)

        self.model = models.Model(inputs=[states, actions], outputs=Q_values)

        optimizer = optimizers.Adam()
        self.model.compile(optimizer=optimizer, loss='mse')

        # Compute action gradients (derivative of Q values w.r.t. to actions)
        action_gradients = K.gradients(Q_values, actions)

        # Define an additional function to fetch action gradients (to be used
        # by actor model)
        self.get_action_gradients = K.function(
            inputs=[*self.model.inputs, K.learning_phase()],
            outputs=action_gradients)
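# A hedged usage sketch (assumed names, not from the original): an actor model
# would call the compiled function with a state batch, an action batch, and
# learning phase 0 (test mode) to retrieve dQ/da for its policy-gradient update.
action_grads = critic.get_action_gradients([states_batch, actions_batch, 0])[0]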
Example #20
 def set_f_outputs(self, combo_img, loss, grads):
     outputs = [loss]
     if type(grads) in {list, tuple}:
         outputs += grads
     else:
         outputs.append(grads)
     return K.function([combo_img], outputs)
Example #21
def pretrain(model, data, learning_rate, layer_activations, output_activation, optimizer):
    weights = model.get_weights()
    weights_dummy = weights
    layer_input = data
    for i in range(int(len(weights) / 4)):
        if i == 0:
            inputs = Input(shape=(np.size(weights[2 * i], axis=0),))
            layer_1 = Dense(np.size(weights[2 * i], axis=1), activation=layer_activations)(inputs)
            predictions = Dense(np.size(weights[2 * i], axis=0), activation=output_activation)(layer_1)
        else:
            inputs = Input(shape=(np.size(weights[2 * i], axis=0),))
            layer_1 = Dense(np.size(weights[2 * i], axis=1), activation=layer_activations)(inputs)
            predictions = Dense(np.size(weights[2 * i], axis=0), activation=layer_activations)(layer_1)
        stacked_ae = Model(inputs=inputs, outputs=predictions)
        stacked_ae.compile(optimizer=optimizer, loss='mean_squared_error')
        print(i)
        stacked_ae.fit(layer_input, layer_input, shuffle=True, epochs=10, batch_size=64)
        get_h_layer_output = K.function([stacked_ae.layers[0].input],
                                        [stacked_ae.layers[1].output])
        layer_1_predictions = get_h_layer_output([layer_input])[0]
        w = stacked_ae.get_weights()
        del layer_input, layer_1, inputs, predictions, get_h_layer_output
        layer_input = layer_1_predictions
        weights_dummy[2 * i] = w[0]
        weights_dummy[2 * i + 1] = w[1]
        weights_dummy[len(weights) - 2 * i - 2] = w[2]
        weights_dummy[len(weights) - 2 * i - 1] = w[3]
        del w
    model.set_weights(weights_dummy)
    return model
Example #22
File: run.py  Project: kalpitdixit/ACM
def build_train_fn(model):
    # cost
    lr = T.scalar()
    labels = K.placeholder(ndim=2, dtype='int32')
    ob_input = model.inputs[0]
    raw_softmax_outputs = model.outputs[0]

    softmax_outputs = raw_softmax_outputs.dimshuffle((2,0,1))
    softmax_outputs = softmax_outputs.reshape((softmax_outputs.shape[0], softmax_outputs.shape[1]*softmax_outputs.shape[2]))
    softmax_outputs = softmax_outputs.dimshuffle((1,0))

    cost = categorical_crossentropy(softmax_outputs, labels).mean()

    # gradients
    trainable_vars = model.trainable_weights
    grads = K.gradients(cost, trainable_vars)
    grads = lasagne.updates.total_norm_constraint(grads, 100)
    updates = lasagne.updates.nesterov_momentum(grads, trainable_vars, lr, 0.99)

    for key, val in model.updates:
        updates[key] = val

    # train_fn
    train_fn = K.function([ob_input, labels, K.learning_phase(), lr],
                          [softmax_outputs, cost],
                          updates=updates)

    return train_fn
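# A hedged usage sketch (assumed names): the compiled function takes the
# observation batch, integer labels, the learning phase (1 = train), and a
# learning rate, and returns the softmax outputs and the mean cost.
softmax_outs, cost = train_fn([obs_batch, label_batch, 1, 0.01])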
Example #23
def plot_attention(sentence, Tx=20, Ty=25):
    """
    可视化Attention层

    @param sentence: 待翻译的句子,str类型
    @param Tx: 输入句子的长度
    @param Ty: 输出句子的长度
    """

    X = np.array(text_to_int(sentence, source_vocab_to_int))
    f = K.function(model.inputs, [model.layers[9].get_output_at(t) for t in range(Ty)])

    s0 = np.zeros((1, n_s))
    c0 = np.zeros((1, n_s))
    out0 = np.zeros((1, len(target_vocab_to_int)))

    r = f([X.reshape(-1, 20), s0, c0, out0])

    attention_map = np.zeros((Ty, Tx))
    for t in range(Ty):
        for t_prime in range(Tx):
            attention_map[t][t_prime] = r[t][0, t_prime, 0]

    Y = make_prediction(sentence)

    source_list = sentence.split()
    target_list = Y.split()

    f, ax = plt.subplots(figsize=(20, 15))
    sns.heatmap(attention_map, xticklabels=source_list, yticklabels=target_list, cmap="YlGnBu")
    ax.set_xticklabels(ax.get_xticklabels(), fontsize=15, rotation=90)
    ax.set_yticklabels(ax.get_yticklabels(), fontsize=15)
Example #24
def extract(matlabfile,outfile):
	res_ims = []
	with open(matlabfile) as matfile:
		mat = sio.loadmat(matfile)
#		ys = mat['labels']
		ims = mat['ims'][0]
		for im in ims:
			im = cv2.resize(im, (224, 224)).astype(np.float32)
			im[:,:,0] -= 103.939
			im[:,:,1] -= 116.779
			im[:,:,2] -= 123.68
			im = im.transpose((2,0,1))
			im = np.expand_dims(im, axis=0)
			res_ims.append(im)
	#%%
	layer_outputs = []
	model = vgg.VGG_19('vgg19_weights.h5')
	sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True) #not used since the weights are pre-trained
	model.compile(optimizer=sgd, loss='categorical_crossentropy')

	#get_37th_layer_output = K.function([model.layers[0].input], [model.layers[38].output]) #37 = flatten
	get_layer_output = K.function([model.layers[0].input, K.learning_phase()], [model.layers[41].output]) #41 = last layer-1
	i = 0
	l = '('+str(len(res_ims))+')'
	for im in res_ims:
		print('progress', str(i) + l)
		i += 1
	#	layer_output = get_37th_layer_output([im])[0][0]
		layer_output = get_layer_output([im, 1])[0][0]
		layer_outputs.append(layer_output)

	Xtrain = np.array(layer_outputs).T
	np.save(outfile,Xtrain)
Example #25
    def get_training_function(self, x, y):
        model = self.model
        cost, grads = self.get_cost_grads()
        outs = [cost]
        if type(grads) in {list, tuple}:
            outs += grads
        else:
            outs.append(grads)

        fn = K.function(
            inputs=[],
            outputs=outs,
            givens={
                model.inputs[0]: x,
                model.targets[0]: y,
                model.sample_weights[0]: np.ones(
                    (x.shape[0],
                     ),
                    dtype=np.float32),
                K.learning_phase(): np.uint8(1)})

        def train_fn(theta):
            self.set_model_params(theta)
            cost_grads = fn([])
            cost = np.asarray(cost_grads[0], dtype=np.float64)
            grads = np.asarray(self.flatten_grads(cost_grads[1:]), dtype=np.float64)

            return cost, grads

        return train_fn
    def build_model_policy_value(self, model, max_cache_size=100000):
        from collections import OrderedDict
        cache = OrderedDict()
        from tensorflow.python.keras import backend as K
        get_output = K.function([model.input, K.learning_phase()], [model.output[0], model.output[1]])
        def model_policy_value(input_array):
            key = input_array.tobytes()
            if key in cache:
                cache.move_to_end(key, last=True)
                return cache[key]
            
            input_array = input_array.reshape((-1, 54, 6))
            #input_array = np.rollaxis(input_array, 2, 1)
            
            policy, value = model.predict(input_array)
            #policy, value = get_output([input_array, 0])
            policy = policy.reshape((12,))
            value = value[0, 0]

            cache[key] = (policy, value)
            if len(cache) > max_cache_size:
                cache.popitem(last=False)

            return policy, value

        return model_policy_value
Example #27
def get_grad(gen_image_array):
    '''Compute the gradient of the loss function.'''
    if gen_image_array.shape != (1, target_width, target_height, 3):
        gen_image_array = gen_image_array.reshape((1, target_width, target_height, 3))
    grad_fn = K.function([gModel.input], K.gradients(get_total_loss(gModel.input), [gModel.input]))
    grad = grad_fn([gen_image_array])[0].flatten().astype('float64')
    return grad
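# A hedged variant (not in the original): get_grad rebuilds the K.function on
# every call, which recompiles the graph each time; compiling once and reusing
# the function is much faster for iterative optimizers such as L-BFGS.
grad_fn_cached = K.function([gModel.input],
                            K.gradients(get_total_loss(gModel.input), [gModel.input]))

def get_grad_cached(gen_image_array):
    gen_image_array = gen_image_array.reshape((1, target_width, target_height, 3))
    return grad_fn_cached([gen_image_array])[0].flatten().astype('float64')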
Example #28
def compile_saliency_function(model, activation_layer='block5_conv3'):
    input_img = model.input
    layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
    layer_output = layer_dict[activation_layer].output
    max_output = K.max(layer_output, axis=3)
    saliency = K.gradients(K.sum(max_output), input_img)[0]
    return K.function([input_img, K.learning_phase()], [saliency])
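# A hedged usage sketch: evaluate the saliency map for a preprocessed image
# batch `x` with the learning phase set to 0 (test mode). `model` and `x` are
# assumed to be defined elsewhere.
saliency_fn = compile_saliency_function(model)
saliency_map = saliency_fn([x, 0])[0]  # same spatial shape as the input batch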
Example #29
def grad_cam(input_model, model_x, orig_x, category_index, layer_name, class_names):
    output = input_model.output

    final_layer = Lambda(lambda x: target_category_loss(x, category_index, len(class_names)))
    output = final_layer(output)
    model = Model(inputs=input_model.input, outputs=output)
    loss = K.sum(model.layers[-1].output)
    conv_output = model.get_layer(layer_name).output
    grads = normalize(K.gradients(loss, conv_output)[0])
    gradient_function = K.function([model.layers[0].input, K.learning_phase()], [conv_output, grads])
    output, grads_val = gradient_function([model_x, 0])

    output, grads_val = output[0, :], grads_val[0, :, :, :]

    weights = np.mean(grads_val, axis=(0, 1))
    cam = np.zeros(output.shape[0: 2], dtype=np.float32)

    for i, w in enumerate(weights):
        cam += w * output[:, :, i]

    cam = np.maximum(cam, np.zeros(output.shape[0: 2], dtype=np.float32))
    cam = cam.squeeze()
    cam = cv2.applyColorMap(np.uint8(255 * cam / np.max(cam)), cv2.COLORMAP_JET)
    cam = cv2.resize(cam, (np.shape(orig_x)[0], np.shape(orig_x)[1]))
    cam = 0.4 * cam + 0.6 * orig_x
    return np.uint8(cam)
Example #30
def EvaluateJacobian(model):
	#theano.function( [model.layers[0].input], T.jacobian(model.layers[-1].output.flatten(), model.layers[0].input) )


	X = K.placeholder(shape=(15,15)) #specify the right placeholder
	Y = K.sum(K.square(X)) # loss function
	fn = K.function([X], K.gradients(Y, [X])) #function to call the gradient
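# A hedged sketch of an actual Jacobian (the snippet above only takes the
# gradient of a scalar loss): one K.gradients call per output unit, assuming a
# model with a fixed `output_dim` and an old-style Keras backend.
x = model.layers[0].input
y = model.layers[-1].output
jacobian_rows = [K.gradients(y[:, i], x)[0] for i in range(output_dim)]
jacobian_fn = K.function([x], jacobian_rows)  # one (batch, input_dim) array per output unit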
    def get_model(self, pad_id):
        q_tokens = Input(shape=(None, ), name='q_tokens')

        qr_masks = [
            Input(shape=(None, ), name='qr_masks_%s' % j)
            for j in range(self.query_retrieval_number)
        ]

        qr_tokens = [
            Input(shape=(None, ), name='q_retrieval_%s' % j)
            for j in range(self.query_retrieval_number)
        ]

        pos_tokens = [
            Input(shape=(None, ), name='pos_tokens_%s' % j)
            for j in range(self.pos_number)
        ]

        neg_tokens = [
            Input(shape=(None, ), name='neg_fact_input_%s' % j)
            for j in range(self.neg_number)
        ]

        enc_query_output = self.__get_query_encoder(q_tokens, pad_id, 'query')
        enc_qrs_output = [self.__get_query_encoder(q_retrieval, pad_id, 'qr%s'%index) for index, q_retrieval in \
            enumerate(qr_tokens)]
        enc_pos_facts_output = [self.__get_fact_encoder(pos_fact, pad_id, 'pos%s'%index) for index, pos_fact in \
            enumerate(pos_tokens)]
        enc_neg_facts_output = [self.__get_fact_encoder(neg_fact, pad_id, 'neg%s'%index) for index, neg_fact in \
            enumerate(neg_tokens)]

        # query convolution and maxpooling
        query_emb = self.query_max(enc_query_output)
        query_sem = query_emb

        qrs_emb = QueryRetrievalEncoderMask(
            self.query_retrieval_number)(qr_masks + enc_qrs_output)

        # comment for 1 qr
        qrs_emb = [self.query_max(qr_emb) for qr_emb in qrs_emb]
        qrs_sem = qrs_emb

        # just choose one sentence, so the shape: bs, embedding_dim
        useful_sent = UsefulSentenceAutoPointer(
            tau=self.tau,
            batch_size=self.args.batch_size,
            query_retrieval_number=self.query_retrieval_number,
            use_transition=True)(qrs_sem)

        pos_facts_emb = [
            self.fact_max(pos_fact) for pos_fact in enc_pos_facts_output
        ]
        neg_facts_emb = [
            self.fact_max(neg_fact) for neg_fact in enc_neg_facts_output
        ]

        pos_facts_sem = pos_facts_emb
        neg_facts_sem = neg_facts_emb

        # cosine similarity
        #query_pos_facts_cosine = [Lambda(lambda x:K.sigmoid(x))(Dot(axes=1, normalize=True)([query_sem, pos_fact_sem])) for pos_fact_sem in pos_facts_sem]
        #query_neg_facts_cosine = [Lambda(lambda x:K.sigmoid(x))(Dot(axes=1, normalize=True)([query_sem, neg_fact_sem])) for neg_fact_sem in neg_facts_sem]
        query_pos_facts_cosine = [
            Dot(axes=1, normalize=True)([query_sem, pos_fact_sem])
            for pos_fact_sem in pos_facts_sem
        ]
        query_neg_facts_cosine = [
            Dot(axes=1, normalize=True)([query_sem, neg_fact_sem])
            for neg_fact_sem in neg_facts_sem
        ]
        auto_pos_facts_cosine = [
            Dot(axes=1, normalize=True)([useful_sent, pos_fact_sem])
            for pos_fact_sem in pos_facts_sem
        ]
        auto_neg_facts_cosine = [
            Dot(axes=1, normalize=True)([useful_sent, neg_fact_sem])
            for neg_fact_sem in neg_facts_sem
        ]

        query_pos_facts_cosine = [
            self.cosine_merger_layer(query_pos_facts_cosine +
                                     auto_pos_facts_cosine)
        ]
        query_neg_facts_cosine = self.cosine_merger_layer(
            query_neg_facts_cosine + auto_neg_facts_cosine)

        concat_cosine = Concatenate()(query_pos_facts_cosine +
                                      query_neg_facts_cosine)

        concat_cosine = Reshape(
            (self.pos_number + self.neg_number, 1))(concat_cosine)
        # gamma
        weight = np.asarray([1]).reshape(1, 1, 1)
        with_gamma = Conv1D(1,
                            1,
                            padding='same',
                            input_shape=(self.pos_number + self.neg_number, 1),
                            activation='linear',
                            use_bias=False,
                            weights=[weight])(concat_cosine)
        with_gamma = Reshape((self.pos_number + self.neg_number, ))(with_gamma)

        # softmax
        prob = self.output_softmax_layer(with_gamma)

        model = Model(inputs=[q_tokens] + qr_masks + qr_tokens + pos_tokens +
                      neg_tokens,
                      outputs=prob)

        model_pos_cosine = K.function([q_tokens] + qr_masks + qr_tokens +
                                      pos_tokens, query_pos_facts_cosine)
        query_embeddings = [enc_query_output]
        pos_embeddings = enc_pos_facts_output
        query_word_embedding_fn = K.function([q_tokens], query_embeddings)
        pos_embedding_fn = K.function(pos_tokens, pos_embeddings)

        return model, model_pos_cosine, query_word_embedding_fn, pos_embedding_fn
Example #32
    def main(self, name, opts):
        logging.basicConfig(filename=opts.log_file,
                            format='%(levelname)s (%(asctime)s): %(message)s')
        log = logging.getLogger(name)
        if opts.verbose:
            log.setLevel(logging.DEBUG)
        else:
            log.setLevel(logging.INFO)
        log.debug(opts)

        if opts.seed is not None:
            np.random.seed(opts.seed)

        if not opts.model_files:
            raise ValueError('No model files provided!')

        log.info('Loading model ...')
        K.set_learning_phase(0)
        model = mod.load_model(opts.model_files)

        # Get DNA layer.
        dna_layer = None
        for i, name in enumerate(model.input_names):
            if name == 'dna':
                dna_layer = model.input_layers[i]
                break
        if not dna_layer:
            raise ValueError('The provided model is not a DNA model!')

        # Create output vector.
        outputs = []
        for output in model.outputs:
            outputs.append(K.reshape(output, (-1, 1)))
        outputs = K.concatenate(outputs, axis=1)

        # Compute gradient of outputs wrt. DNA layer.
        grads = []
        for name in opts.targets:
            if name == 'mean':
                target = K.mean(outputs, axis=1)
            elif name == 'var':
                target = K.var(outputs, axis=1)
            else:
                raise ValueError('Invalid effect size "%s"!' % name)
            grad = K.gradients(target, dna_layer.output)
            grads.extend(grad)
        grad_fun = K.function(model.inputs, grads)

        log.info('Reading data ...')
        nb_sample = dat.get_nb_sample(opts.data_files, opts.nb_sample)
        replicate_names = dat.get_replicate_names(
            opts.data_files[0],
            regex=opts.replicate_names,
            nb_key=opts.nb_replicate)
        data_reader = mod.data_reader_from_model(
            model, outputs=False, replicate_names=replicate_names)
        data_reader = data_reader(opts.data_files,
                                  nb_sample=nb_sample,
                                  batch_size=opts.batch_size,
                                  loop=False,
                                  shuffle=False)

        meta_reader = hdf.reader(opts.data_files, ['chromo', 'pos'],
                                 nb_sample=nb_sample,
                                 batch_size=opts.batch_size,
                                 loop=False,
                                 shuffle=False)

        out_file = h5.File(opts.out_file, 'w')
        out_group = out_file

        def h5_dump(path, data, idx, dtype=None, compression='gzip'):
            if path not in out_group:
                if dtype is None:
                    dtype = data.dtype
                out_group.create_dataset(
                    name=path,
                    shape=[nb_sample] + list(data.shape[1:]),
                    dtype=dtype,
                    compression=compression
                )
            out_group[path][idx:idx+len(data)] = data

        log.info('Computing effects ...')
        progbar = ProgressBar(nb_sample, log.info)
        idx = 0
        for inputs in data_reader:
            if isinstance(inputs, dict):
                inputs = list(inputs.values())
            batch_size = len(inputs[0])
            progbar.update(batch_size)

            # Compute gradients.
            grads = grad_fun(inputs)

            # Slice window at center.
            if opts.dna_wlen:
                for i, grad in enumerate(grads):
                    delta = opts.dna_wlen // 2
                    ctr = grad.shape[1] // 2
                    grads[i] = grad[:, (ctr-delta):(ctr+delta+1)]

            # Aggregate effects in window
            if opts.agg_effects:
                for i, grad in enumerate(grads):
                    if opts.agg_effects == 'mean':
                        grad = grad.mean(axis=1)
                    elif opts.agg_effects == 'wmean':
                        weights = linear_weights(grad.shape[1])
                        grad = np.average(grad, axis=1, weights=weights)
                    elif opts.agg_effects == 'max':
                        grad = grad.max(axis=1)
                    else:
                        tmp = 'Invalid function "%s"!' % (opts.agg_effects)
                        raise ValueError(tmp)
                    grads[i] = grad

            # Write computed effects
            for name, grad in zip(opts.targets, grads):
                h5_dump(name, grad, idx)

            # Store inputs
            if opts.store_inputs:
                for name, value in zip(model.input_names, inputs):
                    h5_dump(name, value, idx)

            # Store positions
            for name, value in next(meta_reader).items():
                h5_dump(name, value, idx)

            idx += batch_size
        progbar.close()

        out_file.close()
        log.info('Done!')

        return 0
Example #33
#  'kernel_constraint': None,
#  'kernel_initializer': {'class_name': 'VarianceScaling',
#  'config': {'distribution': 'uniform',
#  'mode': 'fan_avg',
#  'scale': 1.0,
#  'seed': None}},
#  'kernel_regularizer': {'class_name': 'L1L2',
#  'config': {'l1': 0.0, 'l2': 0.0017999999690800905}},
#  'name': 'dense_3',
#  'trainable': True,
#  'units': 1,
#  'use_bias': True}

#-------------------------
# get action potential matrix of layer 0, output = activation(dot(input, kernel) + bias), https://keras.io/layers/core/#dense
get_0th_layer_output = K.function([new_model.layers[0].input],
                                  [new_model.layers[0].output])
zerolayer_output = get_0th_layer_output([aa_train_x])[0]  # this is a list with len of 1
zerolayer_output.shape
#(903, 180)

# get weights matrix  from layer 0
len(new_model.layers[0].get_weights()[0])  # number of rows in weight matrix
zerolayer_weights = np.zeros((3092, 180), dtype=float)
for i in range(len(new_model.layers[0].get_weights()[0])):
    zerolayer_weights[i, :] = new_model.layers[0].get_weights()[0][i]

# the input training matrix
aa_train_x.shape  #(903, 3092)

# get pij = 1/m * sum over m (abs(wji*xi +bj)) from the paper https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7280626
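# A hedged sketch of that statistic (the snippet ends before computing it):
# p_j = (1/m) * sum over the m training rows of |x . w_j + b_j|, using the
# names above; reading the bias from get_weights()[1] is an assumption.
bias = new_model.layers[0].get_weights()[1]              # shape (180,)
pre_act = np.dot(aa_train_x, zerolayer_weights) + bias   # (903, 180)
p = np.abs(pre_act).mean(axis=0)                         # one value per hidden unit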
    def _generate_filter_image(input_img,
                               layer_output,
                               filter_index):
        """Generates image for one particular filter.
        # Arguments
            input_img: The input-image Tensor.
            layer_output: The output-image Tensor.
            filter_index: The to be processed filter number.
                          Assumed to be valid.
        #Returns
            Either None if no image could be generated.
            or a tuple of the image (array) itself and the last loss.
        """
        s_time = time.time()

        # we build a loss function that maximizes the activation
        # of the nth filter of the layer considered
        if K.image_data_format() == 'channels_first':
            loss = K.mean(layer_output[:, filter_index, :, :])
        else:
            loss = K.mean(layer_output[:, :, :, filter_index])

        # we compute the gradient of the input picture wrt this loss
        grads = K.gradients(loss, input_img)[0]

        # normalization trick: we normalize the gradient
        grads = normalize(grads)

        # this function returns the loss and grads given the input picture
        iterate = K.function([input_img], [loss, grads])

        # we start from a gray image with some random noise
        intermediate_dim = tuple(
            int(x / (upscaling_factor ** upscaling_steps)) for x in output_dim)
        if K.image_data_format() == 'channels_first':
            input_img_data = np.random.random(
                (1, 3, intermediate_dim[0], intermediate_dim[1]))
        else:
            input_img_data = np.random.random(
                (1, intermediate_dim[0], intermediate_dim[1], 3))
        input_img_data = (input_img_data - 0.5) * 20 + 128

        # Slowly upscaling towards the original size prevents a dominating
        # high-frequency structure in the visualized image, as would occur
        # if we directly computed the full-size image. It provides a better
        # starting point for each following dimension and therefore
        # avoids poor local minima.
        for up in reversed(range(upscaling_steps)):
            # we run gradient ascent for e.g. 20 steps
            for _ in range(epochs):
                loss_value, grads_value = iterate([input_img_data])
                input_img_data += grads_value * step

                # some filters get stuck to 0, we can skip them
                if loss_value <= K.epsilon():
                    return None

            # Calculate upscaled dimension
            intermediate_dim = tuple(
                int(x / (upscaling_factor ** up)) for x in output_dim)
            # Upscale
            img = deprocess_image(input_img_data[0])
            img = np.array(pil_image.fromarray(img).resize(intermediate_dim,
                                                           pil_image.BICUBIC))
            input_img_data = np.expand_dims(
                process_image(img, input_img_data[0]), 0)

        # decode the resulting input image
        img = deprocess_image(input_img_data[0])
        e_time = time.time()
        print('Costs of filter {:3}: {:5.0f} ( {:4.2f}s )'.format(filter_index,
                                                                  loss_value,
                                                                  e_time - s_time))
        return img, loss_value
from skimage.measure import block_reduce
from function import newtxt,newimagedata,create_plots,plot_confusion_matrix,cnn_model,cnn_model1,cnn_model2
#%%
img_path = './2012image/201208172_T-12-46-15_Dive_01_017.jpg'
img = imread(img_path)
#%%
divide_image = np.zeros([3468, 30, 30, 3])
for i in range(51):
    for j in range(68):
        # row-major tile index: 51 * 68 = 3468 tiles of 30x30 pixels
        divide_image[i * 68 + j, :, :, :] = img[i*30:(i+1)*30, j*30:(j+1)*30]
#%%
#K.set_learning_phase(1) #set learning phase
model = cnn_model()
model.load_weights('model_weight/2012images-areas-7.5-50epoch.h5')
cnn_output = K.function([model.layers[0].input], [model.layers[19].output])
f1 = cnn_output([divide_image])[0]
#%%
kmeans = MiniBatchKMeans(n_clusters=300,
        random_state=0,
        batch_size=128,
        max_iter=10).fit(f1)
#%%
code_book = kmeans.cluster_centers_






Example #36
    model.save(model_path_save)

    # Layers sizes
    input("Press Enter to continue.")
    bot_lay_size = 2048
    n_train_imgs = imgs_train.shape[0]
    n_test_imgs = imgs_test.shape[0]
    n_classes = lab_train_ohe.shape[1]
    print("bot lay size:", bot_lay_size)
    print("train_imgs", n_train_imgs)
    print('test imgs', n_test_imgs)
    print("classes", n_classes)

    # backend function to access values from the bottleneck layer
    bottle_tensor_func = K.function(
        [model.layers[0].input, K.learning_phase()],
        [model.get_layer('fc_1_act').output])
    #set up np.array to store values for all images
    bottle_tensor_train = np.zeros(shape=(n_train_imgs, bot_lay_size))
    bottle_labels_train = np.zeros(shape=(n_train_imgs, n_classes))
    bottle_tensor_test = np.zeros(shape=(n_test_imgs, bot_lay_size))
    bottle_labels_test = np.zeros(shape=(n_test_imgs, n_classes))

    def batcher(X_train, y_train, size):
        X_batch = [
            X_train[indx:indx + size] for indx in range(0, len(X_train), size)
        ]
        y_batch = [
            y_train[indx:indx + size] for indx in range(0, len(y_train), size)
        ]
        return zip(X_batch, y_batch)
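    # A hedged sketch of the intended loop (the original cuts off here):
    # stream batches through the bottleneck function and store the
    # activations together with their one-hot labels.
    indx = 0
    for X_b, y_b in batcher(imgs_train, lab_train_ohe, 32):
        bottle_tensor_train[indx:indx + len(X_b)] = bottle_tensor_func([X_b, 0])[0]
        bottle_labels_train[indx:indx + len(X_b)] = y_b
        indx += len(X_b)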
 def get_f_layer(self, layer_name):
     return K.function([self.net_input],
                       [self.get_layer_output(layer_name)])
    style_reference_features = layer_features[1, :, :, :]
    combination_features = layer_features[3, :, :, :]
    sl = style_loss(style_reference_features, combination_features)
    loss += (style_weight / len(feature_layers)) * sl
loss += total_variation_weight * total_variation_loss(combination_image)

# get the gradients of the generated image wrt the loss
grads = K.gradients(loss, combination_image)

outputs = [loss]
if type(grads) in {list, tuple}:
    outputs += grads
else:
    outputs.append(grads)

f_outputs = K.function([combination_image], outputs)


def eval_loss_and_grads(x):
    x = x.reshape((1, 3, img_width, img_height))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values


"""
"""
Example #39
b1, b2 = pearsonr(Y_NN, H_e)
print("NN rho-value")
print(b1)

S, unique, pk = DataFunctions.calculate_entropy(H_t)
print("The entropy value is: ", S)

#Saving files
numpy.save('bldg1_B.npy', Y_lstm2)
numpy.save('bldg1_MLP.npy', Y_NN)

############################################################

###########Extracting Data for multi-timescale Analysis
get_1st_layer_output = K.function([save_model.layers[0].input],
                                  [save_model.layers[1].output])
lstm_h1 = int(lstm_hidden['Layer1'])
h_input = numpy.zeros((len(train_data2), lstm_h1))
h_val = numpy.zeros((len(val_data2), lstm_h1))
h_test = numpy.zeros((len(test_data2), lstm_h1))

### Troubleshooting ###
print("Len(train1)", len(train_data))
print("Len(train2)", len(train_data2))

for i in range(0, len(train_data)):
    X_temp = train_data[i, :, :]
    X_temp = X_temp[None, :, :]
    h_temp = numpy.asarray(get_1st_layer_output([X_temp]))
    h_input[i * 24:(i + 1) * 24, :] = h_temp[0, :]
Example #40
    def __init__(self, replay_filename, group_name, model_filename=''):
        # Set learning phase to TEST
        self.learning_phase = TEST_MODE

        # Loads ReplayData file
        self.replay_data = h5py.File('{}'.format(replay_filename), 'r')
        try:
            self.group = self.replay_data[group_name]
        except KeyError:
            self.group = self.replay_data[group_name + '_init']
            group_name += '_init'

        self.group_name = group_name

        # If not informed, defaults to '_model' suffix
        if model_filename == '':
            model_filename = '{}_model.h5'.format(group_name)
        # Loads Keras model
        self.model = load_model(model_filename)

        # Retrieves some basic information from the replay data
        self.inputs = self.group['inputs'][:]
        self.targets = self.group['targets'][:]
        self.n_epochs = self.group.attrs['n_epochs']
        self.n_layers = self.group.attrs['n_layers']

        # Generates ranges for the number of different weight arrays in each layer
        self.n_weights = [
            range(len(self.group['layer{}'.format(l)]))
            for l in range(self.n_layers)
        ]

        # Retrieves weights as a list, each element being one epoch
        self.weights = self._retrieve_weights()

        # Gets Tensors for the weights in the same order as the layers
        # Keras' model.weights returns the Tensors in a different order!
        self._model_weights = [
            w for layer in self.model.layers for w in layer.weights
        ]

        ### Functions
        # Keras function to get the outputs, given inputs and weights
        self._get_output = K.function(inputs=[K.learning_phase()] +
                                      self.model.inputs + self._model_weights,
                                      outputs=[self.model.layers[-1].output])
        # Keras function to get the loss and metrics, given inputs, targets, weights and sample weights
        self._get_metrics = K.function(
            inputs=[K.learning_phase()] + self.model.inputs +
            self.model.targets + self._model_weights +
            self.model.sample_weights,
            outputs=[self.model.total_loss] + self.model.metrics_tensors)
        # Keras function to compute the binary cross entropy, given inputs, targets, weights and sample weights
        self._get_binary_crossentropy = K.function(
            inputs=[K.learning_phase()] + self.model.inputs +
            self.model.targets + self._model_weights +
            self.model.sample_weights,
            outputs=[
                K.binary_crossentropy(self.model.targets[0],
                                      self.model.outputs[0])
            ])

        # Keras function to compute the gradients for trainable weights, given inputs, targets, weights and
        # sample weights
        self.__trainable_weights = [
            w for layer in self.model.layers for w in layer.trainable_weights
            if layer.trainable and ('bias' not in w.op.name)
        ]
        self.__trainable_gradients = self.model.optimizer.get_gradients(
            self.model.total_loss, self.__trainable_weights)
        self._get_gradients = K.function(
            inputs=[K.learning_phase()] + self.model.inputs +
            self.model.targets + self._model_weights +
            self.model.sample_weights,
            outputs=self.__trainable_gradients)

        def get_z_op(layer):
            op = layer.output.op
            if op.type in Z_OPS:
                return layer.output
            else:
                op_layer_name = op.name.split('/')[0]
                for input in op.inputs:
                    input_layer_name = input.name.split('/')[0]
                    if (input.op.type in Z_OPS) and (op_layer_name
                                                     == input_layer_name):
                        return input
                return None

        __z_layers = np.array([
            i for i, layer in enumerate(self.model.layers)
            if get_z_op(layer) is not None
        ])
        __act_layers = np.array([
            i for i, layer in enumerate(self.model.layers)
            if layer.output.op.type.lower() in ACTIVATIONS
        ])
        __z_layers = np.array([
            __z_layers[np.argmax(layer < __z_layers) - 1]
            for layer in __act_layers
        ])
        self.z_act_layers = [self.model.layers[i].name for i in __z_layers]

        self._z_layers = ['inputs'
                          ] + [self.model.layers[i].name for i in __z_layers]
        self._z_tensors = [K.identity(self.model.inputs)] + list(
            filter(lambda t: t is not None,
                   [get_z_op(self.model.layers[i]) for i in __z_layers]))

        self._activation_layers = ['inputs'] + [
            self.model.layers[i].name for i in __act_layers
        ]
        self._activation_tensors = [K.identity(self.model.inputs)] + [
            self.model.layers[i].output for i in __act_layers
        ]

        # Keras function to compute the Z values given inputs and weights
        self._get_zvalues = K.function(inputs=[K.learning_phase()] +
                                       self.model.inputs + self._model_weights,
                                       outputs=self._z_tensors)
        # Keras function to compute the activation values given inputs and weights
        self._get_activations = K.function(inputs=[K.learning_phase()] +
                                           self.model.inputs +
                                           self._model_weights,
                                           outputs=self._activation_tensors)

        # Gets names of all layers with arrays of weights of lengths 1 (no biases) or 2 (with biases)
        # Layers without weights (e.g. Activation, BatchNorm) are not included
        self.weights_layers = [
            layer.name
            for layer, weights in zip(self.model.layers, self.n_weights)
            if len(weights) in (1, 2)
        ]

        # Attributes for the visualizations - Data
        self._feature_space_data = None
        self._loss_hist_data = None
        self._loss_and_metric_data = None
        self._prob_hist_data = None
        self._decision_boundary_data = None
        self._weights_violins_data = None
        self._activations_violins_data = None
        self._zvalues_violins_data = None
        self._gradients_data = None
        # Attributes for the visualizations - Plot objects
        self._feature_space_plot = None
        self._loss_hist_plot = None
        self._loss_and_metric_plot = None
        self._prob_hist_plot = None
        self._decision_boundary_plot = None
        self._weights_violins_plot = None
        self._activations_violins_plot = None
        self._zvalues_violins_plot = None
        self._gradients_plot = None
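
A hedged usage sketch (not part of the original class): with the conventions above, querying one of these backend functions for a given epoch means feeding the learning phase, the stored inputs, and that epoch's weight arrays, in that order. outputs_at_epoch below is a hypothetical helper, and it assumes self.weights holds one list of arrays per epoch, as _retrieve_weights suggests.

def outputs_at_epoch(replay, epoch):
    # learning_phase was set to TEST_MODE (0) in __init__, so dropout and
    # other train-only behavior stay disabled.
    feed = [replay.learning_phase] + [replay.inputs] + replay.weights[epoch]
    return replay._get_output(feed)[0]  # K.function returns a list of arrays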
Example #41
else:
    model.add(Dense(k2, activation='softmax', input_dim=k1))
model.add(Dense(cY, activation='softmax', input_dim=k2))
model.layers[0].trainable = False
sgd = SGD(lr=4, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=.01)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
# with a Sequential model
start_time = time.time()
Pxy_hat = get_p_hat_mat(XLabels, YLabels)
Bxy_hat = p2b(Pxy_hat)
Px_hat = np.sum(Pxy_hat, axis=0)
Py_hat = np.sum(Pxy_hat, axis=1)
U, s, V = np.linalg.svd(Bxy_hat)
s1 = K.function([model.layers[0].input],
                [model.layers[0].output])([np.eye(cX)])[0]
# s1: the output of the first layer evaluated on the identity input np.eye(cX)
s1m = np.matmul(Px_hat.reshape(1, -1), s1)
Phi1 = info_mat(s1, Px_hat)
plt.figure(figsize=(9, 3))

model.fit(XLabels, YLabels, verbose=0, batch_size=batch_size, epochs=nEpochs)
v2 = model.get_weights()[4].T
Psi2 = info_mat(v2, Py_hat)
b2 = model.get_weights()[5]
b20 = b2 - np.dot(b2, Py_hat)

s2 = K.function([model.layers[0].input],
                [model.layers[1].output])([np.eye(cX)])[0]
s2m = np.matmul(Px_hat.reshape(1, -1), s2)
	print("\nCompiling model\n")
	tgt = Input(shape=(max_sents,SENT_DIM), dtype='float32')
	srcs = Input(shape=(max_sents,SENT_DIM), dtype='float32')
	attention = _Attention(max_sents,SENT_DIM,dropout=0.2)(tgt,srcs)
	align1= _SoftAlignment(max_sents,SENT_DIM)(srcs,attention)
	align2= _SoftAlignment(max_sents,SENT_DIM)(tgt,attention,transpose=True)
	vec_l = _Comparison(max_sents,SENT_DIM,dropout=0.2)(tgt,align1)
	vec_r = _Comparison(max_sents,SENT_DIM,dropout=0.2)(srcs,align2)
	pds = _Entailment(SENT_DIM,NUM_CLASSES,dropout=0.2)(vec_l,vec_r)
	model = Model(inputs=[tgt,srcs],outputs=pds)
	model.summary()
	model.compile(loss="categorical_crossentropy",optimizer=Adam(lr=0.001),metrics=["accuracy"])

	cb = [ModelCheckpoint("temp1_model.hdf5",monitor="val_loss",verbose=1,save_weights_only=True,save_best_only=True)]

	get_attention_matrix = K.function([model.layers[0].input,model.layers[1].input,K.learning_phase()],[model.layers[3].output])

	NUM_EPOCHS = 30
	BATCH_SIZE = 25

	print("\nTraining model\n")
	history = model.fit(x=[target_vecs[train],source_vecs[train]],y=gold[train],batch_size=BATCH_SIZE,validation_split=0.1,epochs=NUM_EPOCHS,shuffle=True,verbose=1,callbacks=cb)
	model.load_weights("temp1_model.hdf5")

	preds = model.predict([target_vecs[test],source_vecs[test]])
	probs.append(preds)
	preds = np.argmax(preds,axis=1)
	gold_test = np.argmax(gold[test],axis=1)
	predictions.append(preds)
	golds.append(gold_test)
	target.append(targets[test])
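	# Hedged usage of get_attention_matrix defined above (it is never called
	# in this snippet): the trailing 0 feeds K.learning_phase(), i.e. test
	# mode, so dropout inside the attention block is disabled.
	attention_scores = get_attention_matrix(
		[target_vecs[test], source_vecs[test], 0])[0]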
def plot_sample(sample_number, input_number, neurons, x_train_dstack,
                y_train_dstack, model, seq_dur, i, plot_dir, f, string_name):

    frecuencias = []
    seq_dur = len(x_train_dstack[sample_number, :, 0])
    test = x_train_dstack[sample_number:sample_number + 1, :, :]
    colors = cm.rainbow(np.linspace(0, 1, neurons + 1))
    y_pred = model.predict(test)

    ###################################

    # Status for the sample value at the layer indicated
    capa = 0

    #First Layer:
    get_0_layer_output = K.function([model.layers[capa].input],
                                    [model.layers[capa].output])
    layer_output = get_0_layer_output([test])[0]  # index 0: the single requested output

    #layer_output= model.layers[0].output
    #print("layer_output",layer_output)

    # Second layer (the original reused layer `capa` here; the second
    # layer's output was presumably intended):
    get_1_layer_output = K.function([model.layers[capa].input],
                                    [model.layers[1].output])
    #layer_output_1     = get_1_layer_output([test])[0]

    layer_output_T = layer_output.T
    print("layer_output", layer_output_T)
    array_red_list = []

    ####################################

    y_pred = model.predict(test)

    # To generate the Populational Analysis

    for ii in np.arange(0, neurons, 1):
        neurona_serie = np.reshape(layer_output_T[ii], len(layer_output_T[ii]))
        array_red_list.append(neurona_serie)

    #SVD and PCA with scikit learn

    array_red = np.asarray(array_red_list)
    sdv = sklearn.decomposition.TruncatedSVD(n_components=2)
    sdv_3d = sklearn.decomposition.TruncatedSVD(n_components=3)
    X_2d = sdv.fit_transform(array_red.T)
    X_3d = sdv_3d.fit_transform(array_red.T)

    pca = PCA(n_components=3)
    X_pca_ = pca.fit(array_red)
    X_pca = pca.components_

    ####################################

    #2-Dim plots

    fig = plt.figure()
    fig.suptitle("SDV Network Population Analysis", fontsize=20)
    ax1 = fig.add_subplot(111)

    ax1.plot(X_2d[:, 0],
             X_2d[:, 1],
             c='g',
             marker="p",
             zorder=2,
             label='Dim Reduction of the network')
    ax1.scatter(X_2d[0, 0], X_2d[0, 1], c='r', marker='^', s=70, label='start')
    ax1.scatter(X_2d[-1, 0],
                X_2d[-1, 1],
                c='b',
                marker='^',
                s=70,
                label='stop')
    plt.legend(loc='upper left', fontsize='x-small')
    plt.ylabel('C1')
    plt.xlabel('C2')
    plt.ylim([-3, 3])
    plt.xlim([-4, 4])

    figname = str(plot_dir) + "/sample_" + str(sample_number) + "_sdv_" + str(
        capa) + "_individual_neurons_state_" + str(i) + ".png"
    #plt.savefig(figname,dpi=200)
    plt.close()

    print("------------")
    ordeno_primero_x = X_pca[0]
    ordeno_primero_y = X_pca[1]
    ordeno_primero_z = X_pca[2]

    fig = plt.figure()
    fig.suptitle("PCA Network Population Analysis", fontsize=20)
    ax1 = fig.add_subplot(111)

    ax1.plot(X_pca[0],
             X_pca[1],
             c='c',
             marker="p",
             zorder=2,
             label='Dim Reduction of the network')
    ax1.scatter(ordeno_primero_x[0],
                ordeno_primero_y[0],
                s=70,
                c='r',
                marker="^",
                label='start')
    ax1.scatter(ordeno_primero_x[-1],
                ordeno_primero_y[-1],
                s=70,
                c='b',
                marker="^",
                label='stop')
    plt.legend(loc='upper left', fontsize='x-small')
    plt.ylabel('C1')
    plt.xlabel('C2')
    plt.ylim([-0.3, 0.3])
    plt.xlim([-0.15, 0.15])
    figname = str(plot_dir) + "/sample_" + str(sample_number) + "_pca_" + str(
        capa) + "_individual_neurons_state_" + str(i) + ".png"
    #plt.savefig(figname,dpi=200)
    plt.close()
    #pp.show()

    ####################################
    #3-Dim Plots

    # How many 3d angular views you want to define

    yy = np.arange(0, 180, 10)
    #yy        = np.arange(0,90,10)
    #yy        = np.arange(70,80,10)

    for ii, kk in enumerate(yy):
        print("ii: ", ii, " kk: ", kk)

        fig = plt.figure(figsize=(10, 8))
        fig.suptitle("3D plot SDV Network Population Analysis", fontsize=20)
        ax = fig.add_subplot(111, projection='3d')

        ax.plot(X_3d[:, 0],
                X_3d[:, 1],
                X_3d[:, 2],
                color='gray',
                zorder=2,
                label="3 d plot",
                marker="p")

        # Mark the approximate vertices
        ax.scatter(X_3d[0, 0],
                   X_3d[0, 1],
                   X_3d[0, 2],
                   c='r',
                   marker="^",
                   label='start',
                   s=300)
        ax.scatter(X_3d[-1, 0],
                   X_3d[-1, 1],
                   X_3d[-1, 2],
                   c='b',
                   marker="^",
                   label='stop',
                   s=300)
        ax.scatter(X_3d[40, 0],
                   X_3d[40, 1],
                   X_3d[40, 2],
                   c='deepskyblue',
                   marker="^",
                   label='v1: (0,0,0)',
                   s=300)
        ax.scatter(X_3d[100, 0],
                   X_3d[100, 1],
                   X_3d[100, 2],
                   c='gold',
                   marker="^",
                   label='v2: (1,1,1)',
                   s=300)
        ax.scatter(X_3d[180, 0],
                   X_3d[180, 1],
                   X_3d[180, 2],
                   c='pink',
                   marker="^",
                   label='v3: (1,0,1)',
                   s=300)
        ax.scatter(X_3d[240, 0],
                   X_3d[240, 1],
                   X_3d[240, 2],
                   c='green',
                   marker="^",
                   label='v4: (0,1,1)',
                   s=300)
        ax.scatter(X_3d[320, 0],
                   X_3d[320, 1],
                   X_3d[320, 2],
                   c='m',
                   marker="^",
                   label='v5:(0,0,1)',
                   s=300)
        ax.scatter(X_3d[380, 0],
                   X_3d[380, 1],
                   X_3d[380, 2],
                   c='y',
                   marker="^",
                   label='v6: (1,1,0)',
                   s=300)
        ax.scatter(X_3d[460, 0],
                   X_3d[460, 1],
                   X_3d[460, 2],
                   c='hotpink',
                   marker="^",
                   label='v7: (1,0,0)',
                   s=300)
        ax.scatter(X_3d[520, 0],
                   X_3d[520, 1],
                   X_3d[520, 2],
                   c='deeppink',
                   marker="^",
                   label='v8:(0,1,0)',
                   s=300)

        ax.set_xlabel('comp 1 (arb. units)', size=16)
        ax.set_ylabel('comp 2 (arb. units)', size=16)
        ax.set_zlabel('comp 3 (arb. units)', size=16)
        ax.legend()
        ax.view_init(elev=10, azim=kk)
        #ax.view_init(elev=kk, azim=10)
        figname = str(plot_dir) + "/sample_" + str(
            sample_number) + "_sdv_3d_" + str(
                capa) + "_individual_neurons_state_" + str(i) + '_' + str(
                    kk) + ".png"
        plt.savefig(figname, dpi=200)
        plt.close()

    ####################################

    #kk=70
    for ii, kk in enumerate(yy):

        fig = plt.figure(figsize=(18, 7))
        plt.subplot(3, 2, 1)
        plt.plot(test[0, :, 0], color='pink', label='Input 1')
        plt.plot(y_train_dstack[sample_number, :, 0],
                 color='grey',
                 linewidth=3,
                 label='Target Output 1')
        plt.plot(y_pred[0, :, 0],
                 color='r',
                 linewidth=2,
                 label=' Output\n 25 individual states')
        plt.legend(fontsize='x-small', loc=3)

        plt.subplot(3, 2, 3)
        plt.plot(test[0, :, 1], color='pink', label='Input  2')
        plt.plot(y_train_dstack[sample_number, :, 1],
                 color='grey',
                 linewidth=3,
                 label='Target Output2')
        plt.plot(y_pred[0, :, 1], color='r', linewidth=2, label=' Output 2')
        plt.xlim(0, seq_dur + 1)
        plt.ylim([-1.5, 1.5])
        plt.yticks([])
        #plt.ylabel('Activity [arb. units]',fontsize = 16)
        #plt.xlabel('time [mS]',fontsize = 16)
        #plt.xticks(np.arange(0,seq_dur+1,20),fontsize = 8)
        plt.legend(fontsize='x-small', loc=1)

        plt.subplot(3, 2, 5)
        plt.plot(test[0, :, 2], color='pink', label='Input 3')
        plt.plot(y_train_dstack[sample_number, :, 2],
                 color='grey',
                 linewidth=3,
                 label='Target Output 3')
        plt.plot(y_pred[0, :, 2], color='r', linewidth=2, label=' Output')
        plt.xlim(0, seq_dur + 1)
        plt.ylim([-1.5, 1.5])
        plt.yticks([])
        #plt.ylabel('Activity [arb. units]',fontsize = 16)
        plt.xlabel('time [mS]', fontsize=16)
        #plt.xticks(np.arange(0,seq_dur+1,20),fontsize = 8)
        plt.legend(fontsize='x-small', loc=1)

        fig.suptitle("Time series and PCA 3D plot", fontsize=20)
        ax = fig.add_subplot(122, projection='3d')
        x = X_pca[0]
        y = X_pca[1]
        z = X_pca[2]
        N = len(z)

        ax.plot(X_3d[:, 0],
                X_3d[:, 1],
                X_3d[:, 2],
                color='gray',
                zorder=2,
                label="3 d plot",
                marker="p")
        ax.scatter(X_3d[0, 0],
                   X_3d[0, 1],
                   X_3d[0, 2],
                   c='r',
                   marker="^",
                   label='start',
                   s=300)
        ax.scatter(X_3d[-1, 0],
                   X_3d[-1, 1],
                   X_3d[-1, 2],
                   c='b',
                   marker="^",
                   label='stop',
                   s=300)
        ax.set_xlabel(' 1 (arb. units)', size=16)
        ax.set_ylabel(' 2 (arb. units)', size=16)
        ax.set_zlabel(' 3 (arb. units)', size=16)
        ax.axes.get_xaxis().set_ticks([])
        ax.axes.get_yaxis().set_ticks([])

        ax.set_zticks(())
        ax.view_init(elev=10, azim=kk)
        #ax.view_init(elev=kk, azim=10)
        ax.legend(fontsize='small')
        fig.text(0.1,
                 0.5,
                 'Amplitude [Arb. Units]',
                 va='center',
                 ha='center',
                 rotation='vertical',
                 fontsize=16)
        figname = str(plot_dir) + "/sample_" + str(
            sample_number) + "_pca_3D_" + str(
                capa) + "_individual_neurons_state_" + str(i) + '_' + str(
                    kk) + "_" + str(f) + ".png"
        plt.savefig(figname, dpi=300, bbox_inches='tight')
        plt.close()
# create layer name to layer dictionary
layer_dict = {layer.name: layer for layer in model.layers}
print(layer_dict)
# layer_dict

# visualize gradients at pooling layers
num_pool_layers = 5
lr = 0.01
fig, axes = plt.subplots(1, num_pool_layers, figsize=(20, 10))
for i in range(num_pool_layers):
    layer_name = "block{:d}_pool".format(i + 1)
    layer_output = layer_dict[layer_name].output
    loss = K.mean(layer_output)
    grads = K.gradients(loss, dream)[0]
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5) * lr
    f = K.function([dream], [loss, grads])
    img_value = p_img.copy()
    loss_value, grads_value = f([img_value])
    axes[i].set_title(layer_name)
    axes[i].imshow(deprocess(grads_value))

plt.tight_layout()
plt.show()

# deep dreaming
first_layer = model.layers[-1]
input_img = first_layer.input
print(first_layer.name, first_layer.output_shape)

num_pool_layers = 5
num_iters_per_layer = 3
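
The snippet breaks off before the actual dreaming loop. A minimal sketch of the conventional gradient-ascent iteration, reusing the fetch-function pattern from the visualization loop above (using lr as the step size is an assumption):

img_value = p_img.copy()
for i in range(num_pool_layers):
    layer_output = layer_dict["block{:d}_pool".format(i + 1)].output
    loss = K.mean(layer_output)
    grads = K.gradients(loss, dream)[0]
    # L2-normalize the gradient so the step size is controlled by lr alone
    grads /= K.sqrt(K.mean(K.square(grads))) + 1e-5
    step = K.function([dream], [loss, grads])
    for _ in range(num_iters_per_layer):
        loss_value, grads_value = step([img_value])
        img_value += grads_value * lr  # gradient ascent on the activations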
Example #45
def saveData(model, X_train, y_train, X_test, y_test, nb_classes):
    for layer_index in range(1, len(model.layers), 1):
        if layer_index % 10 == 0:
            print('Layer index is ' + str(layer_index))
        shouldIContinue = True
        if 'conv2d' in model.layers[layer_index].name and layer_index > 30:
            shouldIContinue = False
        if 'add' in model.layers[layer_index].name:
            shouldIContinue = False
        if 'flatten' in model.layers[layer_index].name:
            shouldIContinue = False
        if 'dense' in model.layers[layer_index].name:
            shouldIContinue = False
        if shouldIContinue:
            continue
        if layer_index % 20 == 0:
            print("think about it")
        directory = 'C:/cifar/residual/layer_output/layer_' + str(
            layer_index) + '_' + model.layers[layer_index].name
        if os.path.exists(directory):
            print('Folder already exists')
        else:
            os.mkdir(directory)

        get_k_layer_output = K.function([model.layers[0].input],
                                        [model.layers[layer_index].output])
        batchSize = 2000
        with open(directory + '/trainingData.txt', 'w',
                  newline='') as csvfile:
            spamwriter = csv.writer(csvfile, delimiter=' ')
            for startingImageIndex in range(int(len(X_train) / batchSize)):
                sample = X_train[startingImageIndex *
                                 batchSize:startingImageIndex * batchSize +
                                 batchSize]
                layer_output = get_k_layer_output([sample])[0]
                for imageIndex in range(len(sample)):
                    output = layer_output[imageIndex].flatten()
                    index = np.argmax(output)
                    row = [0] * len(output)
                    row[index] = 1
                    spamwriter.writerow(output)
        with open(directory + '/trainingLabel.txt', 'w',
                  newline='') as csvfile:
            spamwriter = csv.writer(csvfile, delimiter=' ')
            for label in y_train:
                row = [0] * nb_classes
                row[label[0]] = 1
                spamwriter.writerow(row)
        with open(directory + '/testingData.txt', 'w', newline='') as csvfile:
            spamwriter = csv.writer(csvfile, delimiter=' ')
            for startingImageIndex in range(int(len(X_test) / batchSize)):
                sample = X_test[startingImageIndex *
                                batchSize:startingImageIndex * batchSize +
                                batchSize]
                layer_output = get_k_layer_output([sample])[0]
                for imageIndex in range(len(sample)):
                    output = layer_output[imageIndex].flatten()
                    index = np.argmax(output)
                    row = [0] * len(output)
                    row[index] = 1
                    spamwriter.writerow(output)
        with open(directory + '/testingLabel.txt', 'w',
                  newline='') as csvfile:
            spamwriter = csv.writer(csvfile, delimiter=' ')
            for label in y_test:
                row = [0] * nb_classes
                row[label[0]] = 1
                spamwriter.writerow(row)
Example #46
def test_time_distributed_softmax():
    x = K.placeholder(shape=(1, 1, 5))
    f = K.function([x], [activations.softmax(x)])
    test_values = get_standard_values()
    test_values = np.reshape(test_values, (1, 1, np.size(test_values)))
    f([test_values])[0]
Example #47
from keras.models import load_model
from keras import backend as K
import pandas as pd
import numpy as np
import pickle
'''
Using the trained face-shape model, extract the output of the second-to-last
(fully connected) layer and use it as a feature vector.
'''

if __name__ == '__main__':
    model = load_model('./model/vggface1-weights-improvement-03-0.82.hdf5')
    layer_1 = K.function([model.layers[0].input], [model.layers[-2].output])

    # data
    print('read data...')
    train_data = np.load('./train/images.npy')
    train_data = train_data / 255.0
    train_data = np.transpose(train_data, (0, 3, 1, 2))
    print('train_data shape: ', train_data.shape)
    print('Data is done!')

    batch = 32
    print('get features...')
    for i in range(int(np.ceil(len(train_data) / batch))):
        if (i + 1) * batch > len(train_data):
            batch_data = train_data[i * batch:len(train_data)]
        else:
            batch_data = train_data[i * batch:(i + 1) * batch]
        if i == 0:
            f_vgg = layer_1([batch_data])[0]
            # print(np.shape(f_vgg))
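        else:
            # Hedged continuation: the original snippet is truncated at the
            # first batch; presumably later batches are stacked onto f_vgg.
            f_vgg = np.vstack([f_vgg, layer_1([batch_data])[0]])
    # Persist the extracted features (the path is an assumption; pickle is
    # imported above, which suggests the features were serialized this way).
    with open('./train/features.pkl', 'wb') as fout:
        pickle.dump(f_vgg, fout)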
Example #48
    def CreateModel(self):
        '''
        Define the CNN/LSTM/CTC model using the Keras functional API.
        Input layer: sequence of 39-dimensional feature vectors; the maximum length of one utterance is set to 1500 (about 15 s)
        Hidden layer 1: convolutional layer with 1024 units
        Hidden layer 2: pooling layer with a pool window of size 2
        Hidden layer 3: Dropout layer dropping 20% of the connections, to prevent overfitting
        Hidden layer 4: recurrent LSTM layer
        Hidden layer 5: Dropout layer dropping 20% of the connections, to prevent overfitting
        Hidden layer 6: fully connected layer with self.MS_OUTPUT_SIZE units and softmax activation
        Output layer: a custom CTC layer that uses the CTC loss as the loss function, implementing connectionist temporal classification with multiple outputs
        '''
        # Each frame is represented by 13-dim MFCC features plus their 13-dim
        # first-order and 13-dim second-order deltas; the maximum signal
        # sequence length is 1500.
        input_data = Input(name='the_input',
                           shape=(self.AUDIO_LENGTH, self.AUDIO_FEATURE_LENGTH,
                                  1))

        layer_h1 = Conv2D(32, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(input_data)  # convolutional layer
        layer_h2 = Conv2D(32, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(layer_h1)  # convolutional layer
        layer_h3 = MaxPooling2D(pool_size=2, strides=None,
                                padding="valid")(layer_h2)  # 池化层
        #layer_h3 = Dropout(0.2)(layer_h2) # 随机中断部分神经网络连接,防止过拟合
        layer_h4 = Conv2D(64, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(layer_h3)  # convolutional layer
        layer_h5 = Conv2D(64, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(layer_h4)  # convolutional layer
        layer_h6 = MaxPooling2D(pool_size=2, strides=None,
                                padding="valid")(layer_h5)  # 池化层

        #test=Model(inputs = input_data, outputs = layer_h6)
        #test.summary()

        layer_h7 = Reshape((400, 3200))(layer_h6)  # Reshape layer
        #layer_h5 = LSTM(256, activation='relu', use_bias=True, return_sequences=True)(layer_h4) # LSTM layer
        #layer_h6 = Dropout(0.2)(layer_h5) # randomly drop some connections to prevent overfitting
        layer_h8 = Dense(256,
                         activation="relu",
                         use_bias=True,
                         kernel_initializer='he_normal')(layer_h7)  # fully connected layer
        layer_h9 = Dense(1417, use_bias=True,
                         kernel_initializer='he_normal')(layer_h8)  # fully connected layer

        y_pred = Activation('softmax', name='Activation0')(layer_h9)
        model_data = Model(inputs=input_data, outputs=y_pred)
        #model_data.summary()

        #labels = Input(name='the_labels', shape=[60], dtype='float32')

        labels = Input(name='the_labels',
                       shape=[self.label_max_string_length],
                       dtype='float32')
        input_length = Input(name='input_length', shape=[1], dtype='int64')
        label_length = Input(name='label_length', shape=[1], dtype='int64')
        # Keras doesn't currently support loss funcs with extra parameters
        # so CTC loss is implemented in a lambda layer

        #layer_out = Lambda(ctc_lambda_func,output_shape=(self.MS_OUTPUT_SIZE, ), name='ctc')([y_pred, labels, input_length, label_length])#(layer_h6) # CTC
        loss_out = Lambda(self.ctc_lambda_func, output_shape=(1, ),
                          name='ctc')(
                              [y_pred, labels, input_length, label_length])

        model = Model(inputs=[input_data, labels, input_length, label_length],
                      outputs=loss_out)

        model.summary()

        # clipnorm seems to speeds up convergence
        #sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)
        ada_d = Adadelta(lr=0.01, rho=0.95, epsilon=1e-06)

        #model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd, metrics=["accuracy"])
        model.compile(loss={
            'ctc': lambda y_true, y_pred: y_pred
        },
                      optimizer=ada_d,
                      metrics=['accuracy'])

        # captures output of softmax so we can decode the output during visualization
        test_func = K.function([input_data], [y_pred])

        print('[*Info] Model created successfully; model compiled successfully')
        return model, model_data
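
The self.ctc_lambda_func referenced in the Lambda layer above is not shown in this snippet. Below is a minimal sketch following the standard Keras pattern built on K.ctc_batch_cost; treat it as an assumption about what the original implements.

    def ctc_lambda_func(self, args):
        y_pred, labels, input_length, label_length = args
        # K.ctc_batch_cost expects (labels, y_pred, input_length, label_length)
        # and returns one CTC loss value per batch element.
        return K.ctc_batch_cost(labels, y_pred, input_length, label_length)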
Example #49
    def generateAdversarialExample(self):
        start_time = time.time()
        model_layer_times1 = self.init_coverage_times(
        )  # times of each neuron covered
        model_layer_times2 = self.init_coverage_times(
        )  # update when new image and adversarial images found
        model_layer_value1 = self.init_coverage_value()

        threshold = 0.25
        neuron_to_cover_num = 10
        iteration_times = 0

        neuron_to_cover_weight = 0.5
        predict_weight = 0.5
        learning_step = 0.02
        total_norm = 0

        img_list = []
        adv_list = []
        adv_labels = []

        while len(adv_list) < 1:

            iteration_times += 1

            total_perturb_adversarial = 0

            tmp_img = self.image.reshape(1, 28, 28, 1)
            orig_img = tmp_img.copy()

            img_list.append(tmp_img)

            self.update_coverage(tmp_img, model_layer_times2, threshold)

            while len(img_list) > 0:

                gen_img = img_list[0]

                img_list.remove(gen_img)

                # first check if input already induces differences
                pred1 = self.model.predict(gen_img)
                label1 = np.argmax(pred1[0])

                label_top5 = np.argsort(pred1[0])[-5:]

                self.update_coverage_value(gen_img, model_layer_value1)
                self.update_coverage(gen_img, model_layer_times1, threshold)

                orig_label = label1

                loss_1 = k.mean(
                    self.model.get_layer('before_softmax').output[...,
                                                                  orig_label])
                loss_2 = k.mean(
                    self.model.get_layer('before_softmax').output[
                        ..., label_top5[-2]])
                loss_3 = k.mean(
                    self.model.get_layer('before_softmax').output[
                        ..., label_top5[-3]])
                loss_4 = k.mean(
                    self.model.get_layer('before_softmax').output[
                        ..., label_top5[-4]])
                loss_5 = k.mean(
                    self.model.get_layer('before_softmax').output[
                        ..., label_top5[-5]])

                layer_output = (predict_weight *
                                (loss_2 + loss_3 + loss_4 + loss_5) - loss_1)

                # neuron coverage loss
                loss_neuron = self.neuron_selection(model_layer_times1,
                                                    neuron_to_cover_num)

                # "extreme value" strategy: drive the selected neurons' activations as high as possible

                layer_output += neuron_to_cover_weight * k.sum(loss_neuron)

                # for adversarial image generation
                final_loss = k.mean(layer_output)

                # we compute the gradient of the input picture wrt this loss
                grads = self.normalize(
                    k.gradients(final_loss, self.model.input)[0])

                grads_tensor_list = [loss_1, loss_2, loss_3, loss_4, loss_5]
                grads_tensor_list.extend(loss_neuron)
                grads_tensor_list.append(grads)
                # this function returns the loss and grads given the input picture

                iterate = k.function([self.model.input], grads_tensor_list)

                # we run gradient ascent for 3 steps
                for iters in range(iteration_times):

                    loss_neuron_list = iterate([gen_img])

                    perturb = loss_neuron_list[-1] * learning_step

                    gen_img += perturb
                    gen_img = np.clip(gen_img, -0.5, 5)

                    # previous accumulated neuron coverage
                    previous_coverage = self.neuron_covered(
                        model_layer_times1)[2]

                    pred1 = self.model.predict(gen_img)
                    label1 = np.argmax(pred1[0])

                    self.update_coverage(gen_img, model_layer_times1,
                                         threshold)  # for seed selection

                    current_coverage = self.neuron_covered(
                        model_layer_times1)[2]

                    diff_img = gen_img - orig_img

                    L2_norm = np.linalg.norm(diff_img)

                    orig_L2_norm = np.linalg.norm(orig_img)

                    perturb_adversarial = L2_norm / orig_L2_norm

                    if current_coverage - previous_coverage > 0.0001 and L2_norm < self.similarityMeasure:
                        img_list.append(np.clip(gen_img, -0.5, 5))

                    if label1 != orig_label and L2_norm < self.similarityMeasure:
                        self.update_coverage(gen_img, model_layer_times2,
                                             threshold)

                        total_norm += L2_norm

                        total_perturb_adversarial += perturb_adversarial

                        gen_img = np.clip(gen_img, -0.5, 5)

                        adv_list.append(gen_img)
                        adv_labels.append(
                            np.argmax(self.model.predict(gen_img)[0]))

        end_time = time.time()
        self.time = end_time - start_time
        self.advLabel = adv_labels[0]
        self.advImage = adv_list[0]
        self.completed = True

        if self.verbose:
            print('\ncovered neurons percentage %d neurons %.3f' %
                  (len(model_layer_times2),
                   self.neuron_covered(model_layer_times2)[2]))
Example #50
    negatives = np.random.choice(possibilities, J)
    neg_l_Ds.append([pos_l_Ds[negative] for negative in negatives])

# Because we're using the "categorical_crossentropy" loss function, we can pretend that
# we're dealing with a multi-class classification problem and that every sample is a
# member of the "0" class.
y = np.zeros(J + 1).reshape(1, J + 1)
y[0][0] = 1

for i in range(sample_size):
    history = model.fit([l_Qs[i], pos_l_Ds[i]] + neg_l_Ds[i],
                        y,
                        nb_epoch=1,
                        verbose=0)

# Here, I walk through how to define a function for calculating output from the
# computational graph. Let's define a function that calculates R(Q, D+) for a given
# query and clicked document. The function depends on two inputs, query and pos_doc.
# That is, if you start at the point in the graph where R(Q, D+) is calculated
# and then work backwards as far as possible, you'll end up at two different starting
# points: query and pos_doc. As a result, we supply those inputs in a list to the
# function. This particular function only calculates a single output, but multiple
# outputs are possible (see the next example).
get_R_Q_D_p = backend.function([query, pos_doc], R_Q_D_p)
get_R_Q_D_p([l_Qs[0], pos_l_Ds[0]])

# A slightly more complex function. Notice that both neg_docs and the output are
# lists.
get_R_Q_D_ns = backend.function([query] + neg_docs, R_Q_D_ns)
get_R_Q_D_ns([l_Qs[0]] + neg_l_Ds[0])
Example #51
    def build_model(self):
        """Build an actor (policy) network that maps states -> actions."""
        # Define input layer (states)
        states = layers.Input(shape=(self.state_size, ), name='states')

        # Add hidden layers

        net = layers.Dense(units=32,
                           use_bias=False,
                           activation='relu',
                           kernel_regularizer=regularizers.l2(0.01),
                           activity_regularizer=regularizers.l1(0.01))(states)
        net = layers.BatchNormalization()(net)
        net = layers.Dropout(0.5)(net)

        net = layers.Dense(units=64,
                           use_bias=False,
                           activation='relu',
                           kernel_regularizer=regularizers.l2(0.01),
                           activity_regularizer=regularizers.l1(0.01))(net)
        net = layers.BatchNormalization()(net)
        net = layers.Dropout(0.5)(net)

        net = layers.Dense(units=128,
                           use_bias=False,
                           activation='relu',
                           kernel_regularizer=regularizers.l2(0.01),
                           activity_regularizer=regularizers.l1(0.01))(net)
        net = layers.BatchNormalization()(net)
        net = layers.Dropout(0.5)(net)

        net = layers.Dense(units=64,
                           use_bias=False,
                           activation='relu',
                           kernel_regularizer=regularizers.l2(0.01),
                           activity_regularizer=regularizers.l1(0.01))(net)
        net = layers.BatchNormalization()(net)
        net = layers.Dropout(0.5)(net)

        net = layers.Dense(units=128,
                           use_bias=False,
                           activation='relu',
                           kernel_regularizer=regularizers.l2(0.01),
                           activity_regularizer=regularizers.l1(0.01))(net)
        net = layers.BatchNormalization()(net)
        net = layers.Dropout(0.5)(net)

        # Add final output layer with sigmoid activation
        raw_actions = layers.Dense(units=self.action_size,
                                   activation='sigmoid',
                                   name='raw_actions')(net)

        # Scale [0, 1] output for each action dimension to proper range
        actions = layers.Lambda(lambda x:
                                (x * self.action_range) + self.action_low,
                                name='actions')(raw_actions)

        # Create Keras model
        self.model = models.Model(inputs=states, outputs=actions)

        # Define loss function using action value (Q value) gradients
        action_gradients = layers.Input(shape=(self.action_size, ))

        loss = K.mean(-action_gradients * actions)

        # Incorporate any additional losses here (e.g. from regularizers)

        # Define optimizer and training function
        optimizer = optimizers.Adam()
        updates_op = optimizer.get_updates(params=self.model.trainable_weights,
                                           loss=loss)
        self.train_fn = K.function(
            inputs=[self.model.input, action_gradients,
                    K.learning_phase()],
            outputs=[],
            updates=updates_op)
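
A hedged usage sketch of train_fn inside the agent's learning step: the action-gradient term dQ/da comes from the companion critic network, and the trailing 1 feeds K.learning_phase() so that dropout and batch normalization run in training mode. critic.get_action_gradients and the (states, actions) experience batch are assumptions from the usual layout of this kind of DDPG agent; they are not shown in the snippet.

action_gradients = np.reshape(
    critic.get_action_gradients([states, actions, 0]),
    (-1, actor.action_size))
actor.train_fn([states, action_gradients, 1])  # one gradient step on the actor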
Example #52
def get_hidden_output(model, inp):
    get_activations = K.function(model.inputs[:1] + [K.learning_phase()], [
        model.layers[5].get_output_at(0),
    ])
    activations = get_activations([inp, 0])
    return activations[0]
Example #53
#
#So that's what you're going to do right now!
#
#X_test from the Banknote Authentication dataset and its model are preloaded. Type model.summary() in the console to check it.

# Import keras backend
import keras.backend as K

# Input tensor from the 1st layer of the model
inp = model.layers[0].input

# Output tensor from the 1st layer of the model
out = model.layers[0].output

# Define a function from inputs to outputs
inp_to_out = K.function([inp], [out])

# Print the results of passing X_test through the 1st layer
print(inp_to_out([X_test]))


#Neural separation
#
#Put on your gloves because you're going to perform brain surgery!
#
#Neurons learn by updating their weights to output values that help them better distinguish between the different output classes in your dataset. You will make use of the inp_to_out() function you just built to visualize the output of two neurons in the first layer of the Banknote Authentication model as it learns.
#
#The model you built in chapter 2 is ready for you to use, just like X_test and y_test. Paste show_code(plot) in the console if you want to check plot().
#
#You're performing heavy-duty work; once it's all done, click through the graphs to watch the separation happen live!
def plot():
Example #54
def play():
    global mouse_pressed
    global current_notes
    global audio_pause
    global needs_update
    global current_params
    global prev_mouse_pos
    global audio_reset
    global instrument
    global songs_loaded
    global autosavenow
    global autosavenum
    global autosave
    global blend
    global blendstate
    global blendfactor
    global keyframe_params
    global keyframe_controls
    global keyframe_paths
    global cur_controls
    global keyframe_magnitudes
    global blend_slerp

    print("Keras version: " + keras.__version__)

    K.set_image_data_format('channels_first')

    print("Loading encoder...")
    model = load_model(dir_name + 'model.h5')
    encoder = Model(inputs=model.input,
                    outputs=model.get_layer('encoder').output)
    decoder = K.function([model.get_layer('decoder').input, K.learning_phase()],
                         [model.layers[-1].output])

    print("Loading gaussian/pca statistics...")
    latent_means = np.load(dir_name + sub_dir_name + '/latent_means.npy')
    latent_stds = np.load(dir_name + sub_dir_name + '/latent_stds.npy')
    latent_pca_values = np.load(
        dir_name + sub_dir_name + '/latent_pca_values.npy')
    latent_pca_vectors = np.load(
        dir_name + sub_dir_name + '/latent_pca_vectors.npy')

    # open a window
    pygame.init()
    pygame.font.init()
    screen = pygame.display.set_mode((int(window_w), int(window_h)))
    notes_surface = screen.subsurface((notes_x, notes_y, notes_w, notes_h))
    pygame.display.set_caption('Neural Composer')

    # start the audio stream
    audio_stream = audio.open(
        format=audio.get_format_from_width(2),
        channels=1,
        rate=sample_rate,
        output=True,
        stream_callback=audio_callback)
    audio_stream.start_stream()

    # main loop
    running = True
    random_song_ix = 0
    cur_len = 0
    blendcycle = 0
    apply_controls()
    while running:
        # process events
        if autosavenow:
            # generate random song
            current_params = np.clip(np.random.normal(
                0.0, 1.0, (num_params,)), -num_sigmas, num_sigmas)
            needs_update = True
            audio_reset = True
            # save slider values
            with open("results/history/autosave" + str(autosavenum)+".txt", "w") as text_file:
                text_file.write(sub_dir_name + "\n")
                text_file.write(str(instrument) + "\n")
                for iter in cur_controls:
                    text_file.write(str(iter) + "\n")
                for iter in current_params:
                    text_file.write(str(iter) + "\n")
            # save song as wave
            audio_pause = True
            audio_reset = True
            save_audio = b''
            while True:
                save_audio += audio_callback(None, 1024, None, None)[0]
                if audio_time == 0:
                    break
            wave_output = wave.open('results/history/autosave' + str(autosavenum)+'.wav', 'w')
            wave_output.setparams(
                (1, 2, sample_rate, 0, 'NONE', 'not compressed'))
            wave_output.writeframes(save_audio)
            wave_output.close()
            audio_pause = False
            autosavenum += 1
            autosavenow = False
            needs_update = True
            audio_reset = True
        blendcycle += 1
        if blend and blendcycle > 10:
            blendcycle = 0
            if blendstate%2 == 0:
                needs_update = True
                current_params = np.copy(keyframe_params[int(blendstate/2)])
                cur_controls = np.copy(keyframe_controls[int(blendstate/2)])
                apply_controls()
            elif blendstate%2 == 1:
                for x in range(0,len(current_params)):
                    current_params[x] = (blendfactor * keyframe_params[int(blendstate/2),x]) + ((1-blendfactor)*keyframe_params[((int(blendstate/2))+1)%len(keyframe_paths),x])
                if blend_slerp:
                    magnitude = (blendfactor * keyframe_magnitudes[int(blendstate/2)]) + ((1-blendfactor)*keyframe_magnitudes[((int(blendstate/2))+1)%len(keyframe_paths)])
                    current_params = current_params * ((sum(current_params*current_params)**-0.5) * magnitude)
                for x in range(0,len(cur_controls)):
                    cur_controls[x] = (blendfactor * keyframe_controls[int(blendstate/2),x]) + ((1-blendfactor)*keyframe_controls[((int(blendstate/2))+1)%len(keyframe_paths),x])
                apply_controls()
                needs_update = True
        for event in pygame.event.get():
            if event.type == pygame.QUIT:  # QUIT BUTTON HIT
                running = False
                break

            elif event.type == pygame.MOUSEBUTTONDOWN:  # MOUSE BUTTON DOWN
                if pygame.mouse.get_pressed()[0]:
                    prev_mouse_pos = pygame.mouse.get_pos()
                    update_mouse_click(prev_mouse_pos)
                    update_mouse_move(prev_mouse_pos)
                elif pygame.mouse.get_pressed()[2]:
                    current_params = np.zeros((num_params,), dtype=np.float32)
                    needs_update = True

            elif event.type == pygame.MOUSEBUTTONUP:   # MOUSE BUTTON UP
                mouse_pressed = 0
                prev_mouse_pos = None

            elif event.type == pygame.MOUSEMOTION and mouse_pressed > 0:  # MOUSE MOTION WHILE PRESSED
                update_mouse_move(pygame.mouse.get_pos())

            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_r:  # KEYDOWN R
                    # generate random song
                    current_params = np.clip(np.random.normal(
                        0.0, 1.0, (num_params,)), -num_sigmas, num_sigmas)
                    needs_update = True
                    audio_reset = True
                if event.key == pygame.K_t:  # KEYDOWN T
                    for x in range(int(num_params/3)+1, num_params):
                        current_params[x] = np.clip(np.random.normal(0.0,1.0), -num_sigmas, num_sigmas)
                    needs_update = True
                if event.key == pygame.K_x:  # KEYDOWN X
                    # generate random song
                    current_params += np.clip(np.random.normal(
                        0.0, 0.3, (num_params,)), -num_sigmas, num_sigmas)
                    needs_update = True
                if event.key == pygame.K_a:  # KEYDOWN A
                    autosave = not autosave
                if event.key == pygame.K_b:  # KEYDOWN B
                    blend = not blend
                    blendstate = 0
                    blendfactor = 1.0
                    if blend:
                        audio_pause = True
                        audio_reset = True
                        needs_update = True
                        blendnum = int(input("The number of songs to be blended "))
                        keyframe_paths = []
                        keyframe_controls = np.zeros((blendnum,len(cur_controls)),dtype=np.float32)
                        keyframe_params = np.zeros((blendnum,num_params),dtype=np.float32)
                        for y in range(blendnum):
                            fileName = input("The file name of the next song to be blended ")
                            if "." not in fileName:
                                fileName = fileName + ".txt"
                            keyframe_paths.append((fileName))
                            fo = open("results/history/" + fileName, "r")
                            if not sub_dir_name == fo.readline()[:-1]:
                                running = False
                                print("incompatible with current model")
                                break
                            instrument = int(fo.readline())
                            for x in range(len(cur_controls)):
                                keyframe_controls[y,x] = float(fo.readline())
                            for x in range(len(current_params)):
                                keyframe_params[y,x] = float(fo.readline())
                            #keyframe_magnitudes[y] = sum(keyframe_params[y]*keyframe_params[y])**0.5
                if event.key == pygame.K_e:  # KEYDOWN E
                    # generate random song with larger variance
                    current_params = np.clip(np.random.normal(0.0, 2.0, (num_params,)), -num_sigmas, num_sigmas)
                    needs_update = True
                    audio_reset = True
                if event.key == pygame.K_PERIOD:
                    current_params /= 1.1
                    needs_update = True
                if event.key == pygame.K_COMMA:
                    current_params *= 1.1
                    needs_update = True
                if event.key == pygame.K_SLASH:
                    current_params *= -1
                    needs_update = True
                if event.key == pygame.K_UP:
                    cur_controls[0] = (210.0 - note_threshold + 1) / 200
                    apply_controls()
                if event.key == pygame.K_DOWN:
                    cur_controls[0] = (210.0 - note_threshold - 1) / 200
                    apply_controls()
                if event.key == pygame.K_s:  # KEYDOWN S
                    # save slider values
                    audio_pause = True
                    fileName = input("File Name to save into ")
                    if "." not in fileName:
                        fileName = fileName + ".txt"
                    with open("results/history/" + fileName, "w") as text_file:
                        if blend:
                            text_file.write(sub_dir_name + "\n")
                            text_file.write("blended song" + "\n")
                            text_file.write(str(len(keyframe_paths)) + "\n")
                            for x in range(len(keyframe_paths)):
                                text_file.write("" + keyframe_paths[x] + "\n")
                        else:
                            text_file.write(sub_dir_name + "\n")
                            text_file.write(str(instrument) + "\n")
                            for iter in cur_controls:
                                text_file.write(str(iter) + "\n")
                            for iter in current_params:
                                text_file.write(str(iter) + "\n")
                if event.key == pygame.K_l:  # KEYDOWN L
                    audio_pause = True
                    needs_update = True
                    audio_reset = True
                    fileName = input("File Name to read ")
                    if "." not in fileName:
                        fileName = fileName + ".txt"
                    fo = open("results/history/" + fileName, "r")
                    print(fo.name)
                    if not sub_dir_name == fo.readline()[:-1]:
                        running = False
                        print("incompatible with current model")
                        break
                    tempDir = fo.readline()
                    if tempDir.startswith("blended song"):
                        blend = True
                        blendnum = int(fo.readline())
                        keyframe_paths = []
                        keyframe_controls = np.zeros((blendnum,len(cur_controls)),dtype=np.float32)
                        keyframe_params = np.zeros((blendnum,num_params),dtype=np.float32)
                        for y in range(blendnum):
                            fileName2 = fo.readline()[:-1]
                            keyframe_paths.append(fileName2)
                            fo2 = open("results/history/" + fileName2, "r")
                            if not sub_dir_name == fo2.readline()[:-1]:
                                running = False
                                print("incompatible with current model")
                                break
                            instrument = int(fo2.readline())
                            for x in range(len(cur_controls)):
                                keyframe_controls[y,x] = float(fo2.readline())
                            for x in range(len(current_params)):
                                keyframe_params[y,x] = float(fo2.readline())
                    else:
                        instrument = int(tempDir)
                        for x in range(len(cur_controls)):
                            cur_controls[x] = float(fo.readline())
                        for x in range(len(current_params)):
                            current_params[x] = float(fo.readline())
                    apply_controls()
                if event.key == pygame.K_o:  # KEYDOWN O

                    if not songs_loaded:
                        print("Loading songs...")
                        try:
                            y_samples = np.load('data/interim/samples.npy')
                            y_lengths = np.load('data/interim/lengths.npy')
                            songs_loaded = True
                        except Exception as e:
                            print("This functionality is to check if the model training went well by reproducing an original song. "
                                  "The composer could not load samples and lengths from model training. "
                                  "If you have the midi files, the model was trained with, process them by using"
                                  " the preprocess_songs.py to find the requested files in data/interim "
                                  "(Load exception: {0}".format(e))

                    if songs_loaded:
                        # check how well the autoencoder can reconstruct a random song
                        print("Random Song Index: " + str(random_song_ix))
                        if is_ae:
                            example_song = y_samples[cur_len:cur_len + num_measures]
                            current_notes = example_song * 255
                            latent_x = encoder.predict(np.expand_dims(
                                example_song, 0), batch_size=1)[0]
                            cur_len += y_lengths[random_song_ix]
                            random_song_ix += 1
                        else:
                            random_song_ix = np.array(
                                [random_song_ix], dtype=np.int64)
                            latent_x = encoder.predict(
                                random_song_ix, batch_size=1)[0]
                            random_song_ix = (
                                random_song_ix + 1) % model.layers[0].input_dim

                        if use_pca:
                            current_params = np.dot(
                                latent_x - latent_means, latent_pca_vectors.T) / latent_pca_values
                        else:
                            current_params = (
                                latent_x - latent_means) / latent_stds

                        needs_update = True
                        audio_reset = True

                if event.key == pygame.K_m:  # KEYDOWN M
                    # save song as midi
                    audio_pause = True
                    audio_reset = True
                    fileName = input("File Name to save into ")
                    if "." not in fileName:
                        fileName = fileName + ".mid"
                    midi_utils.samples_to_midi(
                        current_notes, 'results/history/' + fileName, note_threshold)
                    audio_pause = False

                if event.key == pygame.K_w:  # KEYDOWN W
                    # save song as wave
                    audio_pause = True
                    audio_reset = True
                    fileName = input("File Name to save into ")
                    if "." not in fileName:
                        fileName = fileName + ".wav"
                    save_audio = b''
                    while True:
                        save_audio += audio_callback(None, 1024, None, None)[0]
                        if audio_time == 0:
                            break
                    wave_output = wave.open('results/history/' + fileName, 'w')  # fileName already ends in .wav
                    wave_output.setparams(
                        (1, 2, sample_rate, 0, 'NONE', 'not compressed'))
                    wave_output.writeframes(save_audio)
                    wave_output.close()
                    audio_pause = False

                if event.key == pygame.K_ESCAPE:  # KEYDOWN ESCAPE
                    # exit application
                    running = False
                    break

                if event.key == pygame.K_SPACE:  # KEYDOWN SPACE
                    # toggle pause/play audio
                    audio_pause = not audio_pause

                if event.key == pygame.K_TAB:  # KEYDOWN TAB
                    # reset audio playing
                    audio_reset = True
                    if autosave and not autosavenow:
                        autosavenow = True

                if event.key == pygame.K_1:  # KEYDOWN 1
                    # play instrument 0
                    instrument = 0

                if event.key == pygame.K_2:  # KEYDOWN 2
                    # play instrument 1
                    instrument = 1

                if event.key == pygame.K_3:  # KEYDOWN 3
                    # play instrument 2
                    instrument = 2

                if event.key == pygame.K_4:  # KEYDOWN 4
                    # play instrument 3
                    instrument = 3

                if event.key == pygame.K_5:  # KEYDOWN 5
                    # play instrument 4
                    instrument = 4

                if event.key == pygame.K_c:  # KEYDOWN C
                    # re-encode the current (possibly edited) notes into latent space
                    y = np.expand_dims(
                        np.where(current_notes > note_threshold, 1, 0), 0)
                    latent_x = encoder.predict(y)[0]
                    if use_pca:
                        current_params = np.dot(
                            latent_x - latent_means, latent_pca_vectors.T) / latent_pca_values
                    else:
                        current_params = (
                            latent_x - latent_means) / latent_stds
                    needs_update = True

        # check if params were changed so that a new song should be generated
        if needs_update:
            if use_pca:
                latent_x = latent_means + \
                    np.dot(current_params * latent_pca_values,
                           latent_pca_vectors)
            else:
                latent_x = latent_means + latent_stds * current_params
            latent_x = np.expand_dims(latent_x, axis=0)
            y = decoder([latent_x, 0])[0][0]
            current_notes = (y * 255).astype(np.uint8)
            needs_update = False

        # draw GUI to the screen
        screen.fill(background_color)
        draw_notes(screen, notes_surface)
        draw_sliders(screen)
        draw_controls(screen)

        # flip the screen buffer
        pygame.display.flip()
        pygame.time.wait(10)

    # if app is exited, close the audio stream
    audio_stream.stop_stream()
    audio_stream.close()
    audio.terminate()
Example #55
		combination_features = style_layer_output[2, :, :, :]
		loss += beta * style_loss(style_features, combination_features) / len(style_layers)

	loss += gamma * total_variation_loss(combination_placeholder)
	
	return loss

content_layer = 'block5_conv2'
style_layers = ['block1_conv1', 'block2_conv1',
                'block3_conv1', 'block4_conv1',
                'block5_conv1']
loss = loss_tensor(content_layer, style_layers)
grads = K.gradients(loss, combination_placeholder)

outputs = [loss] + grads
f_outputs = K.function(inputs=[combination_placeholder], outputs=outputs)

class Evaluator:
	def __init__(self):
		self.loss_values, self.grad_values = None, None

	def eval_loss_grads(self, x):
		x = x.reshape((1, nrows, ncols, 3))
		outs = f_outputs([x])
		loss = outs[0]
		if len(outs[1:]) == 1:
			grads = outs[1].flatten().astype('float64')
		else:
			grads = np.array(outs[1:], dtype='float64').flatten()
		return loss, grads
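# A minimal usage sketch (assumed, not part of the original snippet): scipy's
# fmin_l_bfgs_b accepts an objective that returns (loss, grads) in one call
# when no separate fprime is given, which is what eval_loss_grads provides.
from scipy.optimize import fmin_l_bfgs_b

evaluator = Evaluator()
x = np.random.uniform(0, 255, (1, nrows, ncols, 3)).flatten()  # assumed start image
for i in range(10):
	x, min_loss, info = fmin_l_bfgs_b(evaluator.eval_loss_grads, x, maxfun=20)
	print('Iteration %d, loss: %f' % (i + 1, min_loss))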
Example #56
    def build_model(self):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        # Define input layers
        states = layers.Input(shape=(self.state_size, ), name='states')
        actions = layers.Input(shape=(self.action_size, ), name='actions')

        # Add hidden layer(s) for state pathway
        net_states = layers.Dense(
            units=32,
            use_bias=False,
            activation='relu',
            kernel_regularizer=regularizers.l2(0.01),
            activity_regularizer=regularizers.l1(0.01))(states)
        net_states = layers.BatchNormalization()(net_states)
        net_states = layers.Dropout(0.5)(net_states)

        net_states = layers.Dense(
            units=64,
            use_bias=False,
            activation='relu',
            kernel_regularizer=regularizers.l2(0.01),
            activity_regularizer=regularizers.l1(0.01))(net_states)  # chain from the previous layer, not the raw input
        net_states = layers.BatchNormalization()(net_states)
        net_states = layers.Dropout(0.5)(net_states)

        net_states = layers.Dense(
            units=128,
            use_bias=False,
            activation='relu',
            kernel_regularizer=regularizers.l2(0.01),
            activity_regularizer=regularizers.l1(0.01))(net_states)
        net_states = layers.BatchNormalization()(net_states)
        net_states = layers.Dropout(0.5)(net_states)

        net_states = layers.Dense(
            units=64,
            use_bias=False,
            activation='relu',
            kernel_regularizer=regularizers.l2(0.01),
            activity_regularizer=regularizers.l1(0.01))(net_states)
        net_states = layers.BatchNormalization()(net_states)
        net_states = layers.Dropout(0.5)(net_states)

        net_states = layers.Dense(
            units=128,
            use_bias=False,
            activation='relu',
            kernel_regularizer=regularizers.l2(0.01),
            activity_regularizer=regularizers.l1(0.01))(net_states)
        net_states = layers.BatchNormalization()(net_states)
        net_states = layers.Dropout(0.5)(net_states)

        # Add hidden layer(s) for action pathway
        net_actions = layers.Dense(
            units=32,
            use_bias=False,
            activation='relu',
            kernel_regularizer=regularizers.l2(0.01),
            activity_regularizer=regularizers.l1(0.01))(actions)
        net_actions = layers.BatchNormalization()(net_actions)
        net_actions = layers.Dropout(0.5)(net_actions)

        net_actions = layers.Dense(
            units=64,
            use_bias=False,
            activation='relu',
            kernel_regularizer=regularizers.l2(0.01),
            activity_regularizer=regularizers.l1(0.01))(net_actions)
        net_actions = layers.BatchNormalization()(net_actions)
        net_actions = layers.Dropout(0.5)(net_actions)

        net_actions = layers.Dense(
            units=128,
            use_bias=False,
            activation='relu',
            kernel_regularizer=regularizers.l2(0.01),
            activity_regularizer=regularizers.l1(0.01))(net_actions)
        net_actions = layers.BatchNormalization()(net_actions)
        net_actions = layers.Dropout(0.5)(net_actions)

        net_actions = layers.Dense(
            units=64,
            use_bias=False,
            activation='relu',
            kernel_regularizer=regularizers.l2(0.01),
            activity_regularizer=regularizers.l1(0.01))(net_actions)
        net_actions = layers.BatchNormalization()(net_actions)
        net_actions = layers.Dropout(0.5)(net_actions)

        net_actions = layers.Dense(
            units=128,
            use_bias=False,
            activation='relu',
            kernel_regularizer=regularizers.l2(0.01),
            activity_regularizer=regularizers.l1(0.01))(net_actions)
        net_actions = layers.BatchNormalization()(net_actions)
        net_actions = layers.Dropout(0.5)(net_actions)

        # Try different layer sizes, activations, add batch normalization, regularizers, etc.

        # Combine state and action pathways
        net = layers.Add()([net_states, net_actions])
        net = layers.Activation('relu')(net)

        # Add more layers to the combined network if needed

        # Add final output layer to produce action values (Q values)
        Q_values = layers.Dense(units=1, name='q_values')(net)

        # Create Keras model
        self.model = models.Model(inputs=[states, actions], outputs=Q_values)

        # Define optimizer and compile model for training with built-in loss function
        optimizer = optimizers.Adam()
        self.model.compile(optimizer=optimizer, loss='mse')

        # Compute action gradients (derivative of Q values w.r.t. actions)
        action_gradients = K.gradients(Q_values, actions)

        # Define an additional function to fetch action gradients (to be used by actor model)
        self.get_action_gradients = K.function(
            inputs=[*self.model.input, K.learning_phase()],
            outputs=action_gradients)
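        # Usage sketch (assumed, not part of the original class): in DDPG the
        # actor is updated with the critic's dQ/da gradients, e.g.:
        #   grads = self.get_action_gradients([states, actions, 0])[0]  # 0 = test phase
        #   actor.train_fn([states, grads, 1])  # assumed actor-side training function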
Example #57
            continue
        for j in range(1, noiseLevels + 1):
            tempImgName = imgName[0:3] + "_" + "{:0>2}".format(i) + "_" + str(
                j) + ".bmp"

            tempImgScore = mos_scores[np.where(
                tempImgName.lower() == mos_names)[0][0]]
            allImgNames.append(tempImgName)
            allImgScores.append(tempImgScore)

modelIndex = int(float(sys.argv[4]))
model = constructDNNModel(modelIndex)

# get_layer_output = K.function([model.layers[0].input], [model.layers[-2].get_output(train=False)])
get_layer_output = K.function(
    [model.inputs[i].input for i in model.input_order],
    [model.nodes['dense2'].get_output(train=False)])

# Making the data for the multi-patch network:

hyperImages = np.empty(
    (len(allImgScores), denseLayerSize,
     int((imgRows - patchSize) / (patchSize * patchOverlap)) + 1,
     int((imgCols - patchSize) / (patchSize * patchOverlap)) + 1),
    dtype=float)  # array dimensions must be integers
labels = np.empty((len(allImgScores), ), dtype=float)
# pdb.set_trace()
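# Worked check of the grid arithmetic under assumed values (hypothetical):
# imgRows = 384, patchSize = 32, patchOverlap = 0.5 (stride fraction)
# -> grid height = int((384 - 32) / (32 * 0.5)) + 1 = 22 + 1 = 23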

for i in range(len(allImgNames)):
    # print str(i) + "/" + str(len(allImgNames))
    imgName = allImgNames[i]
Example #58
def main():

    ## retrieve arguments and print out in shell
    args = args_parser()
    ## print out information on shell
    info_print(args)

    ## create output directory if not available ##

    #### Keras Model Loading ####
    if args.model.lower() == "vgg16":
        from keras.applications.vgg16 import VGG16 as keras_model, preprocess_input
    elif args.model.lower() == "vgg19":
        from keras.applications.vgg19 import VGG19 as keras_model, preprocess_input

    ## Define local variables in main environment
    if not "content/" in args.content_image_path:
        content_image_path = "content/" + args.content_image_path
        base_path = args.content_image_path
    else:
        content_image_path = args.content_image_path
        base_path = args.content_image_path[-1]

    ## remove file extension
    base_path = os.path.splitext(base_path)[0]

    output_subdir = args.output_subdir
    if output_subdir is None:
        ## Create output subdirectory
        output_subdir = "output/{}".format(base_path)
        if not os.path.exists(output_subdir):
            os.makedirs(output_subdir)
    else:
        if not "output/" in output_subdir:
            output_subdir = "output/" + output_subdir
        if not os.path.exists(output_subdir):
            os.makedirs(output_subdir)

    if not "style/" in args.style_image_path:
        style_image_path = "style/" + args.style_image_path
    else:
        style_image_path = args.style_image_path

    init_image = args.init_image
    image_width = args.image_width
    image_height = args.image_height
    img_size = (image_height, image_width)
    content_weight = args.content_weight
    style_weights = args.style_weights
    total_variation_weight = args.total_variation_weight
    num_iter = args.num_iter
    model = args.model
    rescale_image = str_to_bool(args.rescale_image)
    content_layer = args.content_layer
    if args.style_layers is None:
        style_layers = [
            'block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1',
            'block5_conv1'
        ]
    else:
        style_layers = args.style_layers

    print(style_layers)

    original_size = Image.open(content_image_path).size

    ###### Content Image ######
    ## Get preprocessed content image array
    content_image = preprocess_image(content_image_path, img_size,
                                     preprocess_input)
    ## Parse content_image numpy array as Keras Backend Variable
    content_image = K.variable(content_image,
                               dtype="float32",
                               name="content_image")

    ###### Style Image ######
    ## Get preprocessed style image array
    style_image = preprocess_image(style_image_path, img_size,
                                   preprocess_input)
    ## Parse style image numpy array as Keras Backend Variable
    style_image = K.variable(style_image, dtype="float32", name="style_image")

    ###### Generated Image ######
    ## Init generated image as numpy array and parse into Keras Backend Variable
    if init_image == "content":
        generated_image = preprocess_image(content_image_path, img_size,
                                           preprocess_input)
    elif init_image == "random":
        generated_image = np.random.randint(256,
                                            size=(image_width, image_height,
                                                  3)).astype("float64")
        generated_image = preprocess_input(
            np.expand_dims(generated_image, axis=0))
    else:
        import sys
        print("Invalid init_image; expected 'content' or 'random'")
        sys.exit(1)
    fname = output_subdir + "/generated_image_at_iteration_0.jpg"
    save_img(path=fname, x=generated_image[0])

    ## Define generate image variable placeholder for later optimization
    # Theano
    if K.image_data_format() == "channels_first":
        generated_image_placeholder = K.placeholder(shape=(1, 3, image_height,
                                                           image_width))
    # Tensorflow
    else:
        generated_image_placeholder = K.placeholder(shape=(1, image_height,
                                                           image_width, 3))

    ###### Initialize one Keras model whose single input tensor concatenates the 3 images ######
    input_tensor = K.concatenate(
        [content_image, style_image, generated_image_placeholder], axis=0)
    ## input_tensor has shape (3, image_height, image_width, 3): the first axis stacks the 3 images, the last is the color channel (TensorFlow ordering)

    # build the keras network with our 3 images as input
    model = keras_model(input_tensor=input_tensor,
                        weights='imagenet',
                        include_top=False)

    # get the symbolic outputs of each layer (we gave them unique names). [Feature representations/maps in form of 4D tensors at each layer]
    outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
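    # e.g. outputs_dict['block5_conv2'] is the stacked activation tensor whose
    # first axis indexes (content, style, generated), as sliced below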

    # combine these loss functions into a single scalar
    loss = K.variable(0.0)
    layer_features = outputs_dict[content_layer]

    ############# Content extraction: #############
    # retrieve content_image output for content_layer
    content_image_features = layer_features[0, :, :, :]
    # retrieve generated_image output from content_layer
    generated_image_features = layer_features[2, :, :, :]
    # get loss containing only content loss
    loss = loss + content_weight * content_loss(content_image_features,
                                                generated_image_features)

    ############# Style Extraction:  #############
    if len(style_weights) == 1:
        style_weights = [style_weights[0]] * len(style_layers)
    else:
        assert len(style_weights) == len(style_layers)
        style_weights = [float(style_weight) for style_weight in style_weights]

    session = K.get_session()
    for style_weight, layer_name in zip(style_weights, style_layers):
        ## get feature activations from layers
        layer_features = outputs_dict[layer_name]
        ## retrieve style_image output activations for a style_layer
        style_image_features = layer_features[1, :, :, :]
        ## retrieve generated_image output activations for a style_layer
        generated_image_features = layer_features[2, :, :, :]
        ## get loss containing content loss and style loss
        loss = loss + (style_weight / len(style_layers)) * style_loss(
            style_image_features, generated_image_features, img_size, session)

    ## get loss containing content loss, style loss and total variation loss
    loss = loss + total_variation_weight * total_variation_loss(
        generated_image_placeholder, img_size)

    # get the gradients of the generated image wrt. the loss
    grads = K.gradients(loss, generated_image_placeholder)

    # Define outputs list to have loss included
    outputs = [loss]

    # add the gradients to the outputs instance
    if isinstance(grads, (list, tuple)):
        outputs += grads
    else:
        outputs.append(grads)

    ## Define keras function with input the placeholder of the generated image and output the {loss and gradients} for learning
    f_outputs = K.function(inputs=[generated_image_placeholder],
                           outputs=outputs)

    class Evaluator(object):
        def __init__(self):
            self.loss_value = None
            self.grad_values = None

        def loss(self, x):
            assert self.loss_value is None
            loss_value, grad_values = eval_loss_and_grads(
                x, img_size, f_outputs)
            self.loss_value = loss_value
            self.grad_values = grad_values
            return self.loss_value

        def grads(self, x):
            assert self.loss_value is not None
            grad_values = np.copy(self.grad_values)
            self.loss_value = None
            self.grad_values = None
            return grad_values

    # this Evaluator class makes it possible
    # to compute loss and gradients in one pass
    # while retrieving them via two separate functions,
    # "loss" and "grads". This is done because scipy.optimize
    # requires separate functions for loss and gradients,
    # but computing them separately would be inefficient.

    evaluator = Evaluator()

    # run scipy-based optimization (L-BFGS) over the pixels of the generated image
    # so as to minimize the neural style loss
    loss_history = [None] * num_iter
    for i in range(num_iter):
        print("Start of iteration:", i + 1)
        start_time = time.time()
        generated_image, loss_history[i], info = fmin_l_bfgs_b(
            evaluator.loss,
            generated_image.flatten(),
            fprime=evaluator.grads,
            maxfun=20)
        print("Current loss value:", loss_history[i])
        # save current generated image
        img = deprocess_image(generated_image.copy(), img_shape=img_size)
        if rescale_image:
            img = array_to_img(img[0])
            img = img.resize(original_size)
            img = img_to_array(img)

        fname = output_subdir + "/generated_image_at_iteration_%s.png" % str(
            i + 1)
        save_img(path=fname, x=img)
        end_time = time.time()
        print("Image saved at:", fname)
        print("Iteration %s completed in %ds" %
              (str(i + 1), end_time - start_time))

    # summarize history for loss
    plt.figure(3, figsize=(7, 5))
    plt.plot(loss_history)
    plt.title("loss process during neural style transfer")
    plt.ylabel("loss")
    plt.xlabel("iteration")
    plt.savefig(output_subdir + "/loss_history.jpg")
    plt.close()
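# Assumed entry point (not shown in the scraped snippet):
if __name__ == "__main__":
    main()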
Example #59
    style_masks = mask_features[layer][STYLE, :, :, :]
    target_masks = mask_features[layer][TARGET, :, :, :]
    sl = style_loss(style_feat, target_feat, style_masks, target_masks)
    loss += (style_weight / len(style_feature_layers)) * sl

loss += total_variation_weight * total_variation_loss(target_image)
loss_grads = K.gradients(loss, target_image)

# Gather loss and gradients into a single backend function for efficiency
outputs = [loss]
if isinstance(loss_grads, (list, tuple)):
    outputs += loss_grads
else:
    outputs.append(loss_grads)

f_outputs = K.function([target_image], outputs)


def eval_loss_and_grads(x):
    if K.image_data_format() == 'channels_first':
        x = x.reshape((1, 3, img_nrows, img_ncols))
    else:
        x = x.reshape((1, img_nrows, img_ncols, 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values
Example #60
# we will assume the weight of the content loss is 1
# and only weight the style losses
style_weights = [0.2, 0.4, 0.3, 0.5, 0.2]

# create the total loss which is the sum of content + style loss
loss = K.mean(K.square(content_model.output - content_target))

for w, symbolic, actual in zip(style_weights, symbolic_conv_outputs,
                               style_layers_outputs):
    # gram_matrix() expects a (H, W, C) as input
    loss += w * style_loss(symbolic[0], actual[0])

# once again, create the gradients and loss + grads function
# note: it doesn't matter which model's input you use
# they are both pointing to the same keras Input layer in memory
grads = K.gradients(loss, vgg.input)

# just like theano.function
get_loss_and_grads = K.function(inputs=[vgg.input], outputs=[loss] + grads)


def get_loss_and_grads_wrapper(x_vec):
    l, g = get_loss_and_grads([x_vec.reshape(*batch_shape)])
    return l.astype(np.float64), g.flatten().astype(np.float64)


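# `minimize` is a project-local helper that is not shown in this snippet. A
# plausible sketch (assumed), mirroring the L-BFGS loops of the earlier
# examples; the wrapper above already returns (loss, grads) in the format
# fmin_l_bfgs_b expects:
from scipy.optimize import fmin_l_bfgs_b

def minimize(fn, epochs, batch_shape):
    x = np.random.randn(np.prod(batch_shape))  # random starting image, flattened
    for i in range(epochs):
        x, l, _ = fmin_l_bfgs_b(fn, x, maxfun=20)
        x = np.clip(x, -127, 127)  # assumed: keep values in the VGG-preprocessed range
        print("iter=%d, loss=%s" % (i, l))
    return x.reshape(*batch_shape)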
final_img = minimize(get_loss_and_grads_wrapper, 10, batch_shape)
plt.imshow(scale_img(final_img))
plt.show()