Example #1
    def on_epoch_end(self, epoch, logs=None):
        self.losses.append(logs.get('loss'))
        self.num_epochs = epoch
        if epoch % 6 != 0:
            return
        intermediate_layer_model = Model(
            inputs=self.model.input,
            outputs=self.model.get_layer('abundances').output)
        abundances = intermediate_layer_model.predict(self.input)
        endmembers = self.model.layers[-1].get_weights()[0]
        plotHist(self.losses, 33)
        if self.plotGT:
            endmember_order = order_endmembers(endmembers, self.endmembersGT)
            plotEndmembersAndGT(endmembers, self.endmembersGT, endmember_order)
        else:
            plotEndmembers(self.num_endmembers, endmembers)

        # _collected_trainable_weights is a private Keras attribute; this
        # prints the third-to-last trainable weight tensor for debugging.
        print(K.eval(self.model._collected_trainable_weights[-3]))
        return
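A minimal sketch of how a callback like this plugs into training; the wrapper class name PlotWhileTraining, its constructor arguments, and the pixel array are assumptions, while the callbacks argument to model.fit is the standard Keras API:

from keras.callbacks import Callback

class PlotWhileTraining(Callback):  # hypothetical name for the class holding on_epoch_end above
    def __init__(self, input_data, endmembers_gt=None):
        super(PlotWhileTraining, self).__init__()
        self.input = input_data              # pixels, shape (n_pixels, n_bands)
        self.endmembersGT = endmembers_gt
        self.plotGT = endmembers_gt is not None
        self.losses = []

    # ... on_epoch_end as defined above ...

# model.fit(pixels, pixels, epochs=60, callbacks=[PlotWhileTraining(pixels)])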
Example #2
    def on_epoch_end(self, epoch, logs=None):
        self.losses.append(logs.get('loss'))
        self.num_epochs = epoch
        if self.plot_every_n == 0 or epoch % self.plot_every_n != 0:
            return
        if self.plotS:
            intermediate_layer_model = Model(
                inputs=self.model.input,
                outputs=self.model.get_layer('abundances').output)
            abundances = intermediate_layer_model.predict(self.input)
            if self.size is None:
                self.size = (int(np.sqrt(abundances.shape[0])),
                             int(np.sqrt(abundances.shape[0])))
        endmembers = self.model.layers[-1].get_weights()[0]

        if self.plotGT:
            endmember_order = order_endmembers(endmembers, self.endmembersGT)
            plotEndmembersAndGT(self.endmembersGT, endmembers, endmember_order)
            if self.plotS:
                plotAbundancesSimple(self.num_endmembers,
                                     (self.size[0], self.size[1]),
                                     abundances,
                                     endmember_order,
                                     use_ASC=1,
                                     figure_nr=10)
        else:
            plotEndmembers(self.num_endmembers, endmembers)
            if self.plotS:
                plotAbundancesSimple(self.num_endmembers,
                                     (self.size[0], self.size[1]),
                                     abundances,
                                     None,
                                     use_ASC=1,
                                     figure_nr=10)
        return
Example #3
class VGG19Features(object):
    def __init__(
        self,
        session,
        feature_layers=None,
        feature_weights=None,
        gram_weights=None,
        default_gram=0.1,
        original_scale=False,
        eager=False,
    ):
        if not eager:
            K.set_session(session)
        self.base_model = VGG19(include_top=False, weights="imagenet")
        if feature_layers is None:
            feature_layers = [
                "input_1",
                "block1_conv2",
                "block2_conv2",
                "block3_conv2",
                "block4_conv2",
                "block5_conv2",
            ]
        self.layer_names = [l.name for l in self.base_model.layers]
        for k in feature_layers:
            if k not in self.layer_names:
                raise KeyError("Invalid layer {}. Available layers: {}".format(
                    k, self.layer_names))
        self.feature_layers = feature_layers
        features = [
            self.base_model.get_layer(k).output for k in feature_layers
        ]
        self.model = Model(inputs=self.base_model.input, outputs=features)
        if feature_weights is None:
            feature_weights = len(feature_layers) * [1.0]
        if gram_weights is None:
            gram_weights = len(feature_layers) * [default_gram]
        elif isinstance(gram_weights, (int, float)):
            gram_weights = len(feature_layers) * [gram_weights]
        self.feature_weights = feature_weights
        self.gram_weights = gram_weights
        assert len(self.feature_weights) == len(features)
        self.use_gram = np.max(self.gram_weights) > 0.0
        self.original_scale = original_scale

        self.variables = self.base_model.weights

    def extract_features(self, x):
        """x should be rgb in [-1,1]."""
        x = self.preprocess_input(x)
        features = self.model.predict(x)
        return features

    def make_features_op(self, x):
        """x should be an rgb tensor in [-1, 1]. This function is just used for testing."""
        if self.original_scale:
            x = tf.image.resize_bilinear(x, [256, 256])
            bs = tf.shape(x)[0]
            # deterministic center crop to 224x224 (instead of tf.random_crop)
            x = x[:, 16:240, 16:240, :]

        x = self.preprocess_input(x)
        x_features = self.model(x)
        return x_features

    def grams(self, fs):
        gs = list()
        for f in fs:
            bs, h, w, c = f.shape.as_list()
            bs = -1 if bs is None else bs
            f = tf.reshape(f, [bs, h * w, c])
            ft = tf.transpose(f, [0, 2, 1])
            g = tf.matmul(ft, f)
            g = g / (4.0 * h * w)
            gs.append(g)
        return gs

    def make_loss_op(self, x, y):
        """x, y should be rgb tensors in [-1,1]. Uses l1 and spatial average."""
        if self.original_scale:
            xy = tf.concat([x, y], axis=0)
            xy = tf.image.resize_bilinear(xy, [256, 256])
            bs = tf.shape(xy)[0]
            xy = tf.random_crop(xy, [bs, 224, 224, 3])
            x, y = tf.split(xy, 2, 0)

        x = self.preprocess_input(x)
        x_features = self.model(x)

        y = self.preprocess_input(y)
        y_features = self.model(y)

        x_grams = self.grams(x_features)
        y_grams = self.grams(y_features)

        losses = [
            tf.reduce_mean(tf.abs(xf - yf))
            for xf, yf in zip(x_features, y_features)
        ]
        gram_losses = [
            tf.reduce_mean(tf.abs(xg - yg))
            for xg, yg in zip(x_grams, y_grams)
        ]

        for i in range(len(losses)):
            losses[i] = self.feature_weights[i] * losses[i]
            gram_losses[i] = self.gram_weights[i] * gram_losses[i]
        loss = tf.add_n(losses)
        if self.use_gram:
            loss = loss + tf.add_n(gram_losses)

        self.losses = losses
        self.gram_losses = gram_losses

        return loss

    def make_nll_op(self,
                    x,
                    y,
                    log_variances,
                    gram_log_variances=None,
                    calibrate=True):
        """x, y should be rgb tensors in [-1,1]. This version treats every
        layer independently."""
        use_gram = gram_log_variances is not None
        if self.original_scale:
            xy = tf.concat([x, y], axis=0)
            xy = tf.image.resize_bilinear(xy, [256, 256])
            bs = tf.shape(xy)[0]
            xy = tf.random_crop(xy, [bs, 224, 224, 3])
            x, y = tf.split(xy, 2, 0)

        x = self.preprocess_input(x)
        x_features = self.model(x)

        y = self.preprocess_input(y)
        y_features = self.model(y)

        if use_gram:
            x_grams = self.grams(x_features)
            y_grams = self.grams(y_features)

        if len(log_variances) == 1:
            log_variances = len(x_features) * [log_variances[0]]

        feature_ops = [
            _ll_loss(xf, yf, logvar, calibrate=calibrate)
            for xf, yf, logvar in zip(x_features, y_features, log_variances)
        ]
        losses = [f[0] for f in feature_ops]
        self.losses = losses
        calibrations = [f[1] for f in feature_ops]
        self.calibrations = calibrations
        if use_gram:
            gram_ops = [
                _ll_loss(xg, yg, glogvar) for xg, yg, glogvar in zip(
                    x_grams, y_grams, gram_log_variances)
            ]
            gram_losses = [g[0] for g in gram_ops]
            self.gram_losses = gram_losses
            gram_calibrations = [g[1] for g in gram_ops]
            self.gram_calibrations = gram_calibrations

        loss = tf.add_n(losses)
        if use_gram:
            loss = loss + tf.add_n(gram_losses)

        return loss

    def make_l1_nll_op(self, x, y, log_variance):
        """x, y should be rgb tensors in [-1,1]. Uses make_loss_op to compute
        version compatible with previous experiments."""

        rec_loss = 1e-3 * self.make_loss_op(x, y)
        dim = np.prod(x.shape.as_list()[1:])
        log_gamma = log_variance
        gamma = tf.exp(log_gamma)
        log2pi = np.log(2.0 * np.pi)
        likelihood = 0.5 * dim * (rec_loss / gamma + log_gamma + log2pi)

        return likelihood

    def make_style_op(self, x, y):
        __feature_weights = self.feature_weights
        __gram_weights = self.gram_weights
        self.feature_weights = [0.01 for _ in __feature_weights]
        self.gram_weights = [1.0 for _ in __gram_weights]
        loss = self.make_loss_op(x, y)
        self.feature_weights = __feature_weights
        self.gram_weights = __gram_weights
        return loss

    def preprocess_input(self, x):
        """Preprocesses a tensor encoding a batch of images.

        1. Transform range [-1, 1] to [0, 255.0]
        2. center around imagenet mean RGB values

        Parameters
        ----------
        x : tf.Tensor
            input tensor, 4D in [-1,1]

        Returns
        -------
        Preprocessed tensor : tf.Tensor
        """
        # from [-1, 1] to [0, 255.0]
        x = (x + 1.0) / 2.0 * 255.0
        # 'RGB'->'BGR'
        x = x[:, :, :, ::-1]
        # Zero-center by mean pixel
        x = x - np.array([103.939, 116.779, 123.68]).reshape((1, 1, 1, 3))
        return x
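A minimal graph-mode usage sketch of this class as a perceptual loss; the placeholder shapes and random inputs are assumptions, and no variable initializer is run because the constructor loads the imagenet weights through Keras:

import numpy as np
import tensorflow as tf

sess = tf.Session()
x = tf.placeholder(tf.float32, [None, 224, 224, 3])  # rgb in [-1, 1]
y = tf.placeholder(tf.float32, [None, 224, 224, 3])

vgg = VGG19Features(sess)      # default feature layers and weights
loss = vgg.make_loss_op(x, y)  # weighted per-layer L1 plus optional Gram terms

a = np.random.uniform(-1, 1, (2, 224, 224, 3)).astype(np.float32)
b = np.random.uniform(-1, 1, (2, 224, 224, 3)).astype(np.float32)
print(sess.run(loss, feed_dict={x: a, y: b}))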
Example #4
class VGG19Features(object):
    def __init__(self,
                 session,
                 feature_layers=None,
                 feature_weights=None,
                 gram_weights=None):
        K.set_session(session)
        self.base_model = VGG19(include_top=False, weights='imagenet')
        if feature_layers is None:
            feature_layers = [
                "input_1", "block1_conv2", "block2_conv2", "block3_conv2",
                "block4_conv2", "block5_conv2"
            ]
        self.layer_names = [l.name for l in self.base_model.layers]
        for k in feature_layers:
            if k not in self.layer_names:
                raise KeyError("Invalid layer {}. Available layers: {}".format(
                    k, self.layer_names))
        features = [
            self.base_model.get_layer(k).output for k in feature_layers
        ]
        self.model = Model(inputs=self.base_model.input, outputs=features)
        if feature_weights is None:
            feature_weights = len(feature_layers) * [1.0]
        if gram_weights is None:
            gram_weights = len(feature_layers) * [0.1]
        self.feature_weights = feature_weights
        self.gram_weights = gram_weights
        assert len(self.feature_weights) == len(features)
        self.use_gram = np.max(self.gram_weights) > 0.0

        self.variables = self.base_model.weights

    def extract_features(self, x):
        """x should be rgb in [-1,1]."""
        x = preprocess_input(x)
        features = self.model.predict(x)
        return features

    def make_feature_ops(self, x):
        """x should be rgb tensor in [-1,1]."""
        x = preprocess_input(x)
        features = self.model(x)
        return features

    def grams(self, fs):
        gs = list()
        for f in fs:
            bs, h, w, c = f.shape.as_list()
            bs = -1 if bs is None else bs  # batch dim may be None for placeholders
            f = tf.reshape(f, [bs, h * w, c])
            ft = tf.transpose(f, [0, 2, 1])
            g = tf.matmul(ft, f)
            g = g / (c * h * w)
            gs.append(g)
        return gs

    def make_loss_op(self, x, y):
        """x, y should be rgb tensors in [-1,1]."""
        x = preprocess_input(x)
        x_features = self.model(x)
        self.x_features = x_features

        y = preprocess_input(y)
        y_features = self.model(y)
        self.y_features = y_features

        x_grams = self.grams(x_features)
        self.x_grams = x_grams
        y_grams = self.grams(y_features)
        self.y_grams = y_grams

        losses = [
            tf.reduce_mean(tf.abs(xf - yf))
            for xf, yf in zip(x_features, y_features)
        ]
        gram_losses = [
            tf.reduce_mean(tf.abs(xg - yg))
            for xg, yg in zip(x_grams, y_grams)
        ]

        for i in range(len(losses)):
            losses[i] = self.feature_weights[i] * losses[i]
            gram_losses[i] = self.gram_weights[i] * gram_losses[i]

        loss = tf.add_n(losses)
        if self.use_gram:
            loss = loss + tf.add_n(gram_losses)
        self.losses = losses
        self.gram_losses = gram_losses
        return loss
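Each Gram matrix produced by grams has shape [batch, c, c]: the feature map is flattened to [batch, h*w, c] and multiplied by its own transpose. A small NumPy sketch of the same computation (this example normalizes by c*h*w; Example #3 divides by 4*h*w instead):

import numpy as np

def gram_np(f):
    bs, h, w, c = f.shape
    f = f.reshape(bs, h * w, c)
    g = np.matmul(f.transpose(0, 2, 1), f)  # [bs, c, c]
    return g / (c * h * w)

features = np.random.rand(2, 8, 8, 64).astype(np.float32)
print(gram_np(features).shape)  # (2, 64, 64)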
Example #5
def get_activations(model, layer_name, data):
    """Return the activations of `layer_name` for `data`."""
    intermediate_layer_model = Model(
        inputs=model.input, outputs=model.get_layer(layer_name).output)
    return intermediate_layer_model.predict(data)
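A usage sketch; autoencoder and pixels are hypothetical names for a Keras model containing a layer named 'abundances' and its input array:

abundances = get_activations(autoencoder, 'abundances', pixels)
print(abundances.shape)  # (n_pixels, n_endmembers)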
Example #6
class VGG19Features(object):
    def __init__(self, session, feature_layers=None, feature_weights=None):
        K.set_session(session)
        self.base_model = VGG19(include_top=False, weights='imagenet')
        if feature_layers is None:
            feature_layers = [
                "input_1",
                "block1_conv2", "block2_conv2",
                "block3_conv2", "block4_conv2",
                "block5_conv2"]
        self.layer_names = [l.name for l in self.base_model.layers]
        for k in feature_layers:
            if k not in self.layer_names:
                raise KeyError(
                    "Invalid layer {}. Available layers: {}".format(
                        k, self.layer_names))
        features = [self.base_model.get_layer(k).output for k in feature_layers]
        self.model = Model(inputs=self.base_model.input, outputs=features)
        if feature_weights is None:
            feature_weights = len(feature_layers) * [1.0]
        self.feature_weights = feature_weights
        assert len(self.feature_weights) == len(features)

        self.variables = self.base_model.weights


    def extract_features(self, x):
        """x should be rgb in [-1,1]."""
        x = preprocess_input(x)
        features = self.model.predict(x)
        return features


    def make_feature_ops(self, x):
        """x should be rgb tensor in [-1,1]."""
        x = preprocess_input(x)
        features = self.model(x)
        return features


    def make_loss_op(self, x, y):
        """x, y should be rgb tensors in [-1,1]."""
        x = preprocess_input(x)
        x_features = self.model(x)

        y = preprocess_input(y)
        y_features = self.model(y)

        losses = [
                tf.reduce_mean(tf.abs(xf - yf)) for xf, yf in zip(
                    x_features, y_features)]
        for i in range(len(losses)):
            losses[i] = self.feature_weights[i] * losses[i]
        loss = tf.add_n(losses)

        self.losses = losses

        return loss
Example #7
    def on_epoch_end(self, epoch, logs=None):
        if self.endmembersGT is None:
            self.plotGT = False
        self.losses.append(logs.get('loss'))
        self.num_epochs = epoch
        endmembers = self.model.get_layer('endmembers').get_weights()[0]
        if self.plotGT:
            sad = compute_ASAM_rad(endmembers, self.endmembersGT, None)
            self.sads.append(sad)
            sio.savemat('sad.mat', {'SAD': self.sads, 'LOSS': self.losses})
        if self.plot_every_n == 0 or epoch % self.plot_every_n != 0: return
        if self.plotS:
            if self.num_inputs > 1:
                intermediate_layer_model = Model(
                    inputs=self.model.input,
                    outputs=[
                        self.model.get_layer('abundances' + str(i)).output
                        for i in range(self.num_inputs)
                    ])
                abundances = np.mean(intermediate_layer_model.predict(
                    [self.input.orig_data for i in range(self.num_inputs)]),
                                     axis=0)
            else:
                intermediate_layer_model = Model(
                    inputs=self.model.input,
                    outputs=self.model.get_layer('abundances').output)
                abundances = intermediate_layer_model.predict(
                    self.input.orig_data)
            if self.size is None:
                self.size = (int(np.sqrt(abundances.shape[0])),
                             int(np.sqrt(abundances.shape[0])))

        if self.plotGT:
            endmember_order = order_endmembers(endmembers, self.endmembersGT)
            plotEndmembersAndGT(self.endmembersGT, endmembers, endmember_order)
            if self.plotS:
                plotAbundancesSimple(self.num_endmembers,
                                     (self.size[0], self.size[1]),
                                     abundances,
                                     endmember_order,
                                     use_ASC=1,
                                     figure_nr=10)
        else:
            plotEndmembers(self.num_endmembers, endmembers)
            if self.plotS:
                plotAbundancesSimple(self.num_endmembers,
                                     (self.size[0], self.size[1]),
                                     abundances,
                                     None,
                                     use_ASC=1,
                                     figure_nr=10)
        return
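Because the callback writes the SAD and loss curves to sad.mat on every epoch, they can be inspected offline; a short sketch using the standard scipy.io API:

import scipy.io as sio

mat = sio.loadmat('sad.mat')
sads = mat['SAD'].ravel()
losses = mat['LOSS'].ravel()
print('final SAD: {:.4f}, final loss: {:.4f}'.format(sads[-1], losses[-1]))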
Example #8
def transfer_model(source_df, target_df, test_df, method_flag, fold_num):

	source_labels, source_data = np.split(np.array(source_df),[1],axis=1)
	target_labels, target_data = np.split(np.array(target_df),[1],axis=1)
	test_labels, test_data = np.split(np.array(test_df),[1],axis=1)

	# normalization is disabled here; the raw feature values are used as-is
	normalized_source_data = source_data
	normalized_target_data = target_data
	normalized_test_data = test_data


	### construct model for source domain task  ###

	# optimization
	opt = Adam()

	# network setting
	latent = models.latent(normalized_source_data.shape[1])
	sll = models.source_last_layer()
	tll = models.target_last_layer()

	source_inputs = Input(shape=normalized_source_data.shape[1:])
	latent_features = latent(source_inputs)
	source_predictors = sll(latent_features)

	latent.trainable = mc._SORUCE_LATENT_TRAIN
	# trainable must be set on the layer/model object, not on its output tensor
	sll.trainable = True

	source_nn = Model(inputs=[source_inputs], outputs=[source_predictors])
	source_nn.compile(loss=['mean_squared_error'], optimizer=opt)
	#source_nn.summary()

	# training using source domain data
	if method_flag != mc._SCRATCH:
		source_max_loop = int(normalized_source_data.shape[0]/mc._BATCH_SIZE)
		source_progbar = Progbar(target=mc._SOURCE_EPOCH_NUM)
		for epoch in range(mc._SOURCE_EPOCH_NUM):
			shuffle_data, shuffle_labels, _ = pre.paired_shuffle(normalized_source_data,source_labels,1)

			for loop in range(source_max_loop):
				batch_train_data = shuffle_data[loop*mc._BATCH_SIZE:(loop+1)*mc._BATCH_SIZE]
				batch_train_labels = shuffle_labels[loop*mc._BATCH_SIZE:(loop+1)*mc._BATCH_SIZE]
				batch_train_labels = np.reshape(batch_train_labels, [len(batch_train_labels)])
				one_hots = np.identity(mc._SOURCE_DIM_NUM)[np.array(batch_train_labels, dtype=np.int32)]
				loss = source_nn.train_on_batch([batch_train_data],[one_hots])

			source_progbar.add(1, values=[("source loss", loss)])

		# save
		#latent.save('../results/source_latent.h5')
		#sll.save('../results/source_last_layer.h5')

	# compute relation vectors
	if method_flag == mc._SCRATCH or method_flag == mc._CONV_TRANSFER:
		target_vectors = np.identity(mc._TARGET_DIM_NUM)[np.array(target_labels, dtype=np.int32)]
		target_vectors = np.reshape(target_vectors, [target_vectors.shape[0], target_vectors.shape[2]])
	elif method_flag == mc._COUNT_ATDL:
		target_labels, relations = rv.compute_relation_labels(source_nn, normalized_target_data, target_labels, fold_num)
		target_vectors = np.identity(mc._SOURCE_DIM_NUM)[np.array(target_labels, dtype=np.int32)]
		target_vectors = np.reshape(target_vectors, [target_vectors.shape[0], target_vectors.shape[2]])
	else:
		relation_vectors = rv.compute_relation_vectors(source_nn, normalized_target_data, target_labels, fold_num, method_flag)
		target_vectors = np.zeros((len(target_labels),mc._SOURCE_DIM_NUM), dtype=np.float32)
		for i in range(len(target_labels)):
			target_vectors[i] = relation_vectors[int(target_labels[i])]

	### tuning model for target domain task	 ###

	latent.trainable = mc._TARGET_LATENT_TRAIN
	target_inputs = Input(shape=normalized_target_data.shape[1:])
	latent_features = latent(target_inputs)
	if method_flag == mc._SCRATCH or method_flag == mc._CONV_TRANSFER:
		predictors = tll(latent_features)
		label_num = mc._TARGET_DIM_NUM
	else:
		predictors= sll(latent_features)
		label_num = mc._SOURCE_DIM_NUM

	target_nn = Model(inputs=[target_inputs], outputs=[predictors])
	target_nn.compile(loss=['mean_squared_error'], optimizer=opt)
	#target_nn.summary()

	# training using target domain data
	target_max_loop = int(normalized_target_data.shape[0]/mc._BATCH_SIZE)
	target_progbar = Progbar(target=mc._TARGET_EPOCH_NUM)
	for epoch in range(mc._TARGET_EPOCH_NUM):

		shuffle_data, shuffle_labels, _ = \
		pre.paired_shuffle(normalized_target_data, target_vectors, label_num)
		for loop in range(target_max_loop):
			batch_train_data = shuffle_data[loop*mc._BATCH_SIZE:(loop+1)*mc._BATCH_SIZE]
			batch_train_labels = shuffle_labels[loop*mc._BATCH_SIZE:(loop+1)*mc._BATCH_SIZE]
			loss = target_nn.train_on_batch([batch_train_data],[batch_train_labels])
		target_progbar.add(1, values=[("target loss", loss)])


	# compute outputs of test data of target domain
	x = target_nn.predict([normalized_test_data])
	if method_flag == mc._SCRATCH or method_flag == mc._CONV_TRANSFER:
		idx = np.argmax(x, axis=1)
	elif method_flag == mc._COUNT_ATDL:
		idx = np.argmax(x, axis=1)
		for j in range(len(test_labels)):
			test_labels[j] = relations[int(test_labels[j])]
	else:
		distance, idx = Neighbors(x, relation_vectors, 1)
		idx = idx[:,0]

	backend.clear_session()
	return idx.T, test_labels.T
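The one-hot construction used above indexes an identity matrix with integer labels, so row i is the one-hot vector for class i; a small NumPy sketch:

import numpy as np

labels = np.array([2, 0, 1], dtype=np.int32)
num_classes = 4
one_hots = np.identity(num_classes)[labels]
print(one_hots)
# [[0. 0. 1. 0.]
#  [1. 0. 0. 0.]
#  [0. 1. 0. 0.]]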