Example #1
File: iFDD.py Project: zhexiaozhe/rlpy
 def __init__(self,
              domain,
              discovery_threshold,
              initial_representation,
              sparsify=True,
              discretization=20,
              debug=0,
              useCache=0,
              maxBatchDiscovery=1,
              batchThreshold=0,
              iFDDPlus=1,
              seed=1):
     self.iFDD_features = {}
     self.iFDD_potentials = {}
     self.featureIndex2feature = {}
     self.cache = {}
     self.discovery_threshold = discovery_threshold
     self.sparsify = sparsify
     self.setBinsPerDimension(domain, discretization)
     self.features_num = initial_representation.features_num
     self.debug = debug
     self.useCache = useCache
     self.maxBatchDiscovery = maxBatchDiscovery
     self.batchThreshold = batchThreshold
     self.sortediFDDFeatures = PriorityQueueWithNovelty()
     self.initial_representation = initial_representation
     self.iFDDPlus = iFDDPlus
     self.isDynamic = True
     self.addInitialFeatures()
     super(iFDD, self).__init__(domain, discretization, seed)
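A minimal usage sketch for this constructor (a sketch only: it assumes rlpy's usual layout, with a GridWorld domain and IndependentDiscretization supplying the initial binary features; module paths and constructor defaults may differ between forks):

from rlpy.Domains import GridWorld
from rlpy.Representations import IndependentDiscretization, iFDD

domain = GridWorld()                             # assumed default map
initial_rep = IndependentDiscretization(domain)  # initial binary features
representation = iFDD(domain,
                      discovery_threshold=1.0,   # psi in the paper
                      initial_representation=initial_rep,
                      useCache=1,
                      iFDDPlus=1)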
Example #2
 def __init__(self, domain, kernel, active_threshold, discover_threshold,
              kernel_args=[], normalization=True, sparsify=True,
              max_active_base_feat=2, max_base_feat_sim=0.7):
     super(KernelizediFDD, self).__init__(domain)
     self.kernel = kernel
     self.kernel_args = kernel_args
     self.active_threshold = active_threshold
     self.discover_threshold = discover_threshold
     self.normalization = normalization
     self.sparsify = sparsify
     self.sorted_ids = PriorityQueueWithNovelty()
     self.max_active_base_feat = max_active_base_feat
     self.max_base_feat_sim = max_base_feat_sim
     self.candidates = {}
     self.features = []
     self.base_features_ids = []
     self.max_relevance = 0.
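A minimal construction sketch for KernelizediFDD. The kernel below is a hypothetical inline stand-in (the project ships its own kernels), and the calling convention (state1, state2, dims, *kernel_args) is an assumption inferred from how kernel and kernel_args are stored above:

import numpy as np

def gaussian_kernel(s1, s2, dim, sigma):
    # similarity of two states restricted to the given dimensions
    d = np.asarray(s1)[dim] - np.asarray(s2)[dim]
    return np.exp(-np.sum(d ** 2) / (2. * sigma ** 2))

rep = KernelizediFDD(domain, kernel=gaussian_kernel,
                     active_threshold=0.01,   # activation cutoff for features
                     discover_threshold=1.0,  # relevance needed to add a candidate
                     kernel_args=[0.2],       # sigma
                     sparsify=True)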
Example #3
File: iFDD.py Project: smcgregor/rlpy
 def __init__(
         self, domain, discovery_threshold, initial_representation, sparsify=True,
         discretization=20, debug=0, useCache=0, maxBatchDiscovery=1, batchThreshold=0, iFDDPlus=1):
     self.iFDD_features = {}
     self.iFDD_potentials = {}
     self.featureIndex2feature = {}
     self.cache = {}
     self.discovery_threshold = discovery_threshold
     self.sparsify = sparsify
     self.setBinsPerDimension(domain, discretization)
     self.features_num = initial_representation.features_num
     self.debug = debug
     self.useCache = useCache
     self.maxBatchDiscovery = maxBatchDiscovery
     self.batchThreshold = batchThreshold
     self.sortediFDDFeatures = PriorityQueueWithNovelty()
     self.initial_representation = initial_representation
     self.iFDDPlus = iFDDPlus
     self.isDynamic = True
     self.addInitialFeatures()
     super(iFDD, self).__init__(domain, discretization)
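All of the constructors above rely on PriorityQueueWithNovelty from the project's tools. A minimal sketch of the behavior the listings assume (smallest priority value first; ties broken newest-first, hence "novelty"):

import heapq
from itertools import count

class PriorityQueueWithNovelty(object):
    def __init__(self):
        self._heap = []
        self._counter = count()

    def push(self, priority, item):
        # negate the insertion counter so that, among equal priorities,
        # the most recently pushed item sorts first
        heapq.heappush(self._heap, (priority, -next(self._counter), item))

    def toList(self):
        return [item for _, _, item in sorted(self._heap)]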
Example #4
File: iFDD.py Project: zhexiaozhe/rlpy
class iFDD(Representation):
    ''' The incremental Feature Dependency Discovery (iFDD) representation, based on
    the ICML 2011 paper [Geramifard et al. 2011]. This representation starts with a set
    of given binary features and adds new features as conjunctions of existing features.
    Given n features, iFDD can expand the set to up to 2^n - 1 features (i.e., the
    conjunction of each non-empty subset of the n features can become a new feature);
    e.g., 3 binary features yield at most 7 conjunctions.
    '''

    # Setting the threshold to infinity and printing the max relevance is a
    # good starting point for choosing a discovery threshold.
    PRINT_MAX_RELEVANCE = False
    discovery_threshold = None  # psi in the paper
    # boolean specifying the use of the trick mentioned in the paper so that
    # features become sparser as more features are discovered (i.e. use the
    # greedy algorithm for feature activation)
    sparsify = None
    # dictionary mapping initial feature sets to iFDD_feature
    iFDD_features = None
    # dictionary mapping initial feature sets to iFDD_potential
    iFDD_potentials = None
    # dictionary mapping each feature index (ID) to its feature object
    featureIndex2feature = None
    debug = 0  # Print more stuff
    # dictionary mapping the initial active feature set phi_0(s) to its
    # corresponding active features in phi(s). Based on Tuna's trick to speed
    # up iFDD
    cache = None
    # using the cache should only increase speed; if results differ, something
    # is wrong
    useCache = 0
    # Number of features to be expanded in the batch setting
    maxBatchDiscovery = 0
    # Minimum value of feature relevance for the batch setting
    batchThreshold = 0
    # ICML 2011 iFDD adds the sum of abs(TD-errors), while iFDD+ uses
    # abs(sum(TD-errors))/sqrt(potential feature presence count)
    iFDDPlus = 0
    # A priority queue ordered by feature size (largest -> smallest); same-size
    # features are ordered newest -> oldest. Each element is a pointer to a
    # feature object.
    sortediFDDFeatures = None
    # A Representation that provides the initial set of features for iFDD
    initial_representation = None
    # Helper parameter to get a sense of appropriate threshold on the
    # relevance for discovery
    maxRelevance = -np.inf
    # As Christoph mentioned, adding new features may affect phi for all
    # states. This flag ensures both strategies for generating active
    # features produce the same result.
    use_chirstoph_ordered_features = True

    def __init__(self,
                 domain,
                 discovery_threshold,
                 initial_representation,
                 sparsify=True,
                 discretization=20,
                 debug=0,
                 useCache=0,
                 maxBatchDiscovery=1,
                 batchThreshold=0,
                 iFDDPlus=1,
                 seed=1):
        self.iFDD_features = {}
        self.iFDD_potentials = {}
        self.featureIndex2feature = {}
        self.cache = {}
        self.discovery_threshold = discovery_threshold
        self.sparsify = sparsify
        self.setBinsPerDimension(domain, discretization)
        self.features_num = initial_representation.features_num
        self.debug = debug
        self.useCache = useCache
        self.maxBatchDiscovery = maxBatchDiscovery
        self.batchThreshold = batchThreshold
        self.sortediFDDFeatures = PriorityQueueWithNovelty()
        self.initial_representation = initial_representation
        self.iFDDPlus = iFDDPlus
        self.isDynamic = True
        self.addInitialFeatures()
        super(iFDD, self).__init__(domain, discretization, seed)

    def phi_nonTerminal(self, s):
        """ Based on Tuna's Master Thesis 2012 """
        F_s = np.zeros(self.features_num, 'bool')
        F_s_0 = self.initial_representation.phi_nonTerminal(s)
        activeIndices = np.where(F_s_0 != 0)[0]
        if self.useCache:
            finalActiveIndices = self.cache.get(frozenset(activeIndices))
            if finalActiveIndices is None:
                # run regular and update the cache
                finalActiveIndices = self.findFinalActiveFeatures(
                    activeIndices)
        else:
            finalActiveIndices = self.findFinalActiveFeatures(activeIndices)
        F_s[finalActiveIndices] = 1
        return F_s

    def findFinalActiveFeatures(self, intialActiveFeatures):
        """
        Given the active indices of phi_0(s) find the final active indices of phi(s) based on discovered features
        """
        finalActiveFeatures = []
        k = len(intialActiveFeatures)
        initialSet = set(intialActiveFeatures)

        if 2**k <= self.features_num:
            # 2**k can be large, making this enumeration over subsets of the
            # active initial features slow; for large k, the else-branch below
            # instead enumerates the discovered features.
            if self.use_chirstoph_ordered_features:
                for i in range(k, 0, -1):
                    if len(initialSet) == 0:
                        break
                    # generate list of all combinations with i elements
                    cand_i = [(c, self.iFDD_features[frozenset(c)].index)
                              for c in combinations(initialSet, i)
                              if frozenset(c) in self.iFDD_features]
                    # sort (recent features (big ids) first)
                    cand_i.sort(key=lambda x: x[1], reverse=True)
                    # idx = -1
                    for candidate, ind in cand_i:
                        # the next block is for testing only
                        # cur_idx = self.iFDD_features[frozenset(candidate)].index
                        # if idx > 0:
                        #    assert(idx > cur_idx)
                        # idx = cur_idx

                        if len(initialSet) == 0:
                            # No more initial features to be mapped to extended
                            # ones
                            break

                        # This check was missing from the ICML 2011 paper's
                        # algorithm. Example: active set [0,1,20] where [0,20]
                        # has been discovered; without the check, [0] would be
                        # added even though it is already covered by [0,20]
                        if initialSet.issuperset(set(candidate)):
                            feature = self.iFDD_features.get(
                                frozenset(candidate))
                            if feature is not None:
                                finalActiveFeatures.append(feature.index)
                                if self.sparsify:
                                    # print "Sets:", initialSet, feature.f_set
                                    initialSet = initialSet - feature.f_set
                                    # print "Remaining Set:", initialSet
            else:
                for candidate in powerset(initialSet, ascending=0):
                    if len(initialSet) == 0:
                        # No more initial features to be mapped to extended
                        # ones
                        break

                    # This check was missing from the ICML 2011 paper's
                    # algorithm. Example: active set [0,1,20] where [0,20] has
                    # been discovered; without the check, [0] would be added
                    # even though it is already covered by [0,20]
                    if initialSet.issuperset(set(candidate)):
                        feature = self.iFDD_features.get(frozenset(candidate))
                        if feature is not None:
                            finalActiveFeatures.append(feature.index)
                            if self.sparsify:
                                # print "Sets:", initialSet, feature.f_set
                                initialSet = initialSet - feature.f_set
                                # print "Remaining Set:", initialSet
        else:
            # print "********** Using Alternative: %d > %d" % (2**k, self.features_num)
            # Loop on all features sorted on their size and then novelty and
            # activate features
            for feature in self.sortediFDDFeatures.toList():
                if len(initialSet) == 0:
                    # No more initial features to be mapped to extended ones
                    break

                if initialSet.issuperset(set(feature.f_set)):
                    finalActiveFeatures.append(feature.index)
                    if self.sparsify:
                        # print "Sets:", initialSet, feature.f_set
                        initialSet = initialSet - feature.f_set
                        # print "Remaining Set:", initialSet

        if self.useCache:
            self.cache[frozenset(intialActiveFeatures)] = finalActiveFeatures
        return finalActiveFeatures

    def post_discover(self, s, terminal, a, td_error, phi_s):
        """
        returns the number of added features
        """
        # Indices of non-zero elements of vector phi_s
        activeFeatures = phi_s.nonzero()[0]
        discovered = 0
        for g_index, h_index in combinations(activeFeatures, 2):
            discovered += self.inspectPair(g_index, h_index, td_error)
        return discovered

    def inspectPair(self, g_index, h_index, td_error):
        # Inspect feature f = g union h where g_index and h_index are the indices of features g and h
        # If the relevance is > Threshold add it to the list of features
        # Returns True if a new feature is added
        g = self.featureIndex2feature[g_index].f_set
        h = self.featureIndex2feature[h_index].f_set
        f = g.union(h)
        feature = self.iFDD_features.get(f)
        if not self.iFDDPlus:
            td_error = abs(td_error)

        if feature is not None:
            # Already exists
            return False

        # Look it up in potentials
        potential = self.iFDD_potentials.get(f)
        if potential is None:
            # Generate a new potential and put it in the dictionary
            potential = iFDD_potential(f, g_index, h_index)
            self.iFDD_potentials[f] = potential

        potential.cumtderr += td_error
        potential.cumabstderr += abs(td_error)
        potential.count += 1
        # Check for discovery

        if self.random_state.rand() < self.iFDDPlus:
            relevance = abs(potential.cumtderr) / np.sqrt(potential.count)
        else:
            relevance = potential.cumabstderr

        if relevance >= self.discovery_threshold:
            self.maxRelevance = -np.inf
            self.addFeature(potential)
            return True
        else:
            self.updateMaxRelevance(relevance)
            return False

    def show(self):
        self.showFeatures()
        self.showPotentials()
        self.showCache()

    def updateWeight(self, p1_index, p2_index):
        # Add a new weight corresponding to the new added feature for all actions.
        # The new weight is set to zero if sparsify = False, and equal to the
        # sum of weights corresponding to the parents if sparsify = True
        a = self.domain.actions_num
        # Number of features before adding the new one
        f = self.features_num - 1
        if self.sparsify:
            newElem = (self.weight_vec[p1_index::f] +
                       self.weight_vec[p2_index::f]).reshape((-1, 1))
        else:
            newElem = None
        self.weight_vec = addNewElementForAllActions(self.weight_vec, a,
                                                     newElem)
        # We don't want to reuse the hashed phi because the phi function has changed!
        self.hashed_s = None

    def addInitialFeatures(self):
        for i in xrange(self.initial_representation.features_num):
            feature = iFDD_feature(i)
            # shout(self,self.iFDD_features[frozenset([i])].index)
            self.iFDD_features[frozenset([i])] = feature
            self.featureIndex2feature[feature.index] = feature
            # priority is 1/number of initial features corresponding to the
            # feature
            priority = 1
            self.sortediFDDFeatures.push(priority, feature)

    def addFeature(self, potential):
        # Add it to the list of features
        # Features_num is always one more than the max index (0-based)
        potential.index = self.features_num
        self.features_num += 1
        feature = iFDD_feature(potential)
        self.iFDD_features[potential.f_set] = feature
        # Expand the size of the weight_vec
        self.updateWeight(feature.p1, feature.p2)
        # Update the index to feature dictionary
        self.featureIndex2feature[feature.index] = feature
        # print "IN IFDD, New Feature = %d => Total Features = %d" % (feature.index, self.features_num)
        # Update the sorted list of features
        # priority is 1/number of initial features corresponding to the feature
        priority = 1 / (len(potential.f_set) * 1.)
        self.sortediFDDFeatures.push(priority, feature)

        # If the cache is used, invalidate entries whose initial set contains
        # the set corresponding to the new feature
        if self.useCache:
            for initialActiveFeatures in self.cache.keys():
                if initialActiveFeatures.issuperset(feature.f_set):
                    if self.sparsify:
                        self.cache.pop(initialActiveFeatures)
                    else:
                        # If sparsification is not used, simply append the new
                        # feature id to every cached value whose initial
                        # feature set is a superset of the new feature's set
                        self.cache[initialActiveFeatures].append(feature.index)
        if self.debug:
            self.show()

    def batchDiscover(self, td_errors, phi, states):
        # Discovers features using iFDD in the batch setting.
        # td_errors: p-by-1 (error observed for each sample)
        # phi: p-by-n feature matrix (each row corresponds to one sample)
        # self.batchThreshold is the minimum relevance value for a feature to
        # be expanded
        SHOW_PLOT = 0  # Plot the sorted relevance values
        maxDiscovery = self.maxBatchDiscovery
        n = self.features_num  # number of features
        p = len(td_errors)  # Number of samples
        counts = np.zeros((n, n))
        relevances = np.zeros((n, n))
        for i in xrange(p):
            phiphiT = np.outer(phi[i, :], phi[i, :])
            if self.iFDDPlus:
                relevances += phiphiT * td_errors[i]
            else:
                relevances += phiphiT * abs(td_errors[i])
            counts += phiphiT
        # Keep only the strict upper triangle; the diagonal and lower triangle
        # are redundant
        relevances = np.triu(relevances, 1)
        non_zero_index = np.nonzero(relevances)
        if self.iFDDPlus:
            # Calculate relevances based on theoretical results of ICML 2013
            # potential submission
            relevances[non_zero_index] = np.divide(
                np.abs(relevances[non_zero_index]),
                np.sqrt(counts[non_zero_index]))
        else:
            # Based on the Geramifard 2011 ICML paper: relevances already hold
            # the accumulated |TD-errors|, so nothing further to compute here
            pass

        # Find indices of the non-zero excited pairs
        # F1 and F2 are the parents of the potentials
        (F1, F2) = relevances.nonzero()
        relevances = relevances[F1, F2]
        if len(relevances) == 0:
            # No feature to add
            self.logger.debug("iFDD Batch: Max Relevance = 0")
            return False

        if SHOW_PLOT:
            e_vec = relevances.flatten()
            e_vec = e_vec[e_vec != 0]
            e_vec = np.sort(e_vec)
            plt.ioff()
            plt.plot(e_vec, linewidth=3)
            plt.show()

        # Sort based on relevances
        # We want high to low hence the reverse: [::-1]
        sortedIndices = np.argsort(relevances)[::-1]
        max_relevance = relevances[sortedIndices[0]]
        # Add top <maxDiscovery> features
        self.logger.debug(
            "iFDD Batch: Max Relevance = {0:g}".format(max_relevance))
        added_feature = False
        new_features = 0
        for j in xrange(len(relevances)):
            if new_features >= maxDiscovery:
                break
            max_index = sortedIndices[j]
            f1 = F1[max_index]
            f2 = F2[max_index]
            relevance = relevances[max_index]
            if relevance > self.batchThreshold:
                # print "Inspecting",
                # f1,f2,'=>',self.getStrFeatureSet(f1),self.getStrFeatureSet(f2)
                if self.inspectPair(f1, f2, np.inf):
                    self.logger.debug(
                        'New Feature %d: %s, Relevance = %0.3f' %
                        (self.features_num - 1,
                         self.getStrFeatureSet(self.features_num - 1),
                         relevances[max_index]))
                    new_features += 1
                    added_feature = True
            else:
                # Because the list is sorted, there is no use to look at the
                # others
                break
        # Signal whether the representation has been expanded
        return added_feature

    def showFeatures(self):
        print "Features:"
        print "-" * 30
        print " index\t| f_set\t| p1\t| p2\t | Weights (per action)"
        print "-" * 30
        for feature in reversed(self.sortediFDDFeatures.toList()):
            # for feature in self.iFDD_features.itervalues():
            # print " %d\t| %s\t| %s\t| %s\t| %s" %
            # (feature.index,str(list(feature.f_set)),feature.p1,feature.p2,str(self.weight_vec[feature.index::self.features_num]))
            print " %d\t| %s\t| %s\t| %s\t| Omitted" % (
                feature.index, self.getStrFeatureSet(
                    feature.index), feature.p1, feature.p2)

    def showPotentials(self):
        print "Potentials:"
        print "-" * 30
        print " index\t| f_set\t| relevance\t| count\t| p1\t| p2"
        print "-" * 30
        for _, potential in self.iFDD_potentials.iteritems():
            print " %d\t| %s\t| %0.2f\t| %d\t| %s\t| %s" % (
                potential.index, str(np.sort(list(
                    potential.f_set))), potential.relevance, potential.count,
                potential.p1, potential.p2)

    def showCache(self):
        if self.useCache:
            print "Cache:"
            if len(self.cache) == 0:
                print 'EMPTY!'
                return
            print "-" * 30
            print " initial\t| Final"
            print "-" * 30
            for initial, active in self.cache.iteritems():
                print " %s\t| %s" % (str(list(initial)), active)

    def updateMaxRelevance(self, newRelevance):
        # Update the global max relevance and log it if it increased
        if self.maxRelevance < newRelevance:
            self.maxRelevance = newRelevance
            if self.PRINT_MAX_RELEVANCE:
                self.logger.debug(
                    "iFDD Batch: Max Relevance = {0:g}".format(newRelevance))

    def getFeature(self, f_id):
        # returns a feature given a feature id
        if f_id in self.featureIndex2feature:
            return self.featureIndex2feature[f_id]
        else:
            print "F_id %d is not valid" % f_id
            return None

    def getStrFeatureSet(self, f_id):
        # returns a string that corresponds to the set of features specified by
        # the given feature_id
        if f_id in self.featureIndex2feature:
            return str(sorted(list(self.featureIndex2feature[f_id].f_set)))
        else:
            print "F_id %d is not valid" % f_id
            return None

    def featureType(self):
        return bool

    def __deepcopy__(self, memo):
        ifdd = iFDD(self.domain, self.discovery_threshold,
                    self.initial_representation, self.sparsify,
                    self.discretization, self.debug, self.useCache,
                    self.maxBatchDiscovery, self.batchThreshold, self.iFDDPlus)
        for s, f in self.iFDD_features.items():
            new_f = deepcopy(f)
            new_s = deepcopy(s)
            ifdd.iFDD_features[new_s] = new_f
            ifdd.featureIndex2feature[new_f.index] = new_f
        for s, p in self.iFDD_potentials.items():
            new_s = deepcopy(s)
            new_p = deepcopy(p)
            ifdd.iFDD_potentials[new_s] = deepcopy(new_p)
        ifdd.cache = deepcopy(self.cache)
        ifdd.sortediFDDFeatures = deepcopy(self.sortediFDDFeatures)
        ifdd.features_num = self.features_num
        ifdd.weight_vec = deepcopy(self.weight_vec)
        return ifdd
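inspectPair above (and its batch analogue in batchDiscover) distinguishes two relevance statistics: plain iFDD accumulates sum(|TD-error|), while iFDD+ uses |sum(TD-error)| / sqrt(count). A short sketch of the difference on toy numbers (hypothetical TD-errors, for illustration only):

import numpy as np

td_errors = np.array([0.5, -0.4, 0.6, -0.5])  # oscillating errors
ifdd_relevance = np.abs(td_errors).sum()                              # 2.0: keeps growing
ifdd_plus_relevance = abs(td_errors.sum()) / np.sqrt(len(td_errors))  # 0.1
# iFDD+ suppresses features whose TD-errors merely oscillate and cancel,
# while plain iFDD would eventually cross any finite discovery_threshold.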
Example #5
class KernelizediFDD(Representation):

    """
    Kernelized version of iFDD
    """
    features = []
    candidates = {}
    # contains a set for each feature indicating the ids of 1-dim features it refines
    base_id_sets = set()
    base_feature_ids = []
    max_relevance = 0.

    def __init__(self, domain, kernel, active_threshold, discover_threshold,
                 kernel_args=[], normalization=True, sparsify=True,
                 max_active_base_feat=2, max_base_feat_sim=0.7):
        super(KernelizediFDD, self).__init__(domain)
        self.kernel = kernel
        self.kernel_args = kernel_args
        self.active_threshold = active_threshold
        self.discover_threshold = discover_threshold
        self.normalization = normalization
        self.sparsify = sparsify
        self.sorted_ids = PriorityQueueWithNovelty()
        self.max_active_base_feat = max_active_base_feat
        self.max_base_feat_sim = max_base_feat_sim
        self.candidates = {}
        self.features = []
        self.base_features_ids = []
        self.max_relevance = 0.

    def show_features(self):
        l = self.sorted_ids.toList()[:]
        key = lambda x: (
            len(self.features[x].base_ids),
            tuple(self.features[x].dim),
            tuple(self.features[x].center[self.features[x].dim]))
        l.sort(key=key)
        for i in l:
            f = self.features[i]
            print "{:>5} {:>20}".format(i, f)

    def plot_1d_features(self, dimension_idx=None):
        """Creates a plot for each specified dimension of the state space and shows
        all 1-dimensional features in this dimension
        If no indices are passed, all dimensions are plotted

        dimension_idx: either a single dimension index (int) or a list of indices.
        """
        idx = dimension_idx
        if isinstance(idx, int):
            idx = [idx]
        elif idx is None:
            idx = self.domain.continuous_dims

        feat_list = range(self.features_num)
        key = lambda x: (
            len(self.features[x].base_ids),
            tuple(self.features[x].dim),
            tuple(self.features[x].center[self.features[x].dim]))
        feat_list.sort(key=key)
        last_i = -1
        for k in feat_list:
            if len(self.features[k].dim) > 1:
                break
            cur_i = self.features[k].dim[0]
            if cur_i != last_i:
                if last_i in idx:
                    plt.draw()
                if cur_i in idx:
                    xi = np.linspace(self.domain.statespace_limits[cur_i, 0],
                                     self.domain.statespace_limits[cur_i, 1],
                                     200)
                    x = np.zeros((200, self.domain.statespace_limits.shape[0]))
                    x[:, cur_i] = xi
                    plt.figure("Feature Dimension {}".format(cur_i))
            if cur_i in idx:
                y = [self.features[k].output(xk) for xk in x]
                # plot against the varying dimension xi; x itself is the full
                # (200, n_dims) state matrix
                plt.plot(xi, y, label="id {}".format(k))
            last_i = cur_i
        plt.draw()

    def plot_2d_features(self, d1=None, d2=None, n_lines=3):
        """
        plot contours of all 2-dimensional features covering
        dimension d1 and d2. For each feature, n_lines number of lines
        are shown.
        If no dimensions are specified, the first two continuous dimensions
        are shown.

        d1, d2: indices of dimensions to show
        n_lines: number of countour lines per feature (default: 3)
        """
        if d1 is None and d2 is None:
            # just take the first two dimensions
            idx = self.domain.continuous_dims[:2]
        else:
            idx = [d1, d2]
        idx.sort()

        feat_list = range(self.features_num)
        key = lambda x: (
            len(self.features[x].base_ids),
            tuple(self.features[x].dim),
            tuple(self.features[x].center[self.features[x].dim]))
        feat_list.sort(key=key)
        last_i = -1
        last_j = -1
        for k in feat_list:
            if len(self.features[k].dim) < 2:
                continue
            elif len(self.features[k].dim) > 2:
                break
            cur_i = self.features[k].dim[0]
            cur_j = self.features[k].dim[1]
            if cur_i != last_i or cur_j != last_j:
                if last_i in idx and last_j in idx:
                    plt.draw()
                if cur_i in idx and cur_j in idx:
                    xi = np.linspace(self.domain.statespace_limits[cur_i, 0],
                                     self.domain.statespace_limits[cur_i, 1],
                                     100)
                    xj = np.linspace(self.domain.statespace_limits[cur_j, 0],
                                     self.domain.statespace_limits[cur_j, 1],
                                     100)
                    X, Y = np.meshgrid(xi, xj)
                    plt.figure(
                        "Feature Dimensions {} and {}".format(cur_i, cur_j))
            if cur_i in idx and cur_j in idx:
                Z = np.zeros_like(X)
                for m in xrange(100):
                    for n in xrange(100):
                        x = np.zeros(self.domain.statespace_limits.shape[0])
                        x[cur_i] = X[m, n]
                        x[cur_j] = Y[m, n]
                        Z[m, n] = self.features[k].output(x)
                plt.contour(X, Y, Z, n_lines)
            last_i = cur_i
            last_j = cur_j
        plt.draw()

    def plot_2d_feature_centers(self, d1=None, d2=None):
        """
        Plot the centers of all 2-dimensional features covering
        dimensions d1 and d2.
        If no dimensions are specified, the first two continuous dimensions
        are shown.

        d1, d2: indices of dimensions to show
        """
        if d1 is None and d2 is None:
            # just take the first two dimensions
            idx = self.domain.continuous_dims[:2]
        else:
            idx = [d1, d2]
        idx.sort()

        feat_list = range(self.features_num)
        key = lambda x: (
            len(self.features[x].base_ids),
            tuple(self.features[x].dim),
            tuple(self.features[x].center[self.features[x].dim]))
        feat_list.sort(key=key)
        last_i = -1
        last_j = -1
        for k in feat_list:
            if len(self.features[k].dim) < 2:
                continue
            elif len(self.features[k].dim) > 2:
                break
            cur_i = self.features[k].dim[0]
            cur_j = self.features[k].dim[1]
            if cur_i != last_i or cur_j != last_j:
                if last_i in idx and last_j in idx:
                    plt.draw()
                if cur_i in idx and cur_j in idx:
                    plt.figure(
                        "Feature Dimensions {} and {}".format(cur_i, cur_j))
            if cur_i in idx and cur_j in idx:
                plt.plot(
                    [self.features[k].center[cur_i]],
                    [self.features[k].center[cur_j]],
                    "r",
                    marker="x")
            last_i = cur_i
            last_j = cur_j
        plt.draw()

    def phi_nonTerminal(self, s):
        out = np.zeros(self.features_num)
        if not self.sparsify:
            for i in xrange(self.features_num):
                out[i] = self.features[i].output(s)
        else:
            # get all base feature values and check if they are activated
            active_bases = set([])
            for i in self.sorted_ids.toList()[::-1]:
                if len(self.features[i].base_ids) > 1:
                    break
                if self.features[i].output(s) >= self.active_threshold:
                    active_bases.add(i)

            base_vals = {k: 1. for k in active_bases}
            # iterate over the remaining compound features
            for i in self.sorted_ids.toList():
                if active_bases.issuperset(self.features[i].base_ids):
                    if self.sparsify > 1:
                        out[i] = self.features[i].output(s)
                        if self.sparsify > 2 or out[i] >= self.active_threshold:
                            active_bases -= self.features[i].base_ids
                    else:
                        u = 0
                        for k in self.features[i].base_ids:
                            u = max(u, base_vals[k])
                        out[i] = self.features[i].output(s) * u

                        for k in self.features[i].base_ids:
                            base_vals[k] -= out[i]
                            if base_vals[k] < 0:
                                active_bases.remove(k)

        if self.normalization:
            summ = out.sum()
            if summ != 0:
                out /= summ
        return out

    def phi_raw(self, s, terminal):
        assert(terminal is False)
        out = np.zeros(self.features_num)
        for i in xrange(self.features_num):
            out[i] = self.features[i].output(s)
        return out

    #@profile
    def post_discover(self, s, terminal, a, td_error, phi_s=None):
        if phi_s is None:
            phi_s = self.phi(s, terminal)
        phi_s_unnorm = self.phi_raw(s, terminal)
        discovered = 0
        Q = self.Qs(s, terminal, phi_s=phi_s).reshape(-1, 1)
        # indices of active features
        active_indices = list(
            np.where(phi_s_unnorm > self.active_threshold)[0])
        # "active indices", active_indices
        # gather all dimensions regarded by active features
        active_dimensions = np.zeros((len(s)), dtype="int")
        closest_neighbor = np.zeros((len(s)))
        for i in active_indices:
            for j in self.features[i].dim:
                active_dimensions[j] += 1
                closest_neighbor[j] = max(closest_neighbor[j], phi_s_unnorm[i])

        # add new base features for all dimensions not yet covered
        for j in xrange(len(s)):
            if active_dimensions[j] < self.max_active_base_feat and (closest_neighbor[j] < self.max_base_feat_sim or active_dimensions[j] < 1):
                active_indices.append(self.add_base_feature(s, j, Q=Q))
                discovered += 1

        # update relevance statistics of all feature candidates
        if discovered:
            phi_s = self.phi(s, terminal)
        la = len(active_indices)
        if la * (la - 1) < len(self.candidates):
            for ind, cand in self.candidates.items():
                g, h = ind
                rel = self.update_relevance_stat(
                    cand,
                    g,
                    h,
                    td_error,
                    s,
                    a,
                    phi_s)
                self.max_relevance = max(rel, self.max_relevance)
                # add if relevance is high enough
                if rel > self.discover_threshold:
                    self.add_refined_feature(g, h, Q=Q)
                    discovered += 1

        else:
            # the results of the two branches can differ, as this one updates
            # only combinations that are currently active.
            for g, h in combinations(active_indices, 2):
                # note: g, h are ordered as active_indices are ordered
                cand = self.candidates.get((g, h))
                if cand is None:
                    continue
                rel = self.update_relevance_stat(
                    cand,
                    g,
                    h,
                    td_error,
                    s,
                    a,
                    phi_s)
                self.max_relevance = max(rel, self.max_relevance)
                # add if relevance is high enough
                if rel > self.discover_threshold:
                    self.add_refined_feature(g, h, Q=Q)
                    discovered += 1

        if discovered:
            self.max_relevance = 0.
        return discovered

    def update_relevance_stat(
            self, candidate, index1, index2, td_error, s, a, phi_s):
        """
        make sure that inputs are ordered, i.e.,index1 <= index2!
        returns the relevance of a potential feature combination
        """
        candidate.td_error_sum += phi_s[index1] * phi_s[index2] * td_error
        candidate.activation_count += phi_s[index1] ** 2 * phi_s[index2] ** 2
        if candidate.activation_count == 0.:
            return 0.
        rel = np.abs(candidate.td_error_sum) / \
            np.sqrt(candidate.activation_count)
        return rel

    def add_base_feature(self, center, dim, Q):
        """
        adds a new 1-dimensional feature and returns its index
        """
        new_f = KernelizedFeature(
            center=center, dim=[dim], kernel_args=self.kernel_args,
            kernel=self.kernel, index=self.features_num)
        self.features.append(new_f)

        self.base_id_sets.add(new_f.base_ids)
        self.sorted_ids.push(-1, self.features_num)
        self.logger.debug(
            "Added Feature {} {}".format(
                self.features_num,
                new_f))

        # add combinations with all existing features as candidates
        new_cand = {(f, self.features_num): Candidate(f, self.features_num)
                    for f in xrange(self.features_num) if dim not in self.features[f].dim}

        self.candidates.update(new_cand)
        for f, _ in new_cand.keys():
            self.base_id_sets.add(new_f.base_ids | self.features[f].base_ids)
        self.features_num += 1

        # add parameter dimension
        if self.normalization:
            self.weight_vec = addNewElementForAllActions(
                self.weight_vec,
                self.domain.actions_num,
                Q)
        else:
            self.weight_vec = addNewElementForAllActions(
                self.weight_vec,
                self.domain.actions_num)
        return self.features_num - 1

    def add_refined_feature(self, index1, index2, Q):
        """
        adds the combination of 2 existing features to the representation
        """
        f1 = self.features[index1]
        f2 = self.features[index2]
        new_center = np.zeros_like(f1.center)
        cnt = np.zeros_like(f1.center)
        cnt[f1.dim] += 1
        cnt[f2.dim] += 1
        cnt[cnt == 0] = 1.
        new_center[f1.dim] += f1.center[f1.dim]
        new_center[f2.dim] += f2.center[f2.dim]
        new_center /= cnt
        new_dim = list(frozenset(f1.dim) | frozenset(f2.dim))
        new_base_ids = f1.base_ids | f2.base_ids
        new_dim.sort()
        new_f = KernelizedFeature(center=new_center, dim=new_dim,
                                  kernel_args=self.kernel_args,
                                  kernel=self.kernel, index=self.features_num,
                                  base_ids=new_base_ids)
        self.features.append(new_f)
        # Priority is the negative number of base ids
        self.sorted_ids.push(-len(new_f.base_ids), self.features_num)
        #assert(len(self.sorted_ids.toList()) == self.features_num + 1)
        self.base_id_sets.add(new_f.base_ids)
        del self.candidates[(index1, index2)]

        # add new candidates
        new_cand = {(f, self.features_num): Candidate(f, self.features_num) for f in xrange(self.features_num)
                    if (self.features[f].base_ids | new_base_ids) not in self.base_id_sets
                    and len(frozenset(self.features[f].dim) & frozenset(new_dim)) == 0}
        for c, _ in new_cand.keys():
            self.base_id_sets.add(new_base_ids | self.features[c].base_ids)
        self.candidates.update(new_cand)
        self.logger.debug(
            "Added refined feature {} {}".format(
                self.features_num,
                new_f))
        self.logger.debug("{} candidates".format(len(self.candidates)))
        self.features_num += 1
        if self.normalization:
            self.weight_vec = addNewElementForAllActions(
                self.weight_vec,
                self.domain.actions_num,
                Q)
        else:
            self.weight_vec = addNewElementForAllActions(
                self.weight_vec,
                self.domain.actions_num)

        return self.features_num - 1
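update_relevance_stat above is the kernelized analogue of the iFDD+ statistic: for a candidate pair it accumulates phi_g * phi_h * delta and phi_g**2 * phi_h**2, and reports |sum| / sqrt(activation). A self-contained sketch, with a hypothetical stand-in for the Candidate class (only the two fields used above):

import numpy as np

class Candidate(object):
    # hypothetical stand-in exposing the fields update_relevance_stat uses
    def __init__(self):
        self.td_error_sum = 0.
        self.activation_count = 0.

cand = Candidate()
for phi_g, phi_h, delta in [(0.9, 0.8, 0.5), (0.7, 0.6, 0.4)]:
    cand.td_error_sum += phi_g * phi_h * delta
    cand.activation_count += phi_g ** 2 * phi_h ** 2
relevance = np.abs(cand.td_error_sum) / np.sqrt(cand.activation_count)
# the candidate becomes a feature once relevance > discover_threshold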
Example #6
File: iFDD.py Project: smcgregor/rlpy
class iFDD(Representation):
    # Setting the threshold to infinity and printing the max relevance is a
    # good starting point for choosing a discovery threshold.
    PRINT_MAX_RELEVANCE = False
    discovery_threshold = None  # psi in the paper
    # boolean specifying the use of the trick mentioned in the paper so that
    # features become sparser as more features are discovered (i.e. use the
    # greedy algorithm for feature activation)
    sparsify = None
    # dictionary mapping initial feature sets to iFDD_feature
    iFDD_features = None
    # dictionary mapping initial feature sets to iFDD_potential
    iFDD_potentials = None
    # dictionary mapping each feature index (ID) to its feature object
    featureIndex2feature = None
    debug = 0     # Print more stuff
    # dictionary mapping the initial active feature set phi_0(s) to its
    # corresponding active features in phi(s). Based on Tuna's trick to speed
    # up iFDD
    cache = None
    # using the cache should only increase speed; if results differ, something
    # is wrong
    useCache = 0
    # Number of features to be expanded in the batch setting
    maxBatchDiscovery = 0
    # Minimum value of feature relevance for the batch setting
    batchThreshold = 0
    # ICML 2011 iFDD adds the sum of abs(TD-errors), while iFDD+ uses
    # abs(sum(TD-errors))/sqrt(potential feature presence count)
    iFDDPlus = 0
    # A priority queue ordered by feature size (largest -> smallest); same-size
    # features are ordered newest -> oldest. Each element is a pointer to a
    # feature object.
    sortediFDDFeatures = None
    # A Representation that provides the initial set of features for iFDD
    initial_representation = None
    # Helper parameter to get a sense of appropriate threshold on the
    # relevance for discovery
    maxRelevance = -np.inf
    # As Christoph mentioned, adding new features may affect phi for all
    # states. This flag ensures both strategies for generating active
    # features produce the same result.
    use_chirstoph_ordered_features = True

    def __init__(
            self, domain, discovery_threshold, initial_representation, sparsify=True,
            discretization=20, debug=0, useCache=0, maxBatchDiscovery=1, batchThreshold=0, iFDDPlus=1):
        self.iFDD_features = {}
        self.iFDD_potentials = {}
        self.featureIndex2feature = {}
        self.cache = {}
        self.discovery_threshold = discovery_threshold
        self.sparsify = sparsify
        self.setBinsPerDimension(domain, discretization)
        self.features_num = initial_representation.features_num
        self.debug = debug
        self.useCache = useCache
        self.maxBatchDiscovery = maxBatchDiscovery
        self.batchThreshold = batchThreshold
        self.sortediFDDFeatures = PriorityQueueWithNovelty()
        self.initial_representation = initial_representation
        self.iFDDPlus = iFDDPlus
        self.isDynamic = True
        self.addInitialFeatures()
        super(iFDD, self).__init__(domain, discretization)

    def phi_nonTerminal(self, s):
        """ Based on Tuna's Master Thesis 2012 """
        F_s = np.zeros(self.features_num, 'bool')
        F_s_0 = self.initial_representation.phi_nonTerminal(s)
        activeIndices = np.where(F_s_0 != 0)[0]
        if self.useCache:
            finalActiveIndices = self.cache.get(frozenset(activeIndices))
            if finalActiveIndices is None:
                # run regular and update the cache
                finalActiveIndices = self.findFinalActiveFeatures(
                    activeIndices)
        else:
            finalActiveIndices = self.findFinalActiveFeatures(
                activeIndices)
        F_s[finalActiveIndices] = 1
        return F_s

    def findFinalActiveFeatures(self, intialActiveFeatures):
        """
        Given the active indices of phi_0(s) find the final active indices of phi(s) based on discovered features
        """
        finalActiveFeatures = []
        k = len(intialActiveFeatures)
        initialSet = set(intialActiveFeatures)

        if 2 ** k <= self.features_num:
            # 2**k can be large, making this enumeration over subsets of the
            # active initial features slow; for large k, the else-branch below
            # instead enumerates the discovered features.
            if self.use_chirstoph_ordered_features:
                for i in range(k, 0, -1):
                    if len(initialSet) == 0:
                        break
                    # generate list of all combinations with i elements
                    cand_i = [(c, self.iFDD_features[frozenset(c)].index)
                              for c in combinations(initialSet, i)
                              if frozenset(c) in self.iFDD_features]
                    # sort (recent features (big ids) first)
                    cand_i.sort(key=lambda x: x[1], reverse=True)
                    #idx = -1
                    for candidate, ind in cand_i:
                        # the next block is for testing only
                        #cur_idx = self.iFDD_features[frozenset(candidate)].index
                        # if idx > 0:
                        #    assert(idx > cur_idx)
                        #idx = cur_idx

                        if len(initialSet) == 0:
                            # No more initial features to be mapped to extended
                            # ones
                            break

                        # This check was missing from the ICML 2011 paper's
                        # algorithm. Example: active set [0,1,20] where [0,20]
                        # has been discovered; without the check, [0] would be
                        # added even though it is already covered by [0,20]
                        if initialSet.issuperset(set(candidate)):
                            feature = self.iFDD_features.get(
                                frozenset(candidate))
                            if feature is not None:
                                finalActiveFeatures.append(feature.index)
                                if self.sparsify:
                                    # print "Sets:", initialSet, feature.f_set
                                    initialSet = initialSet - feature.f_set
                                    # print "Remaining Set:", initialSet
            else:
                for candidate in powerset(initialSet, ascending=0):
                    if len(initialSet) == 0:
                        # No more initial features to be mapped to extended
                        # ones
                        break

                    # This check was missing from the ICML 2011 paper's
                    # algorithm. Example: active set [0,1,20] where [0,20] has
                    # been discovered; without the check, [0] would be added
                    # even though it is already covered by [0,20]
                    if initialSet.issuperset(set(candidate)):
                        feature = self.iFDD_features.get(frozenset(candidate))
                        if feature is not None:
                            finalActiveFeatures.append(feature.index)
                            if self.sparsify:
                                # print "Sets:", initialSet, feature.f_set
                                initialSet = initialSet - feature.f_set
                                # print "Remaining Set:", initialSet
        else:
            # print "********** Using Alternative: %d > %d" % (2**k, self.features_num)
            # Loop on all features sorted on their size and then novelty and
            # activate features
            for feature in self.sortediFDDFeatures.toList():
                if len(initialSet) == 0:
                    # No more initial features to be mapped to extended ones
                    break

                if initialSet.issuperset(set(feature.f_set)):
                    finalActiveFeatures.append(feature.index)
                    if self.sparsify:
                        # print "Sets:", initialSet, feature.f_set
                        initialSet = initialSet - feature.f_set
                        # print "Remaining Set:", initialSet

        if self.useCache:
            self.cache[frozenset(intialActiveFeatures)] = finalActiveFeatures
        return finalActiveFeatures

    def post_discover(self, s, terminal, a, td_error, phi_s):
        """
        returns the number of added features
        """
        # Indices of non-zero elements of vector phi_s
        activeFeatures = phi_s.nonzero()[0]
        discovered = 0
        for g_index, h_index in combinations(activeFeatures, 2):
            discovered += self.inspectPair(g_index, h_index, td_error)
        return discovered

    def inspectPair(self, g_index, h_index, td_error):
        # Inspect feature f = g union h where g_index and h_index are the indices of features g and h
        # If the relevance is > Threshold add it to the list of features
        # Returns True if a new feature is added
        g = self.featureIndex2feature[g_index].f_set
        h = self.featureIndex2feature[h_index].f_set
        f = g.union(h)
        feature = self.iFDD_features.get(f)
        if not self.iFDDPlus:
            td_error = abs(td_error)

        if feature is not None:
            # Already exists
            return False

        # Look it up in potentials
        potential = self.iFDD_potentials.get(f)
        if potential is None:
            # Generate a new potential and put it in the dictionary
            potential = iFDD_potential(f, g_index, h_index)
            self.iFDD_potentials[f] = potential

        potential.cumtderr += td_error
        potential.cumabstderr += abs(td_error)
        potential.count += 1
        # Check for discovery

        if np.random.rand() < self.iFDDPlus:
            relevance = abs(potential.cumtderr) / np.sqrt(potential.count)
        else:
            relevance = potential.cumabstderr

        if relevance >= self.discovery_threshold:
            self.maxRelevance = -np.inf
            self.addFeature(potential)
            return True
        else:
            self.updateMaxRelevance(relevance)
            return False

    def show(self):
        self.showFeatures()
        self.showPotentials()
        self.showCache()

    def updateWeight(self, p1_index, p2_index):
        # Add a new weight corresponding to the new added feature for all actions.
        # The new weight is set to zero if sparsify = False, and equal to the
        # sum of weights corresponding to the parents if sparsify = True
        a = self.domain.actions_num
        # Number of features before adding the new one
        f = self.features_num - 1
        if self.sparsify:
            newElem = (self.weight_vec[p1_index::f] +
                       self.weight_vec[p2_index::f]).reshape((-1, 1))
        else:
            newElem = None
        self.weight_vec = addNewElementForAllActions(self.weight_vec, a, newElem)
        # We don't want to reuse the hashed phi because the phi function has changed!
        self.hashed_s = None

    def addInitialFeatures(self):
        for i in xrange(self.initial_representation.features_num):
            feature = iFDD_feature(i)
            # shout(self,self.iFDD_features[frozenset([i])].index)
            self.iFDD_features[frozenset([i])] = feature
            self.featureIndex2feature[feature.index] = feature
            # priority is 1/number of initial features corresponding to the
            # feature
            priority = 1
            self.sortediFDDFeatures.push(priority, feature)

    def addFeature(self, potential):
        # Add it to the list of features
        # Features_num is always one more than the max index (0-based)
        potential.index = self.features_num
        self.features_num += 1
        feature = iFDD_feature(potential)
        self.iFDD_features[potential.f_set] = feature
        # Expand the size of the weight_vec
        self.updateWeight(feature.p1, feature.p2)
        # Update the index to feature dictionary
        self.featureIndex2feature[feature.index] = feature
        # print "IN IFDD, New Feature = %d => Total Features = %d" % (feature.index, self.features_num)
        # Update the sorted list of features
        # priority is 1/number of initial features corresponding to the feature
        priority = 1 / (len(potential.f_set) * 1.)
        self.sortediFDDFeatures.push(priority, feature)

        # If the cache is used, invalidate entries whose initial set contains
        # the set corresponding to the new feature
        if self.useCache:
            for initialActiveFeatures in self.cache.keys():
                if initialActiveFeatures.issuperset(feature.f_set):
                    if self.sparsify:
                        self.cache.pop(initialActiveFeatures)
                    else:
                        # If sparsification is not used, simply append the new
                        # feature id to every cached value whose initial
                        # feature set is a superset of the new feature's set
                        self.cache[initialActiveFeatures].append(feature.index)
        if self.debug:
            self.show()

    def batchDiscover(self, td_errors, phi, states):
        # Discovers features using iFDD in the batch setting.
        # td_errors: p-by-1 (error observed for each sample)
        # phi: p-by-n feature matrix (each row corresponds to one sample)
        # self.batchThreshold is the minimum relevance value for a feature to
        # be expanded
        SHOW_PLOT = 0  # Plot the sorted relevance values
        maxDiscovery = self.maxBatchDiscovery
        n = self.features_num  # number of features
        p = len(td_errors)  # Number of samples
        counts = np.zeros((n, n))
        relevances = np.zeros((n, n))
        for i in xrange(p):
            phiphiT = np.outer(phi[i, :], phi[i, :])
            if self.iFDDPlus:
                relevances += phiphiT * td_errors[i]
            else:
                relevances += phiphiT * abs(td_errors[i])
            counts += phiphiT
        # Keep only the strict upper triangle; the diagonal and lower triangle
        # are redundant
        relevances = np.triu(relevances, 1)
        non_zero_index = np.nonzero(relevances)
        if self.iFDDPlus:
            # Calculate relevances based on theoretical results of ICML 2013
            # potential submission
            relevances[non_zero_index] = np.divide(
                np.abs(relevances[non_zero_index]),
                np.sqrt(counts[non_zero_index]))
        else:
            # Based on the Geramifard 2011 ICML paper: relevances already hold
            # the accumulated |TD-errors|, so nothing further to compute here
            pass

        # Find indices of the non-zero excited pairs
        # F1 and F2 are the parents of the potentials
        (F1, F2) = relevances.nonzero()
        relevances = relevances[F1, F2]
        if len(relevances) == 0:
            # No feature to add
            self.logger.debug("iFDD Batch: Max Relevance = 0")
            return False

        if SHOW_PLOT:
            e_vec = relevances.flatten()
            e_vec = e_vec[e_vec != 0]
            e_vec = np.sort(e_vec)
            plt.ioff()
            plt.plot(e_vec, linewidth=3)
            plt.show()

        # Sort based on relevances
        # We want high to low hence the reverse: [::-1]
        sortedIndices = np.argsort(relevances)[::-1]
        max_relevance = relevances[sortedIndices[0]]
        # Add top <maxDiscovery> features
        self.logger.debug(
            "iFDD Batch: Max Relevance = {0:g}".format(max_relevance))
        added_feature = False
        new_features = 0
        for j in xrange(len(relevances)):
            if new_features >= maxDiscovery:
                break
            max_index = sortedIndices[j]
            f1 = F1[max_index]
            f2 = F2[max_index]
            relevance = relevances[max_index]
            if relevance > self.batchThreshold:
                # print "Inspecting",
                # f1,f2,'=>',self.getStrFeatureSet(f1),self.getStrFeatureSet(f2)
                if self.inspectPair(f1, f2, np.inf):
                    self.logger.debug(
                        'New Feature %d: %s, Relevance = %0.3f' %
                        (self.features_num - 1, self.getStrFeatureSet(self.features_num - 1), relevances[max_index]))
                    new_features += 1
                    added_feature = True
            else:
                # Because the list is sorted, there is no use to look at the
                # others
                break
        # Signal whether the representation has been expanded
        return added_feature

    def showFeatures(self):
        print "Features:"
        print "-" * 30
        print " index\t| f_set\t| p1\t| p2\t | Weights (per action)"
        print "-" * 30
        for feature in reversed(self.sortediFDDFeatures.toList()):
            # for feature in self.iFDD_features.itervalues():
            # print " %d\t| %s\t| %s\t| %s\t| %s" %
            # (feature.index,str(list(feature.f_set)),feature.p1,feature.p2,str(self.weight_vec[feature.index::self.features_num]))
            print " %d\t| %s\t| %s\t| %s\t| Omitted" % (
                feature.index, self.getStrFeatureSet(feature.index),
                feature.p1, feature.p2)

    def showPotentials(self):
        print "Potentials:"
        print "-" * 30
        print " index\t| f_set\t| relevance\t| count\t| p1\t| p2"
        print "-" * 30
        for _, potential in self.iFDD_potentials.iteritems():
            print " %d\t| %s\t| %0.2f\t| %d\t| %s\t| %s" % (potential.index, str(np.sort(list(potential.f_set))), potential.relevance, potential.count, potential.p1, potential.p2)

    def showCache(self):
        if self.useCache:
            print "Cache:"
            if len(self.cache) == 0:
                print 'EMPTY!'
                return
            print "-" * 30
            print " initial\t| Final"
            print "-" * 30
            for initial, active in self.cache.iteritems():
                print " %s\t| %s" % (str(list(initial)), active)

    def updateMaxRelevance(self, newRelevance):
        # Update the global max relevance and log it if it increased
        if self.maxRelevance < newRelevance:
            self.maxRelevance = newRelevance
            if self.PRINT_MAX_RELEVANCE:
                self.logger.debug(
                    "iFDD Batch: Max Relevance = {0:g}".format(newRelevance))

    def getFeature(self, f_id):
        # returns a feature given a feature id
        if f_id in self.featureIndex2feature:
            return self.featureIndex2feature[f_id]
        else:
            print "F_id %d is not valid" % f_id
            return None

    def getStrFeatureSet(self, f_id):
        # returns a string that corresponds to the set of features specified by
        # the given feature_id
        if f_id in self.featureIndex2feature:
            return str(sorted(list(self.featureIndex2feature[f_id].f_set)))
        else:
            print "F_id %d is not valid" % f_id
            return None

    def featureType(self):
        return bool

    def __deepcopy__(self, memo):
        ifdd = iFDD(
            self.domain,
            self.discovery_threshold,
            self.initial_representation,
            self.sparsify,
            self.discretization,
            self.debug,
            self.useCache,
            self.maxBatchDiscovery,
            self.batchThreshold,
            self.iFDDPlus)
        for s, f in self.iFDD_features.items():
            new_f = deepcopy(f)
            new_s = deepcopy(s)
            ifdd.iFDD_features[new_s] = new_f
            ifdd.featureIndex2feature[new_f.index] = new_f
        for s, p in self.iFDD_potentials.items():
            new_s = deepcopy(s)
            new_p = deepcopy(p)
            ifdd.iFDD_potentials[new_s] = deepcopy(new_p)
        ifdd.cache = deepcopy(self.cache)
        ifdd.sortediFDDFeatures = deepcopy(self.sortediFDDFeatures)
        ifdd.features_num = self.features_num
        ifdd.weight_vec = deepcopy(self.weight_vec)
        return ifdd
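findFinalActiveFeatures in both iFDD listings calls a powerset helper with ascending=0. A sketch of the behavior they assume (all non-empty subsets ordered by size, largest first; the project's own helper lives in its tools module and may differ in detail):

from itertools import chain, combinations

def powerset(iterable, ascending=1):
    s = list(iterable)
    sizes = list(range(1, len(s) + 1))
    if not ascending:
        sizes = sizes[::-1]
    # yield subsets ordered by cardinality
    return chain.from_iterable(combinations(s, r) for r in sizes)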