Example #1
def disjunction(n=3):
    """Settings for Bergen et al.'s figure 6. Seems to reproduce the effects they report."""
    # bl = {'p': [r'$w_1$', r'$w_2$'], 'q': [r'$w_2$', r'$w_3$']}
    bl = {'p': ['1', '2'], 'q':['1', '3'], 'p & q': ['1'], 'p v q': ['1', '2', '3']}
    lexica = Lexica(baselexicon=bl,
                    atomic_states=['1', '2', '3'],
                    disjunction_cost=1.0,
                    conjunction_cost=0.0,
                    null_cost=5.0,                    
                    join_closure=True,
                    meet_closure=False,
                    block_trivial_messages=True,
                    block_ineffability=False)
    
    lexica.display()
    # Inspect one of the generated lexica (index 8) as a sanity check:
    for key, val in lexica.lexica[8].items():
        print key, len(val), val
    mod = Pragmod(lexica=lexica.lexica2matrices(),
                  messages=lexica.messages,
                  meanings=lexica.states,
                  costs=lexica.cost_vector(),
                  prior=np.repeat(1.0/len(lexica.states), len(lexica.states)),
                  lexprior=np.repeat(1.0/len(lexica), len(lexica)),
                  temperature=1.0,
                  alpha=1.0)
    #mod.plot_expertise_listener(output_filename='../paper/fig/scalardisj-expertise-listener-marginalized.pdf', n=n)
    #mod.plot_expertise_speaker(output_filename='../paper/fig/scalardisj-expertise-speaker.pdf', n=n)
    #mod.plot_expertise_speaker(output_filename='../paper/fig/scalardisj-expertise-speaker-lexsum.pdf', n=n, lexsum=True)
    mod.plot_expertise_listener(n=n)
Example #2
def Q_implicature_simulation_datapoint(specific_cost, dcost=1.0, alpha=2.0):
    # Values to obtain:
    is_max = False
    listener_val = None
    speaker_val = None
    # Set-up:
    lexica = Lexica(baselexicon=BASELEXICON, costs={GENERAL_MSG: 0.0, SPECIFIC_MSG: specific_cost}, join_closure=True, nullsem=True, nullcost=5.0, disjunction_cost=dcost)
    ref_probs = np.repeat(1.0/len(lexica.states), len(lexica.states))
    lexprior = np.repeat(1.0/len(lexica.lexica2matrices()), len(lexica.lexica2matrices()))
    # Run the model:
    mod = Pragmod(lexica=lexica.lexica2matrices(), messages=lexica.messages, states=lexica.states, costs=lexica.cost_vector(), lexprior=lexprior, prior=ref_probs, alpha=alpha)
    langs = mod.run_expertise_model(n=3, display=False, digits=2)
    # Get the values we need:
    speaker = mod.speaker_lexical_marginalization(langs[-2])
    listener = mod.listener_lexical_marginalization(langs[-3])
    general_msg_index = lexica.messages.index(GENERAL_MSG)
    general_only_state = lexica.states.index(GENERAL_ONLY_REF)
    disj_state_index = lexica.states.index(DISJ_REF)
    disj_msg_index = lexica.messages.index(DISJ_MSG)
    speaker_val = speaker[disj_state_index, disj_msg_index]
    listener_val = listener[general_msg_index, general_only_state]
    # Determine whether max, with a bit of rounding to avoid spurious mismatch diagnosis:
    maxspkval = np.max(speaker[disj_state_index])
    is_max = np.round(speaker_val, 10) == np.round(maxspkval, 10)
    # Return values:
    return (listener_val, speaker_val, is_max)
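
A hypothetical driver for the datapoint function above, sketched here only for orientation: it sweeps a range of costs for the specific message and prints the three returned values. It assumes numpy is imported as np and that the module-level constants referenced above (BASELEXICON, GENERAL_MSG, SPECIFIC_MSG, GENERAL_ONLY_REF, DISJ_MSG, DISJ_REF) are defined; the name sweep_specific_costs is illustrative and not part of the original code.

def sweep_specific_costs(specific_costs=np.arange(0.0, 5.0, 1.0), dcost=1.0, alpha=2.0):
    """Hypothetical helper: tabulate Q-implicature datapoints over specific-message costs."""
    for cost in specific_costs:
        lis_val, spk_val, is_max = Q_implicature_simulation_datapoint(cost, dcost=dcost, alpha=alpha)
        print("cost(specific)=%s  listener=%s  speaker=%s  speaker-max=%s" % (
            cost, round(lis_val, 4), round(spk_val, 4), is_max))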
Example #3
def simple_disjunction(n=3):    
    # Rows are the messages ('vee', 'wedge', 'NULL'); columns are the worlds w_1, w_2, w_3.
    Lex = np.array([[1.0, 1.0, 0.0],
                    [1.0, 0.0, 0.0],
                    [1.0, 1.0, 1.0]])

    lexica = [Lex,
              np.array([[1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 1.0]]),
              np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 1.0]])]

    # Lex = np.array([[1.0, 1.0],
    #                 [1.0, 0.0],
    #                 [1.0, 1.0]])

    # lexica = [Lex,
    #           np.array([[1.0, 0.0], [1.0, 0.0], [1.0, 1.0]]),
    #           np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])]
    
    mod = Pragmod(lexica=lexica,
                  messages=[r'vee', r'wedge', 'NULL'],
                  meanings=['w_{1}', 'w_{2}', 'w_{3}'],
                  costs=np.array([0.0, 0.0, 5.0]),
                  prior=np.repeat(1.0/3.0, 3),
                  lexprior=np.repeat(1.0/3.0, 3),
                  temperature=1.0,
                  alpha=1.0,
                  beta=0.0)

    mod.run_base_model(lexica[0], n=n, display=True, digits=4)
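
As a quick orientation to what these matrices encode, the sketch below (not part of the original example) computes the literal listener for the first lexicon by hand, assuming the standard RSA definition: weight each message's truth-value row by the state prior and renormalize.

import numpy as np

# Truth values for 'vee', 'wedge', 'NULL' (rows) over worlds w_1, w_2, w_3 (columns):
Lex = np.array([[1.0, 1.0, 0.0],
                [1.0, 0.0, 0.0],
                [1.0, 1.0, 1.0]])
prior = np.repeat(1.0/3.0, 3)
literal_listener = Lex * prior
literal_listener = literal_listener / literal_listener.sum(axis=1)[:, np.newaxis]
print(literal_listener)
# 'vee' -> [0.5, 0.5, 0.0]; 'wedge' -> [1.0, 0.0, 0.0]; 'NULL' -> uniform over the three worlds.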
Example #4
def I_implicature_simulation_datapoint(common_ref_prob, dcost=1.0, alpha=2.0):
    # Values to obtain:
    is_max = False
    listener_val = None
    speaker_val = None
    # Set-up:
    lexica = Lexica(baselexicon=BASELEXICON, costs=LEXICAL_COSTS, join_closure=True, nullsem=True, nullcost=5.0, disjunction_cost=dcost)
    ref_probs = np.array([common_ref_prob, (1.0-common_ref_prob)/2.0, (1.0-common_ref_prob)/2.0])
    lexprior = np.repeat(1.0/len(lexica.lexica2matrices()), len(lexica.lexica2matrices()))
    # Run the model:
    mod = Pragmod(lexica=lexica.lexica2matrices(), messages=lexica.messages, states=lexica.states, costs=lexica.cost_vector(), lexprior=lexprior, prior=ref_probs, alpha=alpha)
    langs = mod.run_expertise_model(n=3, display=False, digits=2)
    # Get the values we need:
    speaker = mod.speaker_lexical_marginalization(langs[-2])
    listener = mod.listener_lexical_marginalization(langs[-3])
    superkind_term_index = mod.messages.index(SUPERKIND_MSG)
    common_state_index = mod.states.index(COMMON_REF)
    disj_term_index = mod.messages.index(DISJ_MSG)
    disj_state_index = mod.states.index(DISJ_REF)
    # Fill in listener_val and speaker_val:
    listener_val = listener[superkind_term_index, common_state_index]
    speaker_val = speaker[disj_state_index, disj_term_index]
    # Determine whether max, with a bit of rounding to avoid spurious mismatch diagnosis:
    maxspkval = np.max(speaker[disj_state_index])
    is_max = np.round(speaker_val, 10) == np.round(maxspkval, 10)
    # Return values:
    return (listener_val, speaker_val, is_max)
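
As with the Q-implicature case, a hypothetical sweep over the prior probability of the common referent might look like the following. It assumes the same module-level constants as the function above (BASELEXICON, LEXICAL_COSTS, SUPERKIND_MSG, COMMON_REF, DISJ_MSG, DISJ_REF) plus numpy as np; sweep_common_ref_probs is an illustrative name, not part of the original code.

def sweep_common_ref_probs(ref_probs=np.arange(0.1, 1.0, 0.2), dcost=1.0, alpha=2.0):
    """Hypothetical helper: collect I-implicature datapoints over common-referent priors."""
    results = []
    for p in ref_probs:
        lis_val, spk_val, is_max = I_implicature_simulation_datapoint(p, dcost=dcost, alpha=alpha)
        results.append((p, lis_val, spk_val, is_max))
        print("P(common)=%s  listener=%s  speaker=%s  speaker-max=%s" % (
            round(p, 2), round(lis_val, 4), round(spk_val, 4), is_max))
    return results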
Example #5
def scalars(n=3):
    """Scalar example without and with disjunction; compare with Bergen et al.'s figure 5 and figure 9"""
    baselexicon = {'some': [r'$w_{\exists\neg\forall}$', r'$w_{\forall}$'], 'all': [r'$w_{\forall}$']}
    # Lexica without disjunctive closure, for comparison with the closure case below:
    basic_lexica = Lexica(baselexicon=baselexicon)
    lexica = Lexica(baselexicon=baselexicon, join_closure=True, disjunction_cost=0.1)
    lexica.display()    
    mod = Pragmod(lexica=lexica.lexica2matrices(),
                  messages=lexica.messages,
                  meanings=lexica.states,
                  costs=lexica.cost_vector(),
                  prior=np.repeat(1.0/len(lexica.states), len(lexica.states)),
                  lexprior=np.repeat(1.0/len(lexica), len(lexica)),
                  temperature=1.0)
    #mod.run_expertise_model(n=n, display=True, digits=4)
    #mod.plot_expertise_listener(output_filename='../paper/fig/scalar-expertise-listener-marginalized.pdf', n=n)
    mod.plot_expertise_speaker(output_filename='../paper/fig/scalar-expertise-speaker.pdf', n=n)
Example #7
def manner(n=3):
    """Settings for Bergen et al.'s figure 6. Seems to reproduce the effects they report."""
    lexica = Lexica(baselexicon={'SHORT': [r'$w_{RARE}$', r'$w_{FREQ}$'], r'long': [r'$w_{RARE}$', r'$w_{FREQ}$']},
                    costs={'SHORT':1.0, r'long':2.0},
                    null_cost=5.0,
                    join_closure=False,
                    disjunction_cost=0.1)
    lexica.display()   
    mod = Pragmod(lexica=lexica.lexica2matrices(),
                  messages=lexica.messages,
                  meanings=lexica.states,
                  costs=lexica.cost_vector(),
                  prior=np.array([2.0/3.0, 1.0/3.0]),
                  lexprior=np.repeat(1.0/len(lexica), len(lexica)),
                  temperature=1.0,
                  alpha=3.0)
    mod.plot_expertise_listener(output_filename='../paper/fig/manner-expertise-listener-marginalized.pdf', n=n)
    mod.plot_expertise_speaker(output_filename='../paper/fig/manner-expertise-speaker.pdf', n=n)
    mod.plot_expertise_speaker(output_filename='../paper/fig/manner-expertise-speaker-lexsum.pdf', n=n, lexsum=True)
Example #8
class Experiment:
    def __init__(
        self,
        n=3,
        disjunction_cost=0.0,
        lexical_costs={"A": 0.0, "B": 0.0, "X": 0.0},
        temperature=1.0,
        alpha=1.0,
        beta=1.0,
        baselexicon={"A": ["1"], "B": ["2"], "X": ["1", "2"]},
        prior=None,
        lexprior=None,
        null_cost=5.0,
        unknown_word=None,
    ):
        self.n = n
        self.disjunction_cost = disjunction_cost
        self.lexical_costs = lexical_costs
        self.temperature = temperature
        self.alpha = alpha
        self.beta = beta
        self.baselexicon = baselexicon
        self.prior = prior
        self.lexprior = lexprior
        self.null_cost = null_cost
        self.unknown_word = unknown_word

    def build(self):
        lex = Lexica(
            baselexicon=self.baselexicon,
            join_closure=True,
            disjunction_cost=self.disjunction_cost,
            nullsem=True,
            null_cost=self.null_cost,
            costs=copy.copy(self.lexical_costs),
            unknown_word=self.unknown_word,
        )

        self.lexica = lex.lexica2matrices()
        self.states = lex.states
        self.messages = lex.messages

        if self.prior is None:
            self.prior = np.repeat(1.0 / len(self.states), len(self.states))

        if self.lexprior is None:
            self.lexprior = np.repeat(1.0 / len(self.lexica), len(self.lexica))

        self.model = Pragmod(
            lexica=self.lexica,
            messages=self.messages,
            meanings=self.states,
            costs=lex.cost_vector(),
            prior=self.prior,
            lexprior=self.lexprior,
            temperature=self.temperature,
            alpha=self.alpha,
            beta=self.beta,
        )

        self.langs = self.model.run_expertise_model(n=self.n, display=True)

    ######################################################################
    ##### LISTENER PERSPECTIVE

    def listener_inference(self, msg="A v X"):
        """Return the joint listener table for `msg`: rows are lexica, columns are states."""
        final_lis_lang = self.langs[-1]
        msg_index = self.messages.index(msg)
        prob_table = []
        for lex_index in range(len(self.lexica)):
            row = np.array([final_lis_lang[lex_index][msg_index][j] for j in range(len(self.states))])
            prob_table.append(row)
        return np.array(prob_table)

    def show_max_lex_state_values(self, joint_prob_table, precision=10):
        max_prob = np.round(np.max(joint_prob_table), precision)
        for i, j in product(range(len(self.lexica)), range(len(self.states))):
            if np.round(joint_prob_table[i, j], precision) == max_prob:
                print "<Lex%s, %s>: %s" % (i, self.states[j], joint_prob_table[i, j])

    def display_listener_inference(self, msg="A v X", digits=3):
        colwidth = max([len(x) for x in self.messages + self.states] + [digits]) + 4
        print "--------------------------------------------------"
        print self.params2str()
        prob_table = self.listener_inference(msg=msg)
        headervals = [""] + self.states + ["sum(lex)"]
        print self.rowformatter(headervals, colwidth=colwidth)
        for lex_index in range(len(self.lexica)):
            rowvals = (
                ["Lex%s" % lex_index]
                + [round(x, digits) for x in prob_table[lex_index]]
                + [np.round(np.sum(prob_table[lex_index]), digits)]
            )
            print self.rowformatter(rowvals, colwidth=colwidth)
        print self.rowformatter(["sum(state)"] + list(np.round(np.sum(prob_table, axis=0), digits)), colwidth=colwidth)

    def plot_listener_inference_depth_values(
        self,
        msg="A v X",
        target_state="1 v 2",
        n_values=np.arange(1, 5, 1),
        legend_loc="upper right",
        output_filename=None,
        progress_report=True,
    ):
        self.plot_listener_inference_parameter_space(
            msg=msg,
            target_state=target_state,
            parameter_name="n",
            parameter_text="Depth",
            parameter_values=n_values,
            legend_loc=legend_loc,
            output_filename=output_filename,
            progress_report=progress_report,
        )

    def plot_listener_inference_beta_values(
        self,
        msg="A v X",
        target_state="1 v 2",
        beta_values=np.arange(0.01, 5.0, 0.01),
        legend_loc="upper right",
        output_filename=None,
        progress_report=True,
    ):
        self.plot_listener_inference_parameter_space(
            msg=msg,
            target_state=target_state,
            parameter_name="beta",
            parameter_text=r"$\beta$",
            parameter_values=beta_values,
            legend_loc=legend_loc,
            output_filename=output_filename,
            progress_report=progress_report,
        )

    def plot_listener_inference_alpha_values(
        self,
        msg="A v X",
        target_state="1 v 2",
        alpha_values=np.arange(0.01, 5.0, 0.01),
        legend_loc="upper right",
        output_filename=None,
        progress_report=True,
    ):
        self.plot_listener_inference_parameter_space(
            msg=msg,
            target_state=target_state,
            parameter_name="alpha",
            parameter_text=r"$\alpha$",
            parameter_values=alpha_values,
            legend_loc=legend_loc,
            output_filename=output_filename,
            progress_report=progress_report,
        )

    def plot_listener_inference_disjunction_costs(
        self,
        msg="A v X",
        target_state="1 v 2",
        disjunction_cost_values=np.arange(0.0, 5.0, 0.01),
        legend_loc="upper right",
        output_filename=None,
        progress_report=True,
    ):
        self.plot_listener_inference_parameter_space(
            msg=msg,
            target_state=target_state,
            parameter_name="disjunction_cost",
            parameter_values=disjunction_cost_values,
            legend_loc=legend_loc,
            output_filename=output_filename,
            progress_report=progress_report,
        )

    def plot_listener_inference_lambda_values(
        self,
        msg="A v X",
        target_state="1 v 2",
        lambda_values=np.arange(0.01, 5.0, 0.01),
        legend_loc="upper right",
        output_filename=None,
        progress_report=True,
    ):
        self.plot_listener_inference_parameter_space(
            msg=msg,
            target_state=target_state,
            parameter_name="temperature",
            parameter_text=r"$\lambda$",
            parameter_values=lambda_values,
            legend_loc=legend_loc,
            output_filename=output_filename,
            progress_report=progress_report,
        )

    def plot_listener_inference_parameter_space(
        self,
        msg="A v X",
        target_state="1 v 2",
        parameter_name="disjunction_cost",
        parameter_text=None,
        parameter_values=np.arange(0.0, 5.0, 0.01),
        legend_loc="upper right",
        output_filename=None,
        progress_report=True,
    ):
        probs = defaultdict(list)
        # Store the original value so it can be restored after the sweep:
        original = getattr(self, parameter_name)
        # Calculate and organize the values:
        for paramval in parameter_values:
            setattr(self, parameter_name, paramval)
            self.build()
            target_state_index = self.states.index(target_state)
            if progress_report:
                self.display_listener_inference(msg=msg)
            prob_table = self.listener_inference(msg=msg)
            for lex_index in range(len(self.lexica)):
                prob = prob_table[lex_index][target_state_index]
                sorted_probs = sorted(prob_table.flatten())
                maxval = False
                # Add a True max flag iff this prob is the max and the max is unique:
                if sorted_probs[-1] == prob and sorted_probs[-1] != sorted_probs[-2]:
                    maxval = True
                probs[lex_index].append((paramval, prob, maxval))
        # Restore the original:
        setattr(self, parameter_name, original)
        # Plot:
        if parameter_text is None:
            parameter_text = parameter_name
        fig = plt.figure(figsize=(fig_width, fig_height))
        for lex_index in range(len(self.lexica)):
            paramvals, vals, maxval_markers = zip(*probs[lex_index])
            lex_rep = self.lex2str(self.lexica[lex_index])
            plt.plot(
                paramvals,
                vals,
                marker="",
                linestyle="-",
                label=lex_rep,
                color=colors[lex_index],
                markersize=0,
                linewidth=3,
            )
        for lex_index in range(len(self.lexica)):
            # Dots mark max-values in the joint table --- best inferences for the listener:
            dots = [(paramval, val) for paramval, val, max_marker in probs[lex_index] if max_marker]
            if dots:
                dotsx, dotsy = zip(*dots)
                plt.plot(dotsx, dotsy, marker="o", linestyle="", color=colors[lex_index], markersize=8)
        plt.title("Listener hears '%s'\n\n%s" % (msg, self.params2str(exclude=[parameter_name])), fontsize=title_size)
        plt.xlabel(parameter_text, fontsize=axis_label_size)
        plt.ylabel(r"Listener probability for $\langle$Lex, %s$\rangle$" % target_state, fontsize=axis_label_size)
        plt.legend(loc=legend_loc)
        plt.text(0.01, 0.95, "dots mark max values", fontsize=14)
        x1, x2, y1, y2 = plt.axis()
        plt.axis((x1, x2, 0.0, 1.0))
        if output_filename:
            plt.savefig(output_filename)
        else:
            plt.show()

    ######################################################################
    ##### SPEAKER PERSPECTIVE

    def speaker_behavior(self, state="1"):
        """Return the speaker table for `state`: rows are messages, columns are lexica."""
        final_spk_lang = self.langs[-2]
        state_index = self.states.index(state)
        prob_table = []
        for msg_index in range(len(self.messages)):
            row = np.array([final_spk_lang[msg_index][state_index][j] for j in range(len(self.lexica))])
            prob_table.append(row)
        return np.array(prob_table)

    def show_max_message_values(self, spk_prob_table, precision=10):
        for j in range(len(self.lexica)):
            probs = np.round(spk_prob_table[:, j], precision)
            maxprob = np.max(probs)
            indices = [i for i, val in enumerate(probs) if val == maxprob]
            msgs = [self.messages[i] for i in indices]
            print "Lex%s %s: {%s} \t prob = %s" % (j, self.lex2str(self.lexica[j]), ", ".join(msgs), maxprob)

    def display_speaker_behavior(self, state="1", digits=3):
        colwidth = max([len(x) for x in self.messages] + [digits]) + 4
        print "--------------------------------------------------"
        print self.params2str()
        prob_table = self.speaker_behavior(state=state)
        headervals = [""] + ["Lex%s" % i for i in range(len(self.lexica))]
        print self.rowformatter(headervals, colwidth=colwidth)
        for msg_index, msg in enumerate(self.messages):
            rowvals = [msg] + [round(x, digits) for x in prob_table[msg_index]]
            print self.rowformatter(rowvals, colwidth=colwidth)

    def plot_speaker_behavior_disjunction_costs(
        self,
        state="1 v 2",
        lexicon=0,
        disjunction_cost_values=np.arange(0.0, 5.0, 0.05),
        legend_loc="upper right",
        output_filename=None,
        progress_report=True,
    ):
        self.plot_speaker_behavior_parameter_space(
            state=state,
            lexicon=lexicon,
            parameter_name="disjunction_cost",
            parameter_values=disjunction_cost_values,
            legend_loc=legend_loc,
            output_filename=output_filename,
            progress_report=progress_report,
        )

    def plot_speaker_behavior_beta_values(
        self,
        state="1 v 2",
        lexicon=0,
        beta_values=np.arange(0.01, 5.0, 0.05),
        legend_loc="upper right",
        output_filename=None,
        progress_report=True,
    ):
        self.plot_speaker_behavior_parameter_space(
            state=state,
            lexicon=lexicon,
            parameter_name="beta",
            parameter_text=r"$\beta$",
            parameter_values=beta_values,
            legend_loc=legend_loc,
            output_filename=output_filename,
            progress_report=progress_report,
        )

    def plot_speaker_behavior_alpha_values(
        self,
        state="1 v 2",
        lexicon=0,
        alpha_values=np.arange(0.01, 5.0, 0.05),
        legend_loc="upper right",
        output_filename=None,
        progress_report=True,
    ):
        self.plot_speaker_behavior_parameter_space(
            state=state,
            lexicon=lexicon,
            parameter_name="alpha",
            parameter_text=r"$\alpha$",
            parameter_values=alpha_values,
            legend_loc=legend_loc,
            output_filename=output_filename,
            progress_report=progress_report,
        )

    def plot_speaker_behavior_parameter_space(
        self,
        state="1 v 2",
        lexicon=0,
        parameter_name="disjunction_cost",
        parameter_text=None,
        parameter_values=np.arange(0.0, 5.0, 0.05),
        legend_loc="upper right",
        output_filename=None,
        progress_report=True,
    ):
        probs = defaultdict(list)
        # Store the original value so it can be restored after the sweep:
        original = getattr(self, parameter_name)
        # Calculate and organize the values:
        for paramval in parameter_values:
            setattr(self, parameter_name, paramval)
            self.build()
            if progress_report:
                self.display_speaker_behavior(state=state)
            prob_table = self.speaker_behavior(state=state)
            for msg_index, msg in enumerate(self.messages):
                prob = prob_table[msg_index, lexicon]
                probs[msg_index].append((paramval, prob))
        # Restore the original parameter setting:
        setattr(self, parameter_name, original)
        # Plotting:
        if parameter_text is None:
            parameter_text = parameter_name
        fig = plt.figure(figsize=(fig_width, fig_height))
        for msg_index, msg in enumerate(self.messages):
            paramvals, vals = zip(*probs[msg_index])
            plt.plot(
                paramvals, vals, marker="", linestyle="-", label=msg, color=colors[msg_index], markersize=0, linewidth=3
            )
        # Annotations:
        lex_rep = self.lex2str(self.lexica[lexicon])
        plt.title(
            "Speaker observes <%s, Lexicon: %s> \n\n%s" % (state, lex_rep, self.params2str(exclude=[parameter_name])),
            fontsize=title_size,
        )
        plt.xlabel(parameter_text, fontsize=axis_label_size)
        plt.ylabel(r"Speaker probability for producing message", fontsize=axis_label_size)
        plt.legend(loc=legend_loc)
        x1, x2, y1, y2 = plt.axis()
        plt.axis((x1, x2, -0.01, 1.01))
        if output_filename:
            plt.savefig(output_filename)
        else:
            plt.show()

    ######################################################################
    ##### PRINTING

    def rowformatter(self, row, colwidth=12):
        return "".join([str(x).rjust(colwidth) for x in row])

    def params2str(self, joiner="; ", exclude=[]):
        vals = []
        params = {
            "n": r"$n$",
            "temperature": r"$\lambda$",
            "disjunction_cost": r"cost($\vee$)",
            "lexical_costs": r"costs",
            "null_cost": r"cost$(\emptyset)$",
            "alpha": r"$\alpha$",
            "beta": r"$\beta$",
        }
        for x in sorted(params):
            if x not in exclude:
                if x == "lexical_costs":
                    for p, c in sorted(self.lexical_costs.items()):
                        vals.append("cost(%s): %s" % (p, c))
                else:
                    vals.append("%s: %s" % (params[x], getattr(self, x)))
        return joiner.join(vals)

    def lex2str(self, lexicon_or_lexicon_index):
        lexicon = lexicon_or_lexicon_index
        if isinstance(lexicon, int):
            lexicon = self.lexica[lexicon_or_lexicon_index]

        def state_sorter(x):
            return sorted(x, key=len)

        entries = []
        for p_index, p in enumerate(sorted(self.baselexicon.keys())):
            sem = [s for i, s in enumerate(self.states) if lexicon[p_index][i] > 0.0 and DISJUNCTION_SIGN not in s]
            entry = p + "={" + ",".join(state_sorter(sem)) + "}"
            entries.append(entry)
        return "; ".join(entries)