Example #1
def main():
    k = int(sys.argv[1])
    T = int(sys.argv[2])
    text = sys.stdin.read()
    model = MarkovModel(text, k)
    z = model.gen(text[:k], T)
    stdio.writeln(z)
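These stdio-style clients omit their imports; the full programs presumably begin with import sys, import stdio, and an import of MarkovModel. A typical run (script and corpus names here are hypothetical) passes the order k and output length T on the command line and the training text on standard input:

    # python textgenerator.py 5 1000 < corpus.txt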
Example #2
def main():
    # Read the order v and the corrupted text s from the command line.
    v, s = int(sys.argv[1]), sys.argv[2]
    t = sys.stdin.read()
    model = MarkovModel(t, v)
    r = model.replace_unknown(s)
    for i in range(len(r)):
        if i == len(r) - 1:
            stdio.writeln(r[i])
        else:
            stdio.write(r[i])
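The decode clients (this one, Example #4, and Example #17) instead take the corrupted text s as the second argument, again with the training text on standard input; hypothetically:

    # python fixtext.py 5 "it w~s the b~st of t~mes" < corpus.txt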
Example #3
def main():
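    # (Excerpt: k, T, and text are presumably read from the command line and
    # standard input earlier in the full program; compare Example #1.)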

    # Create a Markov model named model using text and k.
    model = MarkovModel(text, k)

    # Use model.gen() to generate a random text of length T and
    # starting with the first k characters of text.
    rand = model.gen(text[:k], T)

    # Write the random text to standard output.
    stdio.writeln(rand)
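For orientation, a minimal sketch of the character-level interface these clients rely on is shown below. It is an illustrative reconstruction, not the actual MarkovModel from any project above: it builds an order-k model over a circularly wrapped text, and gen() behaves as described in the comments of Example #3 (replace_unknown() is omitted).

import random

class MarkovModel:
    def __init__(self, text, k):
        # For every k-gram in the circularly wrapped text, count how often
        # each character follows it.
        self.k = k
        circ = text + text[:k]
        self.counts = {}
        for i in range(len(text)):
            kgram = circ[i:i + k]
            follower = circ[i + k]
            freq = self.counts.setdefault(kgram, {})
            freq[follower] = freq.get(follower, 0) + 1

    def gen(self, kgram, T):
        # Generate T characters, starting with the given k-gram and repeatedly
        # sampling the next character in proportion to its observed frequency.
        out = kgram
        while len(out) < T:
            freq = self.counts[out[-self.k:]]
            chars, weights = zip(*freq.items())
            out += random.choices(chars, weights=weights)[0]
        return out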
Example #4
def main():
    k = int(sys.argv[1])
    s = sys.argv[2]
    text = sys.stdin.read()
    model = MarkovModel(text, k)
    r = model.replace_unknown(s)
    for i in range(len(r)):
        if i == len(r) - 1:
            stdio.writeln(r[i])
        else:
            stdio.write(r[i])
Example #5
    def __init__(self, num_positions=1000, num_orientations=10, map=None, grid_debug=False):
        # Build the Markov model
        self.markov_model = MarkovModel(num_positions=num_positions, num_orientations=num_orientations, map=map)
        self.markov_model.make_states(grid_debug=grid_debug)
        self.markov_model.build_roadmap()
        self.num_states = num_positions * num_orientations  # S
        if grid_debug:
            self.num_states = self.markov_model.num_states
        # Visualization
        self.turn_right_pub = rospy.Publisher('/right_pose_array', PoseArray, queue_size=10)
        self.turn_left_pub = rospy.Publisher('/left_pose_array', PoseArray, queue_size=10)
        self.forward_pub = rospy.Publisher('/forward_pose_array', PoseArray, queue_size=10)
        self.goal_state_pub = rospy.Publisher('/goal_state_marker', Marker, queue_size=10)
Example #6
    def __init__(self, k, dict_corpora):
        '''(TopModel, int, dict) --> None'''
        topmode = {}
        for nome in dict_corpora:
            topmode[nome] = MarkovModel(k, dict_corpora[nome])
        self.tp = topmode
        self.k = k
Example #7
    def __init__(self, num_positions=1000, num_orientations=10, map=None, grid_debug=False, seed=False):
        # Build the Markov model
        self.markov_model = MarkovModel(num_positions=num_positions, num_orientations=num_orientations, map=map)
        self.markov_model.make_states(grid_debug=grid_debug, seed=seed)
        self.markov_model.build_roadmap()

        self.num_states = num_positions * num_orientations
        if grid_debug:
            self.num_states = self.markov_model.num_states

        self.goal_state_idx = 0
        self.reward_radius = 0
        self.rewards = np.empty(self.num_states)

        self.policy = []  # List of actions to take, one per state idx in markov_model.model_states
        self.value_function = []  # List of rewards for being in each state under the current policy

        # Visualization
        self.turn_right_pub = rospy.Publisher('/right_pose_array', PoseArray, queue_size=10)
        self.turn_left_pub = rospy.Publisher('/left_pose_array', PoseArray, queue_size=10)
        self.forward_pub = rospy.Publisher('/forward_pose_array', PoseArray, queue_size=10)
        self.goal_state_pub = rospy.Publisher('/goal_state_marker', Marker, queue_size=10)
Example #8
def main():
    k, t = int(sys.argv[1]), int(sys.argv[2])
    text = sys.stdin.read()
    model = MarkovModel(text, k)
    kgram = text[:k]
    print(model.gen(kgram, t))
Example #9
    def __str__(self):
        print_ret = ''
        for elem in self.dict_corpora:
            print_ret += str(MarkovModel(self.k, self.dict_corpora[elem])) + "\n"

        return print_ret
Example #10
File: driver.py  Project: npankaj365/eybot
def main():
    """
    Tweeter object and related credentials
    """
    tweeter = Tweeter()

    # c_key = ''
    # c_secret = ''
    # a_token = ''
    # a_secret = ''
    """
    Login to twitter using above credentials
    """
    # tweeter.login(c_key, c_secret, a_token, a_secret)

    try:
        load = sys.argv[1]
    except Exception:
        Utility.error('main', 'Error in passed parameters.')
    """
    Create MarkovModel object to formulate tweets
    """
    model = MarkovModel()
    try:
        # Load an already saved model
        if load in ['-l', '-L']:
            filename, keyword, prefix, suffix, n_tweets = load_params()
            Utility.log('main', 'Loading model from file {0}'.format(filename))
            model.load('../model/m_blk_{0}'.format(filename))

            tweeter.start_tweeting(time=1,
                                   keywords=keyword.split(),
                                   prefix=prefix,
                                   suffix=suffix)
            tweeter._autoconstruct(model, int(n_tweets))
        # Build the model by reading a text file
        elif load in ['-r', '-R']:
            filename, keyword, prefix, suffix, n_tweets = load_params()
            Utility.log(
                'main',
                'Training model from file {0}, and saving.'.format(filename))
            model.read('../data/{0}.txt'.format(filename))
            model.save('../model/m_blk_{0}'.format(filename.split('.')[0]))

            tweeter.start_tweeting(time=1,
                                   keywords=keyword.split(),
                                   prefix=prefix,
                                   suffix=suffix)
            tweeter._autoconstruct(model, int(n_tweets))
        # Collect tweets and store to a database
        elif load in ['-c', '-C']:
            no = sys.argv[2]
            Utility.log(
                'main',
                'Collecting {0} tweets and saving them to db.'.format(no))
            tweets = tweeter.read_tweets(int(no))
            Tweeter.store(tweets)
        # Load a number of tweets and amplify
        elif load in ['-a', '-A']:
            no = sys.argv[2]
            timeout = sys.argv[3]
            Utility.log(
                'main',
                'Tweeting {0} tweets every {1} seconds'.format(no, timeout))
            tweeter.amplify_tweets(int(no), int(timeout))
        else:
            Utility.error('main', 'Invalid parameters')

        Utility.log('main', 'Exiting program ...')
    except KeyboardInterrupt:
        Utility.log('main', 'Terminating program ...')
Example #11
def main():
    k, T = int(sys.argv[1]), int(sys.argv[2])
    txt = sys.stdin.read()
    model = MarkovModel(txt, k)
    stdio.writeln(model.gen(txt[:k], T))
Example #12
    def modelo(self, nome_modelo):
        if nome_modelo not in self.dict_corpora:
            print("modelo(): model '" + str(nome_modelo) + "' is not defined")
            return
        else:
            return MarkovModel(self.k, self.dict_corpora[nome_modelo])
Example #13
from nltk.tokenize import TweetTokenizer
from sanitize_text import sanitize_text
from tokenize_text import tokenize_text
from markov_model import MarkovModel

import os

text = ['This', 'is', 'a', 'terrible', 'movie']

negative_markov_model = MarkovModel(order=1)
path = "./aclImdb/train/neg"
for filename in os.listdir(path)[:1000]:

    # Read the review into its own variable so the test sentence in `text`
    # is preserved for the scoring call below
    with open(os.path.join(path, filename), 'r') as f:
        review = f.read()

    # Add to model
    sanitized_text = sanitize_text(review)
    tokens = tokenize_text(sanitized_text)
    negative_markov_model.update(tokens)
print(negative_markov_model.sentence_average_count(text))

positive_markov_model = MarkovModel(order=1)
path = "./aclImdb/train/pos"
for filename in os.listdir(path)[:1000]:

    # Read the review
    with open(os.path.join(path, filename), 'r') as f:
        review = f.read()
Example #14
def main():
    """
    main the main driver code which logs into using the twitter credintials and executes the script according to the given modifier

    Options:
        -l : Load model from file. Use this if using an existing model. 
            Filename    : Name of pickle file to load markov model.  
            Keywords    : Seedwords which are an intergral part of tweet. Keywords may be single or multiple. 
            Prefix      : Word/s to start the tweet with. Prefix may be single or multiple words. 
            Suffix      : Word/s to add at the end of tweet. Suffix may be single or multiple words.
            num_tweets  : Number of tweets to be written.
        -r : Read file to create model. Use this if including your own text file.
            Filename    : Name of text file to construct markov model.  
            Keywords    : Seedwords which are an intergral part of tweet. Keywords may be single or multiple. 
            Prefix      : Word/s to start the tweet with. Prefix may be single or multiple words. 
            Suffix      : Word/s to add at the end of tweet. Suffix may be single or multiple words.
            num_tweets  : Number of tweets to be written.
        -c : Collect tweets from TwitterStream.
            no          : Number of tweets to collect.
        -a : Amplify tweets i.e. Retweet tweets stored using -c.   
            no          : Number of tweets to amplify.
            timeout     : Time to wait in seconds before retweeting. 


    logs into the twitter using the given credentials.

    If there is error it catches the exception and returns an exception message    
    """
    tweeter = Tweeter()

    # c_key = ''
    # c_secret = ''
    # a_token = ''
    # a_secret = ''
    # tweeter.login(c_key, c_secret, a_token, a_secret)

    try:
        load = sys.argv[1]
    except Exception:
        Utility.error('main', 'Error in passed parameters.')

    # Create MarkovModel object to formulate tweets
    model = MarkovModel()
    try:
        # Load an already saved model
        if load in ['-l', '-L']:
            filename, keyword, prefix, suffix, n_tweets = load_params()
            Utility.log('main', 'Loading model from file {0}'.format(filename))
            model.load('../model/m_blk_{0}'.format(filename))

            tweeter.start_tweeting(
                time=1, keywords=keyword.split(), prefix=prefix, suffix=suffix)
            tweeter._autoconstruct(model, int(n_tweets))
        # Build the model by reading a text file
        elif load in ['-r', '-R']:
            filename, keyword, prefix, suffix, n_tweets = load_params()
            Utility.log(
                'main', 'Training model from file {0}, and saving.'.format(filename))
            model.read('../data/{0}.txt'.format(filename))
            model.save('../model/m_blk_{0}'.format(filename.split('.')[0]))

            tweeter.start_tweeting(
                time=1, keywords=keyword.split(), prefix=prefix, suffix=suffix)
            tweeter._autoconstruct(model, int(n_tweets))
        # Collect tweets and store to a database
        elif load in ['-c', '-C']:
            no = sys.argv[2]
            Utility.log(
                'main', 'Collecting {0} tweets and saving them to db.'.format(no))
            tweets = tweeter.read_tweets(int(no))
            Tweeter.store(tweets)
        # Load a number of tweets and amplify
        elif load in ['-a', '-A']:
            no = sys.argv[2]
            timeout = sys.argv[3]
            Utility.log(
                'main', 'Tweeting {0} tweets every {1} seconds'.format(no, timeout))
            tweeter.amplify_tweets(int(no), int(timeout))
        else:
            Utility.error('main', 'Invalid parameters')

        Utility.log('main', 'Exiting program ...')
    except KeyboardInterrupt:
        Utility.log('main', 'Terminating program ...')
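Grounded in the branches above, the collect and amplify modes take their counts directly from sys.argv; hypothetical invocations:

    # python driver.py -c 100      # collect 100 tweets and store them to the db
    # python driver.py -a 10 60    # retweet 10 stored tweets, one every 60 seconds

The -l and -r modes read their remaining parameters through load_params(), whose format is not shown in this excerpt.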
Example #15
    parser.add_argument("--test_file", type=str, default="data/s3/test.txt")
    parser.add_argument("--sent", type=str, default="日本 漫画 的 题材 非常 广泛")
    parser.add_argument("--idx", type=int, default=3)
    parser.add_argument("--topk", type=int, default=10)
    parser.add_argument("--vocab_dir", type=str, default="outputs/vocabs")
    parser.add_argument("--out_dir", type=str, default="outputs/results")
    parser.add_argument("--grams_number", type=int, default=2)
    parser.add_argument("--smooth_strategy", type=str, default="laplace")
    parser.add_argument("--B_laplace", type=int, default=1)
    parser.add_argument("--analysis_model1", type=str, default="laplace")
    parser.add_argument("--analysis_model2", type=str, default="heldOut")
    args = parser.parse_args()

    vocab = train(args.train_file, args.dev_file, args.grams_number,
                  args.smooth_strategy, args.B_laplace)
    model = MarkovModel(vocab)

    if args.func == "pred_file":
        predidct_file(
            model, args.grams_number, args.test_file,
            os.path.join(
                args.out_dir, "test_file_results." + str(args.grams_number) +
                "grams." + args.smooth_strategy + ".txt"))
    elif args.func == "pred_sent":
        predict_sent(
            model, args.grams_number, args.sent, args.idx, args.topk,
            os.path.join(
                args.out_dir, "test_sent_results." + str(args.grams_number) +
                "grams." + args.smooth_strategy + ".txt"))
    elif args.func == "spearman_analysis":
        vocab1 = train(args.train_file, args.dev_file, args.grams_number,
Example #16
    def __init__(self, k, dict_corpora):
        self.k, self.dict_corpora, self.markov = k, dict_corpora, dict()
        for i in dict_corpora:
            self.markov[i] = MarkovModel(k, dict_corpora[i])
Example #17
def main():
    k = int(sys.argv[1])
    s = sys.argv[2]
    text = sys.stdin.read()
    model = MarkovModel(text, k)
    stdio.writeln(model.replace_unknown(s))
Example #18
class MDPToolbox(object):
    def __init__(self, num_positions=1000, num_orientations=10, map=None, grid_debug=False):
        # Build the Markov model
        self.markov_model = MarkovModel(num_positions=num_positions, num_orientations=num_orientations, map=map)
        self.markov_model.make_states(grid_debug=grid_debug)
        self.markov_model.build_roadmap()
        self.num_states = num_positions * num_orientations # S
        if grid_debug:
            self.num_states = self.markov_model.num_states
        # Visualization
        self.turn_right_pub = rospy.Publisher('/right_pose_array', PoseArray, queue_size=10)
        self.turn_left_pub = rospy.Publisher('/left_pose_array', PoseArray, queue_size=10)
        self.forward_pub = rospy.Publisher('/forward_pose_array', PoseArray, queue_size=10)
        self.goal_state_pub = rospy.Publisher('/goal_state_marker', Marker, queue_size=10)

    '''
        Function: get_policy
        Inputs: State goal_state
                int reward_radius

        Uses mdptoolbox policy iteration to generate a policy to reach the
        specified goal within the specified radius.
        Returns the policy (a tuple of length S), the number of iterations,
        and the CPU time.
    '''
    def get_policy(self, goal_state, reward_radius=0.15):
        transition_probabilities = self.markov_model.roadmap # Shape (A, S, S)
        goal_state_idx = self.get_goal_state_idx(goal_state)
        print("Goal state idx: {}".format(goal_state_idx))
        rewards = self.get_rewards(goal_state_idx, reward_radius) # Shape (S,)

        discount = 0.9 # How much weight to give to future values
        pi = mdptoolbox.mdp.PolicyIteration(transition_probabilities, rewards, discount)
        pi.run()

        return pi.policy, pi.iter, pi.time

    """
        Function: get_goal_state_idx
        Inputs: State state

        Gets the closest existing state in the roadmap to the goal.
        Returns the goal_state_idx.
    """
    def get_goal_state_idx(self, state):
        idx = self.markov_model.get_closest_state_idx(state)
        print(idx)
        return idx

    '''
        Function: get_rewards
        Inputs: int goal_state_idx
                int reward_radius

        Generates a vector assigning a low reward to all states not within the
        minimum radius around the goal. The high reward is assigned to all
        states that are within this radius, such as multiple orientations of
        the same state.
        Returns an array of shape (S,) assigning a reward to being in each state.

    '''
    def get_rewards(self, goal_state_idx, reward_radius=0.15):
        high_reward = 10
        low_reward = -1

        rewards = np.empty((self.num_states,))
        goal_state = self.markov_model.model_states[goal_state_idx]

        for state_idx in range(self.num_states):
            s = self.markov_model.model_states[state_idx]
            if goal_state.distance_to(s) < reward_radius:
                rewards[state_idx] = high_reward
            else:
                rewards[state_idx] = low_reward
        return rewards

    '''
        Function: visualize_policy
        Inputs: array policy
                State goal_state

        Publishes a sphere marker at the goal state.
        Generates three pose arrays corresponding to the three different actions.
        Depending on which action the policy says to execute at a state, the
        pose of this state is added to the corresponding pose array.

    '''
    def visualize_policy(self, policy, goal_state):
        print("Visualizing policy")
        goal_state_idx = self.get_goal_state_idx(goal_state)
        # Visualize the goal state as sphere.
        self.goal_state_pub.publish(self.markov_model.model_states[goal_state_idx].get_marker())

        turn_left_array = PoseArray()
        turn_right_array = PoseArray()
        forward_array = PoseArray()

        turn_left_array.header.frame_id = "map"
        turn_right_array.header.frame_id = "map"
        forward_array.header.frame_id = "map"

        all_actions = Action.get_all_actions()
        # Add the pose of each state to the array for its policy action
        for state_idx in range(len(policy)):
            action = all_actions[policy[state_idx]]
            state_pose = self.markov_model.model_states[state_idx].get_pose()

            if action == Action.LEFT:
                turn_left_array.poses.append(state_pose)
            elif action == Action.RIGHT:
                turn_right_array.poses.append(state_pose)
            else:
                forward_array.poses.append(state_pose)

        self.turn_left_pub.publish(turn_left_array)
        self.turn_right_pub.publish(turn_right_array)
        self.forward_pub.publish(forward_array)
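A hedged usage sketch for this class (the map object, the goal State, and the ROS node setup are assumptions not shown in the excerpt):

    # rospy.init_node('mdp_toolbox')   # the publishers require a running node
    # toolbox = MDPToolbox(num_positions=500, num_orientations=8, map=my_map)
    # policy, n_iter, cpu_time = toolbox.get_policy(goal_state, reward_radius=0.15)
    # toolbox.visualize_policy(policy, goal_state)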
Example #19
class MDP(object):
    def __init__(self, num_positions=1000, num_orientations=10, map=None, grid_debug=False, seed=False):
        # Build the Markov model
        self.markov_model = MarkovModel(num_positions=num_positions, num_orientations=num_orientations, map=map)
        self.markov_model.make_states(grid_debug=grid_debug, seed=seed)
        self.markov_model.build_roadmap()

        self.num_states = num_positions * num_orientations
        if grid_debug:
            self.num_states = self.markov_model.num_states

        self.goal_state_idx = 0
        self.reward_radius = 0
        self.rewards = np.empty(self.num_states)

        self.policy = []  # List of actions to take, one per state idx in markov_model.model_states
        self.value_function = []  # List of rewards for being in each state under the current policy

        # Visualization
        self.turn_right_pub = rospy.Publisher('/right_pose_array', PoseArray, queue_size=10)
        self.turn_left_pub = rospy.Publisher('/left_pose_array', PoseArray, queue_size=10)
        self.forward_pub = rospy.Publisher('/forward_pose_array', PoseArray, queue_size=10)
        self.goal_state_pub = rospy.Publisher('/goal_state_marker', Marker, queue_size=10)

    """
        Function: set_goal_state
        Inputs: State state
                int reward_radius

        Sets the state that the robot wants to end up in, as well as the
        radius around this state which will count as close enough.
    """
    def set_goal_state(self, state, reward_radius=0.5):
        self.goal_state_idx = self.markov_model.get_closest_state_idx(state)
        # print(self.goal_state_idx)
        # print(self.markov_model.model_states[self.goal_state_idx])
        self.reward_radius = reward_radius

    '''
        Function: set_rewards
        Inputs:

        Generates a vector assigning a low reward to all states not within the
        minimum radius around the goal. The high reward is assigned to all
        states that are within this radius, such as multiple orientations of
        the same state.

    '''
    def set_rewards(self):
        high_reward = 10
        low_reward = -1

        goal_state = self.markov_model.model_states[self.goal_state_idx]
        for state_idx in xrange(self.num_states):
            s = self.markov_model.model_states[state_idx]
            if goal_state.distance_to(s) < self.reward_radius:
                self.rewards[state_idx] = high_reward
            else:
                self.rewards[state_idx] = low_reward

    """
        Function: get_policy
        Inputs: State goal_state - new goal state to be set.

        Calculates the optimal policy for a given (or saved) goal state by
        iteratively solving a value function (how much payoff each action at
        a state will have in the long run) to update the action to take in each
        state (policy).
        Returns the policy, num of iterations, time of compuation (modelled after
        mdptoolbox outputs)

    """
    def get_policy(self, goal_state=None):
        if goal_state is not None:
            self.set_goal_state(goal_state)

        # Generate reward vector
        self.set_rewards()

        # Generate a starting, random policy from all available actions
        start_time = time.time()
        self.policy = self.get_random_policy()

        change = -1
        iteration_count = 0
        while change != 0 and iteration_count < 30:
            print("iteration {}".format(iteration_count))
            iteration_count += 1
            # Find V of this policy.
            solved = self.solve_value_function()
            if(solved):
                # Calculate new policy.
                change = self.get_new_policy()
                print(change)
            else:
                print("Failed to solve for a value function, trying a new random policy")
                # Singular matrix solution to V
                self.policy = self.get_random_policy()
        print("Converged in {} iterations".format(iteration_count))
        print("MDP solved in {}\n".format(time.time() - start_time))
        # print(self.policy)
        return (self.policy, iteration_count, time.time() - start_time)

    """
        Function: get_random_policy
        Inputs:

        Returns a randomly-assigned policy, or action to be taken in each state.

    """
    def get_random_policy(self):
        return np.random.choice(range(len(Action.get_all_actions())), self.num_states, replace=True)  # TODO: xrange good here?

    """
        Function: get_new_policy
        Inputs:

        Computes a new policy by looping through each start state and assigning
        the action that generates the highest reward long-term.
        Returns 0 if no changes to the policy were made. Any other integer
        means that different actions were chosen as compared to the previous
        policy.

    """
    def get_new_policy(self):
        gamma = 0.999
        all_actions = Action.get_all_actions()
        total_change = 0
        for state_idx in xrange(self.num_states):
            ps_matrix = self.build_ps_matrix(state_idx)
            state_rewards = self.rewards[state_idx] + gamma * ps_matrix.dot(self.value_function)
            idx_action = state_rewards.argmax()
            # Accumulate by magnitude so opposite-signed changes cannot cancel out.
            total_change += abs(self.policy[state_idx] - idx_action)
            self.policy[state_idx] = idx_action
        return total_change

    """
        Function: solve_value_function
        Inputs:

        Solves for the value function which calculates the infinite horizon or
        payoof at infinite time at each state if following the current policy.
        Returns false if the matrix is non-invertible.

    """
    def solve_value_function(self, gamma=0.999):
        print("Solving value function")
        # Build P matrix.
        p_matrix = self.build_p_matrix()
        # print(p_matrix)

        A_dense = np.identity(self.num_states) - gamma * p_matrix

        A = csr_matrix(A_dense)
        b = self.rewards

        # if np.linalg.det(A_dense) == 0:  # singular system: no unique solution
        #     return False
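        # Policy evaluation: V satisfies V = R + gamma * P * V under the current
        # policy, so we solve the sparse linear system (I - gamma * P) V = R.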
        self.value_function = spsolve(A, b)
        return True

    """
        Function: build_p_matrix
        Inputs:

        Returns a num_states x num_states matrix with each element representing
        the probability of transitioning from a start_state to end_state with
        the action defined in the policy.

    """
    def build_p_matrix(self):
        p_matrix = np.empty([self.num_states, self.num_states])
        for start_state_idx in xrange(self.num_states):
            action_idx = self.policy[start_state_idx]
            p_matrix[start_state_idx] = self.markov_model.roadmap[action_idx, start_state_idx, :]
        return p_matrix

    """
        Function: build_ps_matrix
        Inputs: int start_state_idx

        Returns a num_actions x num_states matrix with each element representing
        the probability of transitioning from a start_state to end_state with
        the action specified in each row.

    """
    def build_ps_matrix(self, start_state_idx):
        # print("Building PS matrix")
        return self.markov_model.roadmap[:, start_state_idx, :]

    def get_goal_state_idx(self, state):
        return self.markov_model.get_closest_state_idx(state)

    def visualize_policy(self, policy, goal_state):
        print("Visualizing policy")
        goal_state_idx = self.get_goal_state_idx(goal_state)
        # Visualize the goal state as sphere.
        self.goal_state_pub.publish(self.markov_model.model_states[goal_state_idx].get_marker())

        turn_left_array = PoseArray()
        turn_right_array = PoseArray()
        forward_array = PoseArray()

        turn_left_array.header.frame_id = "map"
        turn_right_array.header.frame_id = "map"
        forward_array.header.frame_id = "map"

        all_actions = Action.get_all_actions()
        # Add the pose of each state to the array for its policy action
        for state_idx in xrange(len(policy)):
            # print(len(policy))
            # print(policy[state_idx].shape)
            # print(type(policy[state_idx]))
            action = all_actions[policy[state_idx]]
            state_pose = self.markov_model.model_states[state_idx].get_pose()

            if action == Action.LEFT:
                turn_left_array.poses.append(state_pose)
            elif action == Action.RIGHT:
                turn_right_array.poses.append(state_pose)
            else:
                forward_array.poses.append(state_pose)

        self.turn_left_pub.publish(turn_left_array)
        self.turn_right_pub.publish(turn_right_array)
        self.forward_pub.publish(forward_array)
Example #20
from flask import Flask, render_template
from markov_model import MarkovModel
from random import choice

app = Flask(__name__)

with open('corpus/pinkfloyd.txt') as f:
    words = f.read().split()

mkv = MarkovModel(corpus=words, order=2)

start_states = list(mkv.keys())


@app.route('/')
def index():
    starting_state = choice(start_states)

    output = list(mkv.sample(120, starting_state=starting_state))

    # Trim the output so it begins at the start of a sentence and ends at the end of one
    for i, word in enumerate(output):
        if word[0].isupper():
            output = output[i:]
            break

    for i, word in enumerate(reversed(output)):
        if word[-1] in '.?!':
            output = output[:len(output) - i]
            break
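The excerpt ends before index() returns; presumably the cleaned output is rendered or returned, for example (hypothetical, since no template is shown):

    # return render_template('index.html', text=' '.join(output))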
Example #21
def main():
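    # (Excerpt: k, s, and text are presumably read from the command line and
    # standard input earlier in the full program; compare Example #4.)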
    # Create a Markov model named model using text and k.
    model = MarkovModel(text, k)

    # Use model.replace_unknown() to decode the corrupted text s.
    stdio.writeln(model.replace_unknown(s))