Example #1
def cost_per_period(States, Actions, dict_states, args, index_LT):
    P = TransitionProbs(States, Actions, args.Demand_Max, args.LT_s, args.LT_f, args.h, args.b, args.C_s, args.C_f,
                           args.Inv_Max, args.Inv_Min, args.cap_fast, args.cap_slow, dict_states)
    start = time.time()

    env = PI_env(States, Actions, P)
    #print('env created', time.time() - start)

    policy, v = policy_improvement(env, args.discount_factor)
    #print(time.time() - start)

    #print("Policy Probability Distribution:")
    #print(policy)
    #print("")

    #print("Value Function:")
    #print(v)
    #print("")
    np.savetxt("policy-LT%i-cap%i.csv" %(index_LT,args.cap_fast), policy, delimiter=";")
    np.savetxt("valuefunction-LT%i-cap%i.csv"%(index_LT,args.cap_fast), v, delimiter=";")
    np.savetxt("States-LT%i-cap%i.csv"%(index_LT,args.cap_fast),States, delimiter=";")
    np.savetxt("Actions-LT%i-cap%i.csv"%(index_LT,args.cap_fast), Actions, delimiter=";")

    #for index, i in enumerate(States):
    #    print(i, Actions[np.argmax(policy[index])])

    MC, MC_R = MarkovChain.MC(States, P, policy)
    steady_state = MarkovChain.steady_state(States, policy, MC)
    #print(steady_state)
    optimal_cost = MarkovChain.cost_steady_state(steady_state, policy, MC, MC_R)
    return optimal_cost
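
For context, a hedged sketch of the steady-state computation at the end (assumed semantics; the repo's MarkovChain module may differ): steady_state solves pi @ MC = pi for the policy-induced transition matrix, and cost_steady_state weights per-state costs by pi.

import numpy as np

def steady_state_sketch(MC):
    # Solve pi @ MC = pi with the normalization sum(pi) = 1 via least squares.
    n = MC.shape[0]
    A = np.vstack([MC.T - np.eye(n), np.ones(n)])
    b = np.zeros(n + 1)
    b[-1] = 1.0
    pi, *_ = np.linalg.lstsq(A, b, rcond=None)
    return pi

def cost_steady_state_sketch(pi, state_costs):
    # Long-run expected cost per period: each state's cost weighted by pi.
    return float(pi @ state_costs)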
Example #2
class MarkovWordGenerator:
    """ This class builds a Markov chain out of some input text and
        spits out something that is hopefully intelligible."""
    
    def __init__(self, inputText, n=1):
        # Collapse newlines and split the corpus into words.
        inputText = inputText.replace("\n", " ")
        self._inputText = inputText.split(" ")

        # Remove blanks from the list
        self._inputText = [word for word in self._inputText if word != '']

        # allow for circular treatment of text, so that every
        # state will lead to at least one other state
        self._inputText = self._inputText + self._inputText[0:n+1]
        self._n = n
        self._markovChain = MarkovChain()
        self.__buildMarkovChain()

    def __buildMarkovChain(self):
        # Build a Markov chain with each set of adjacent n words
        # representing a state in the chain.

        nGram1 = tuple(self._inputText[0: self._n])
        i = 1

        # Cycle through the n-grams in the text and add each n-gram,
        # along with adjacent n-grams, to the markov chain.
        
        while i < (len(self._inputText) - self._n):
            nGram2 = tuple(self._inputText[i: i + self._n])
            self._markovChain.add(nGram1, nGram2)
            nGram1 = nGram2
            i = i + 1

    def generate(self, numWords, method = MarkovChain.MOST_LIKELY):
        # Generate numWords words based on the Markov Chain model.
            
        # Randomly pick the first word generated
        r = randint(0, len(self._inputText) - (self._n + 1))     
        state = tuple(self._inputText[r: r + self._n])
        outputText = " ".join(state)

        # Output each new word based on the previous state        
        for i in range(numWords):
            state = self._markovChain.next(state, method)
            outputText = outputText + ' ' + state[-1]

        return outputText
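
A minimal driver for the class above, a sketch assuming the MarkovChain class it wraps is importable and that corpus.txt is a hypothetical input file:

from random import randint  # generate() above relies on randint being in scope

with open("corpus.txt") as f:  # hypothetical corpus file
    gen = MarkovWordGenerator(f.read(), n=2)
print(gen.generate(50))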
Example #3
def strat_markov_chain(play):
    """You can copy and modify this strategy."""
    ret_val = result.Result()
    for act in play.gen_acts():
        for scene in act.gen_scenes():
            ret_val.set_act_scene(act, scene)
            mc = MarkovChain.MarkovChain()
            actors_seen = []
            while True:
                # Ask the markov chain for the most likely guess
                if len(actors_seen):
                    most_probable = mc.most_probable(actors_seen[-1])
                else:
                    most_probable = tuple()
                # If multiple values choose one randomly
                if len(most_probable) > 1:
                    my_choice = random.choice(most_probable)
                elif len(most_probable) == 1:
                    my_choice = most_probable[0]
                else:
                    # Not enough information
                    my_choice = ''
                actual_actor = ret_val.guess(my_choice)
                if actual_actor is None:
                    break
                else:
                    # Record the transition
                    if len(actors_seen):
                        mc.add(actors_seen[-1], actual_actor)
                    actors_seen.append(actual_actor)
    return ret_val
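
strat_markov_chain only needs two methods from MarkovChain.MarkovChain. A minimal sketch of that assumed interface (not the project's actual class), where add() counts observed transitions and most_probable() returns the successors seen most often:

from collections import defaultdict

class MarkovChainSketch:
    def __init__(self):
        # transition counts: prev_state -> {next_state: count}
        self._counts = defaultdict(lambda: defaultdict(int))

    def add(self, prev_state, next_state):
        self._counts[prev_state][next_state] += 1

    def most_probable(self, state):
        successors = self._counts[state]
        if not successors:
            return tuple()
        best = max(successors.values())
        # all successors tied for the highest observed count
        return tuple(s for s, c in successors.items() if c == best)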
Example #4
    def chainBuilder(self):
        parsedEpisodes = getEpisodes(self.episodes)
        for e in parsedEpisodes:
            castList = e.getCast()
            for c in castList:
                if c not in self.suppCharacters and c not in self.mainCharacters:
                    self.suppCharacters.add(c)
        directChain = MarkovChain(2)  # Change direction order here
        stageDirections = getAllStageDirs(self.episodes)
        for direction in stageDirections:
            directChain.addData(direction.getChainableSource())
        self.chains["STAGEDIR"] = directChain
        for i in self.mainCharacters:
            self.characterChain(i)
        for m in self.suppCharacters:
            self.characterChain(m)
Example #5
def MarkovTrumpReactiveTweetGen(inp):
    ret = ""
    chain = MarkovChain.MarkovChain(csvParser.ImportTrumpData())
    if (inp == ''):
        ret = MarkovTweetGen(chain)
    else:
        ret = MarkovReactiveTweetGen(chain, inp)
    if (ret.endswith(',')):
        ret = ret[0:-1]
    if not ret.endswith((".", "!", "?")):
        ret += '.'
    return ret
Example #6
    def __init__(self):
        self.memory = []
        self.markov_graph = MarkovChain.MarkovChain()
        self.active_input_pattern_index = -1
        self.prev_active_input_pattern_index = -1

        self.temporal_groups = []
        self.active_temporal_group_index = -1

        # In the format (x1, y1, x2, y2) where (x1, y1) is the top left of the rectangle
        # and (x2, y2) is the bottom right of the rectangle. All rectangles are in the 4th
        # quadrant of the Euclidean plane where the negative y axis is positive.
        self.receptive_field_dimensions = None
Example #8
if not channel:
    channel = input("Channel: ")
if not dict_name:
    print("No dict name specified. Setting it to channel name.")
    dict_name = channel

username = ""
auth_token = ""

with open("creds.txt", 'r') as creds_file:
    username = creds_file.readline()
    auth_token = creds_file.readline()

irc_bot = TwitchChat.TwitchChat(username, auth_token, channel)
markov_chain = MarkovChain.MarkovChain(save_dir, dict_name)

while True:
    msg = irc_bot.next_msg()
    if msg:
        markov_chain.take_message(msg)

    if markov_chain.iterations() % 5 == 1:
        print("Channel:", markov_chain.dict_name)
        print("Iterations:", markov_chain.iterations())
        print("Time elapsed:", int(markov_chain.time_elapsed()), "seconds")
        print("Message:", markov_chain.make_message())
        print()

        markov_chain.save_progress()
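
The two readline() calls above imply a two-line creds.txt. A hypothetical layout (the oauth: prefix follows Twitch's IRC convention and is an assumption here):

my_twitch_bot_name
oauth:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx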
Example #9

with open("data_repo/data.json") as json_data:
    trump_data = json.load(json_data)
    json_data.close()

# apparently have to download this data set to build the tokenizer
nltk.download('punkt')

speech_text = PreProcess.bulk_txt_load('data_repo/*.txt')
text_extract = [x['text'] for x in trump_data]
text_extract.extend(speech_text)

# join all the tweets together into one long tweet
corpus = ' '.join(text_extract)

tokens = corpus.split(" ")

# bigrams = nltk.ngrams(tokens,2)
# trigrams = nltk.ngrams(tokens,3)
#
# ngram_list = [ item for item in ngrams]
#

mc = MarkovChain.MarkovModel()

#mc_model = mc.build_model(bigrams)
mc_model2 = mc.learn(tokens, 2)
output = mc.generate(2, max_tokens=125)

print(' '.join(output) + '.')
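
A rough sketch of the learn()/generate() semantics assumed above (not the repo's actual MarkovModel): count which token follows each n-gram, then sample a sequence from those counts:

import random
from collections import defaultdict

class MarkovModelSketch:
    def learn(self, tokens, n):
        # Map each n-gram to the tokens observed to follow it.
        self._n = n
        self._table = defaultdict(list)
        for i in range(len(tokens) - n):
            self._table[tuple(tokens[i:i + n])].append(tokens[i + n])

    def generate(self, n, max_tokens=125):
        state = random.choice(list(self._table))  # random starting n-gram
        out = list(state)
        while len(out) < max_tokens and tuple(out[-n:]) in self._table:
            out.append(random.choice(self._table[tuple(out[-n:])]))
        return out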
Example #10
        word1 = inp
        retSen = word1.capitalize()
    else:
        if (inp.endswith('s')):
            word1 = "are"
        else:
            word1 = "is"
        retSen = inp.capitalize() + ' ' + word1

    for it in range(length - 1):
        word2 = random.choice(MC[word1])
        # Check for custom end of tweet token
        if (word2 == "@b@"):
            break
        word1 = word2
        retSen += " " + word1

    if not retSen.endswith((".", "!", "?")):
        retSen += '.'
    print(retSen)
    return retSen


if (__name__ == "__main__"):
    #TweetCreator(input1)
    chain = MarkovChain.MarkovChain(csvParser.ImportTrumpData())
    for it in range(10):
        print("Test #" + str(it) + ':')
        MarkovTrumpReactiveTweetGen("")
Example #11
from MarkovChain import *

m = MarkovChain()
m.transition_table = [[0.9, 0.05, 0.05], [0.5, 0.1, 0.4], [0, 1, 0]]
m.nodes = ["alive", "zombie", "dead"]

for i in m.generator():
    print(i)
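
To make this runnable on its own, here is one plausible shape for that MarkovChain (a sketch under assumptions, not the actual imported module): transition_table holds one row of next-state probabilities per node, and generator() walks the chain by sampling from the current row:

import random

class MarkovChain:
    def __init__(self):
        self.transition_table = []  # row i: P(next state | state i), rows sum to 1
        self.nodes = []             # human-readable state names

    def generator(self, start=0, steps=20):
        state = start
        for _ in range(steps):
            yield self.nodes[state]
            # sample the index of the next state from the current row
            state = random.choices(range(len(self.nodes)),
                                   weights=self.transition_table[state])[0]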
Example #12
                        default=0.99,
                        type=float,
                        help="discount_factor. Default = 0.99",
                        dest="discount_factor")
    args = parser.parse_args()

    #DP(args,8)
    States = environment.CreateStates(args.LT_f, args.LT_s, args.Inv_Max,
                                      args.Inv_Min, args.OrderFast,
                                      args.OrderSlow)
    Actions = environment.CreateActions(args.OrderFast, args.OrderSlow)
    dict_states = environment.CreateDictStates(States)

    with open('./A3C_policy.csv') as f:
        #States = []
        policy = []

        for line in f:
            #States.append(line.split(sep=';')[:2])
            policy.append(line.split(sep=';')[0:82])
            #a_prob_s.append(line.split(sep=';')[10:16])
        for index, i in enumerate(policy):
            policy[index] = [float(j) for j in policy[index]]

    #print(policy)
    P = PI.TransitionProbs(States, Actions, args.Demand_Max, args.LT_s,
                           args.LT_f, args.h, args.b, args.C_s, args.C_f,
                           args.Inv_Max, args.Inv_Min, args.cap_fast,
                           args.cap_slow, dict_states)
    print('Cost: ', MarkovChain.TestPolicy(States, P, policy))
Example #13
def cost_per_period(States, Actions, dict_states, args, k, u, m, distribution,
                    identifier, demand_values):
    start = time.time()
    P = TransitionProbs(States, Actions, args.Demand_Max, args.LT_s, args.LT_f,
                        args.h, args.b, args.C_s, args.C_f, args.Inv_Max,
                        args.Inv_Min, args.cap_fast, args.cap_slow,
                        dict_states, k, u, m, distribution, demand_values)

    env = VI.VI_env(States, Actions, P)

    print('environment created', time.time() - start)
    #policy, v = PI.policy_improvement(env, args.discount_factor)
    policy, v = VI.value_iteration(env,
                                   theta=0.000001,
                                   discount_factor=args.discount_factor)

    np.save('v_%s_%s_%s.npy' % (identifier, args.LT_s, k), v)
    optimal_policy = []

    MC, MC_R = MarkovChain.MC(States, P, policy)
    steady_state = MarkovChain.steady_state(States, policy, MC)
    optimal_cost_array, share_expedited = MarkovChain.cost_steady_state(
        steady_state, Actions, policy, MC, MC_R)
    optimal_cost = np.sum(optimal_cost_array)

    with open(
            'optimal_policy-l_e%i-l_r%i-k%i-Distribution %s.csv' %
        (args.LT_f, args.LT_s, k, identifier), 'w') as f:
        f.write('OPTIMAL COST;' + str(optimal_cost) + '\n')
        f.write('Share expedited;' + str(share_expedited) + '\n')
        f.write('Share regular;' + str(1 - share_expedited) + '\n\n')

        f.write('PARAMETERS USED:\n')
        f.write('Demand;Prob\n')
        for index, item in enumerate(distribution):
            f.write(str(demand_values[index]) + ';' + str(item) + '\n')
        f.write('\n')

        f.write('l_r;')
        f.write(str(args.LT_s) + '\n')

        f.write('h;')
        f.write(str(args.h) + '\n')

        f.write('b;')
        f.write(str(args.b) + '\n')

        f.write('k;')
        f.write(str(k) + '\n')

        f.write('u;')
        f.write(str(u) + '\n')

        f.write('m;')
        f.write(str(m) + '\n')

        f.write('c_r;')
        f.write(str(args.C_s) + '\n')

        f.write('\n')
        for index, __ in enumerate(States[0]):
            f.write('State' + ';')
        f.write('optimal local' + ';' + 'optimal offshore')
        f.write(';prob state;')
        f.write('cost state;')
        f.write('weighted cost state;')
        f.write('\n')

        for index, state in enumerate(policy):
            for index2, action in enumerate(state):
                if (action == 1):
                    #print(States[index],Actions[index2])
                    optimal_policy.append([States[index], Actions[index2]])
                    for item in States[index]:
                        f.write(str(item) + ';')
                    for item in Actions[index2]:
                        f.write(str(item) + ';')
                    #print(steady_state,steady_state[0])
                    f.write(str(steady_state[index]) + ';')
                    if (steady_state[index] > 0):
                        f.write(
                            str(optimal_cost_array[index] /
                                steady_state[index]) + ';')
                    else:
                        f.write(str(0) + ';')
                    f.write(str(optimal_cost_array[index]))
                    f.write('\n')
    return optimal_cost, share_expedited
Example #14
    num_nodes = len(G)
    num_items = len(G)

    k = 50
    item_distributions = ['uniform', 'direct', 'inverse', 'ego']

    for item_distribution in item_distributions:
        if item_distribution == 'ego':
            iterations = 10
        else:
            iterations = 1
        for iteration in xrange(iterations):
            print "Evaluating item distribution {}".format(item_distribution)

            __builtin__.mc = MarkovChain(num_nodes=num_nodes,
                                         num_items=num_items,
                                         item_distribution=item_distribution,
                                         G=G)

            print "Starting evaluation of methods"
            methods = [
                random_nodes, highest_item_nodes,
                highest_closeness_centrality_nodes,
                highest_in_degree_centrality_nodes,
                highest_in_probability_nodes,
                highest_betweenness_centrality_nodes, smart_greedy_parallel
            ]

            for method in methods:
                print "Evaluating method {}".format(method.func_name)
                get_objective_evolution(method, k, iteration)
Example #15
    def setUp(self):
        self._c = MarkovChain.MarkovChain()
Example #16
import MarkovChain
import random

if __name__ == "__main__":
    #Make a chain from shakespeare texts
    chain = MarkovChain.make_shakespear_chain(lines=2000)
    print "Hello my name is MarkovBot"
    #Print a ready symbol
    while (True):
        input = raw_input(">")
        #If input is exit, exit
        if input == "exit":
            import sys
            sys.exit()
        #Search the input for keywords
        candidate_tuples = []
        keywords = input.strip().split()
        for word in keywords:
            for tuple in chain.node_dict.keys():
                if word in tuple:
                    candidate_tuples.append(tuple)
                    break
        #Make a response from the keywords in the markov chain
        response = []
        response_candidate_list = []
        for tuple in candidate_tuples:
            response_candidate_list.append(chain.node_dict[tuple])
        #Choose a first word
        words_to_choose = []
        for candidate in response_candidate_list:
            for word in candidate.next_words.keys():
Example #17
#! /c/Anaconda/python
from MarkovChain import *
mc = MarkovChain("./markov")
# To generate the markov chain's language model, in case it's not present
mc.generateDatabase("ThisThis string of Text. is This is a string of Text. a string of Text.It won't  is a string of Text. This is a string of Text. It won't generate string of Text. an interesting string of Text.database though.")
# To let the markov chain generate some text, execute
print mc.generateString() 
Example #18
    def characterChain(self, character):
        tempChain = MarkovChain(2)  # Change line order here
        characterLines = getAllCharLines(self.episodes, character)
        for line in characterLines:
            tempChain.addData(line.getChainableSource())
        self.chains[character] = tempChain