Example #1
0
 def act(self, action, record=True, format=True):
     """Feed one player action to the generator and return its output.

     Parameters:
         action: raw player input appended to the current story.
         record: when True, store the formatted action/result pair on
             self.actions / self.results.
         format: when True, pass the raw result through format_result
             before returning it.
     """
     # Refuse to generate from a completely empty context + action.
     assert (self.context.strip() + action.strip())
     assert (settings.getint('top-keks') is not None)

     memory_text = ' '.join(self.memory)
     generated = self.generator.generate(
         self.get_story() + action,
         self.context + memory_text,
         temperature=settings.getfloat('temp'),
         top_p=settings.getfloat('top-p'),
         top_k=settings.getint('top-keks'),
         repetition_penalty=settings.getfloat('rep-pen'))

     if record:
         self.actions.append(format_input(action))
         self.results.append(format_input(generated))

     if format:
         return format_result(generated)
     return generated
Example #2
0
def input_fn(serialized_input_data, content_type='text/csv'):
    """Deserialize a request payload into a vectorised model input.

    Parameters:
        serialized_input_data: request body, either bytes or an
            already-decoded str.
        content_type: MIME type of the payload; only 'text/csv' is
            supported.

    Returns:
        The input run through the lookup table (vectorised form).

    Raises:
        Exception: if content_type is anything other than 'text/csv'.
    """
    print('Deserializing the input data.')
    if content_type == 'text/csv':
        # The payload may arrive as bytes or as str depending on the
        # caller; decode only when needed instead of a bare `except:`
        # that would swallow every error (including KeyboardInterrupt).
        if isinstance(serialized_input_data, bytes):
            data = serialized_input_data.decode('utf-8')
        else:
            data = serialized_input_data
        print(data)
        print(type(data))

        # calling lookuptable
        vector_table = get_lookup_table()

        # process input data and turn to numpy array using lookup table
        formatted_input_data = format_input(data)

        print(formatted_input_data)
        print(type(formatted_input_data))

        vectorised_input = lookup_table(vector_table, formatted_input_data)

        return vectorised_input

    raise Exception('Requested unsupported ContentType in content_type: ' +
                    content_type)
Example #3
0
def predict_fn(input_data, model):
    """Run a prediction for one deserialized request.

    Parameters:
        input_data: the deserialized request payload (raw text).
        model: the loaded model whose .predict() is invoked.

    Returns:
        The model's prediction for the vectorised input.
    """
    print('Determining nearest cluster.')

    # calling lookuptable — use a distinct name so the module-level
    # lookup_table() helper is not shadowed (the original shadowed it
    # and then tried to call the table itself).
    vector_table = get_lookup_table()

    # process input data and turn to numpy array using lookup table
    # (original had a SyntaxError: positional argument after keyword).
    formatted_input_data = format_input(input_data)
    vectorised_input = lookup_table(vector_table, formatted_input_data)

    # Predict with the model passed in by the framework rather than an
    # undefined global, and return the value actually computed.
    output = model.predict(vectorised_input)

    return output
Example #4
0
def new_chat(update: Update, context: CallbackContext) -> int:
    """Handle the start of a new chat topic.

    Replies with the canned answers for a known interest, stores any
    follow-up conversation in user_data, and returns the next
    conversation state (KEEP_CHATING or NEW_CHAT).
    """
    interest = format_input(update.message.text)

    # Unknown topic: ask the user to tell us more and stay in NEW_CHAT.
    if interest not in conversation_json:
        update.message.reply_text(
            f'Não ouvi falar sobre {interest}, me conta mais!')
        return NEW_CHAT

    topic = conversation_json[interest]
    for answer in topic['respostas']:
        answer_user(update, answer)
        time.sleep(0.8)

    # Without a follow-up conversation, remain in the NEW_CHAT state.
    if 'conversa' not in topic:
        return NEW_CHAT

    context.user_data['conversation_json'] = topic['conversa']
    return KEEP_CHATING
Example #5
0
 def __init__(self, crib):
     """Initialise the attack state for the given crib.

     Parameters:
         crib: raw crib text, normalised through utils.format_input.
     """
     # Root logger; the ciphertext is filled in later.
     self.logger = logging.getLogger()
     self.ciphertext = ""
     self.crib = utils.format_input(crib)
Example #6
0
# Per-session accumulators for the selected mouse.
stimuli_arr, actions_arr, stim_sides_arr, session_uuids = [], [], [], []

# select particular mice
mouse_name = 'KS016'
for idx, uuid in enumerate(sess_id):
    # Keep only sessions belonging to the selected mouse.
    if mice_names[idx] != mouse_name:
        continue
    data = utils.load_session(uuid)
    # Skip sessions without recorded choices or whose first block is biased.
    if data['choice'] is None or data['probabilityLeft'][0] != 0.5:
        continue
    stim_side, stimuli, actions, pLeft_oracle = utils.format_data(data)
    stimuli_arr.append(stimuli)
    actions_arr.append(actions)
    stim_sides_arr.append(stim_side)
    session_uuids.append(uuid)

# format data
stimuli, actions, stim_side = utils.format_input(
    stimuli_arr, actions_arr, stim_sides_arr)
session_uuids = np.array(session_uuids)

# import models
from models.expSmoothing_stimside import expSmoothing_stimside as exp_stimside
from models.expSmoothing_prevAction import expSmoothing_prevAction as exp_prevAction
from models.optimalBayesian import optimal_Bayesian as optBay
from models.biasedApproxBayesian import biased_ApproxBayesian as baisedApproxBay
from models.biasedBayesian import biased_Bayesian
'''
If you are interested in fitting (and the prior) of the mice behavior
'''
model = exp_prevAction('./results/inference/', session_uuids, mouse_name,
                       actions, stimuli, stim_side)
model.load_or_train(remove_old=False)
param = model.get_parameters()  # if you want the parameters