Example #1
def get_dynamic_performance_profile(instances, config, selector):
    classes = config['solution_quality_classes']
    bounds = config['solution_quality_class_bounds']
    count = config['solution_quality_class_count']

    groups = utils.get_groups(instances, 'qualities')
    estimated_groups = utils.get_groups(instances, 'estimated_qualities')

    length = utils.get_max_list_length(groups)
    steps = range(length)

    trimmed_groups = utils.get_trimmed_lists(groups, length)
    trimmed_estimated_groups = utils.get_trimmed_lists(estimated_groups, length)

    profile = get_initial_dynamic_performance_profile(classes, count, steps)

    for qualities, estimated_qualities in zip(trimmed_groups, trimmed_estimated_groups):

        for step in steps:
            if step + 1 < length:
                origin_quality, target_quality = selector(qualities, estimated_qualities, step)
                origin_class = utils.digitize(origin_quality, bounds)
                target_class = utils.digitize(target_quality, bounds)

                profile[origin_class][step][target_class] += 1

    return get_normalized_performance_profile(profile, classes, steps)
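A note on the helpers: the utils module is not shown anywhere in these examples, but from the call sites, utils.get_groups(instances, key) appears to pull the list stored under key out of every instance record, and utils.get_max_list_length the longest of those lists. A minimal sketch under those assumptions (instances taken to be a dict of records):

def get_groups(instances, key):
    # Assumed behavior: collect the per-instance list (e.g. 'qualities')
    # from each record; the real utils implementation is not shown.
    return [instance[key] for instance in instances.values()]

def get_max_list_length(groups):
    # Length of the longest quality sequence across all instances.
    return max(len(group) for group in groups)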
Example #2
def static_scene_finder(folder_path):
    print('Detecting static scenes in folder:', folder_path)
    files_list_init = [os.path.join(folder_path, f) for f in os.listdir(folder_path)
                       if f.endswith(('.jpg', '.JPG', '.png'))]
    print('Found {} files'.format(len(files_list_init)))

    # sort files by their date taken
    files_list = utils.sort_images_bydatetaken(files_list_init)

    # compute hashes of the images
    hashes = dhash_utils.compute_hashes(files_list)
    print('Hashes', hashes)
    # get bits differences (hamming distances of consecutive image pairs)
    bit_diffs = dhash_utils.compute_hamming_dist(hashes)
    print('Hamming distances', bit_diffs)
    # bit_diffs.append(0)
    # df = pd.DataFrame(
    #     {'image_id_sorted': files_list,
    #      'hash': hashes,
    #      'hamming_distance': bit_diffs
    #      })
    # df.to_csv('df.csv')
    if not bit_diffs:
        return []
    # detect the static scenes based on the bit_diffs list
    static_scenes = utils.get_groups(bit_diffs, threshold=6)

    # find the sequences of the actual images
    sequences = []
    for group in static_scenes:
        print(group['start_index'])
        print(group['values'])
        sequences.append(files_list[group["start_index"]:group["start_index"] + len(group["values"]) + 1])

    return sequences
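Given how start_index and values are read back in the loop above, utils.get_groups(bit_diffs, threshold=6) plausibly collects consecutive runs of small Hamming distances, one run per static scene. A sketch under that assumption (the real helper is not shown):

def get_groups(values, threshold):
    # Assumed behavior: group consecutive values <= threshold into runs.
    # n small distances span n + 1 images, which is why the caller slices
    # files_list[start_index:start_index + len(values) + 1].
    groups, current = [], None
    for i, v in enumerate(values):
        if v <= threshold:
            if current is None:
                current = {'start_index': i, 'values': []}
            current['values'].append(v)
        elif current is not None:
            groups.append(current)
            current = None
    if current is not None:
        groups.append(current)
    return groups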
Example #3
def get_probabilistic_performance_profile(instances, config):
    groups = utils.get_groups(instances, 'qualities')

    length = utils.get_max_list_length(groups)
    steps = range(length)

    trimmed_groups = utils.get_trimmed_lists(groups, length)

    profile = get_initial_probabilistic_performance_profile(config['solution_quality_class_count'], steps)

    for step in steps:
        for qualities in trimmed_groups:
            target_quality = qualities[step]
            target_class = utils.digitize(target_quality, config['solution_quality_class_bounds'])
            profile[step][target_class] += 1

        normalizer = sum(profile[step])
        for target_class in config['solution_quality_classes']:
            profile[step][target_class] /= normalizer

    return profile
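Both this example and Example #1 lean on utils.digitize(quality, bounds) to turn a raw quality into a class index. A hedged sketch, assuming bounds is a sorted list of bucket edges and the helper behaves like numpy.digitize:

import bisect

def digitize(value, bounds):
    # Assumed behavior: index of the bucket containing value, so with
    # bounds [0, 0.25, 0.5, 0.75, 1.0] a quality of 0.6 maps to class 2.
    return bisect.bisect_right(bounds, value) - 1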
Example #4
    def get_items(self):
        query = self._get_active_query()

        if ((query.startswith('lights') or query.startswith('groups')) and len(query.split(':')) >= 3):
            action_filter = HueActionFilter(self.workflow)
            control = query.split(':')
            lights = utils.get_lights(from_cache=True)
            groups = utils.get_groups()
            rid = control[1]

            self.items = action_filter.get_items(
                query=':'.join(control[2:]),  # lights:1:<light_query>
                id=rid,
                type=HueActionFilter.LIGHT_TYPE if query.startswith('lights') else HueActionFilter.GROUP_TYPE,
                resource=lights.get(rid, None) if query.startswith('lights') else groups.get(rid, None))

        else:  # Show index
            if not self.workflow.settings.get('username'):
                self._add_item(
                    'bridge_failed',
                    valid=True,
                    title='Link with Hue bridge',
                    arg='set_bridge:%s' % query)

            else:
                lights = utils.get_lights()
                groups = utils.get_groups()

                if not lights:
                    self._add_item(
                        'bridge_failed',
                        valid=True,
                        arg='set_bridge:%s' % query)

                else:
                    self._add_item('all_lights')

                    for rid, room in groups.items():
                        self._add_item(
                            title=room['name'],
                            autocomplete='groups:%s:' % rid)

                    if query.startswith('lights:') or query.startswith('groups:'):
                        self.partial_query = query.split(':')[1]

                    for lid, light in lights.items():
                        title = light['name']

                        if light['state']['on']:
                            subtitle = []
                            if light['state'].get('hue'):
                                subtitle.append('hue: {hue}'.format(
                                    hue='{0:.0f}°'.format(float(light['state']['hue']) / 65535 * 360)))
                            if light['state'].get('bri') is not None:
                                subtitle.append('bri: {bri}'.format(
                                    bri='{0:.0f}%'.format(float(light['state']['bri']) / 255 * 100)))
                            if light['state'].get('sat') is not None:
                                subtitle.append('sat: {sat}'.format(
                                    sat='{0:.0f}%'.format(float(light['state']['sat']) / 255 * 100)))
                            subtitle = ', '.join(subtitle) or 'on'
                            icon = '%s.png' % lid
                        else:
                            subtitle = 'off'
                            icon = 'off.png'

                        if not light['state'].get('reachable'):
                            title += ' **'
                            subtitle += ' — may not be reachable'

                        self._add_item(
                            title=title,
                            subtitle='({lid}) {subtitle}'.format(
                                lid=lid,
                                subtitle=subtitle,
                            ),
                            icon=icon,
                            autocomplete='lights:%s:' % lid)

                    # self._add_item('help')

        self._filter_items()
        return self.items
Example #5
import config
import utils

accounts = config.TELEGRAM_ACCOUNTS[:1]

clients = utils.login(accounts)
client = clients[0]

groups = utils.get_groups(client)

print("Choose a group to scrape members from:")
for idx, g in enumerate(groups):
    print(f"{idx}, {g.title}")

g_index = input("Enter a Number: ")
target_group = groups[int(g_index)]

print("Fetching Members and save to file")

utils.save_members(client, target_group, "members.csv")

client.disconnect()
print("Members scraped successfully.")
Example #6
    codes[dp].setdefault(pj, []).append(sp)

subject = 'Engineering Spare Parts QTY Alarm'
author = ('QTY Alarm', '*****@*****.**')
line = '~' * 60
remark = 'This notification will be sent out from Monday to Friday'
no_leader = '''<h1><font color="red">
No project leader specified for:</font></h1><br />'''

css = dict(td_style=mt.td_style)
for dp, projects in codes.items():
    for pj, sps in projects.items():
        groups = utils.get_groups(db, dp, pj, role='project_leader')
        to_list = utils.get_users(db, groups, can_send='alarm')
        body = []
        if not to_list:
            groups = utils.get_groups(db, dp, pj, role='asset_leader')
            to_list = utils.get_users(db, groups)
            body.append(no_leader)

        body.append(mt.header.format(header=subject,
                                     department=dp,
                                     project=pj,
                                     )
                    )
        body.append(mt.table_header.format(table_style=mt.table_style,
                                           thead_style=mt.thead_style,
                                           tr_style=mt.tr_style,
Example #7
    def execute(self, action):
        is_light = action[0] == 'lights'
        is_group = action[0] == 'groups'

        if not is_light and not is_group:
            return

        rid = action[1]
        function = action[2]
        value = action[3] if len(action) > 3 else None
        lights = utils.get_lights()
        groups = utils.get_groups()

        # Default API request parameters
        method = 'put'
        endpoint = '/groups/%s/action' % rid if is_group else '/lights/%s/state' % rid

        if function == 'off':
            data = {'on': False}

        elif function == 'on':
            data = {'on': True}

        elif function == 'bri':
            value = int((float(value) / 100) * 255) if value else 255
            data = {'bri': value}

        elif function == 'shuffle':
            if not is_group:
                print('Shuffle can only be called on groups.'.encode('utf-8'))
                return

            self._shuffle_group(rid)
            return True

        elif function == 'rename':
            endpoint = '/lights/%s' % rid
            data = {'name': value}

        elif function == 'effect':
            data = {'effect': value}

        elif function == 'color':
            if value == 'random':
                if is_group:
                    gamut = colors.GamutA
                    data = {'xy': self._get_random_xy_color(gamut)}
                else:
                    gamut = colors.get_light_gamut(lights[rid]['modelid'])
                    data = {'xy': self._get_random_xy_color(gamut)}
            else:
                try:
                    if is_group:
                        gamut = colors.GamutA
                    else:
                        gamut = colors.get_light_gamut(lights[rid]['modelid'])
                    data = {'xy': self._get_xy_color(value, gamut)}
                except ValueError:
                    print(
                        'Error: Invalid color. Please use a 6-digit hex color.'
                        .encode('utf-8'))
                    return

        elif function == 'harmony':
            if not is_group:
                print('Color harmonies can only be set on groups.'.encode(
                    'utf-8'))
                return

            root = action[4] if len(action) > 4 else None

            if value not in harmony.MODES:
                print('Invalid harmony mode.'.encode('utf-8'))
                return

            self._set_harmony(rid, value, root)
            return

        elif function == 'reminder':
            try:
                time_delta_int = int(value)
            except ValueError:
                print(
                    'Error: Invalid time delta for reminder.'.encode('utf-8'))
                return

            reminder_time = datetime.datetime.utcfromtimestamp(time.time() +
                                                               time_delta_int)

            method = 'post'
            data = {
                'name': 'Alfred Hue Reminder',
                'command': {
                    'address': self.hue_request.api_path + endpoint,
                    'method': 'PUT',
                    'body': {
                        'alert': 'lselect'
                    },
                },
                'time': reminder_time.replace(microsecond=0).isoformat(),
            }
            endpoint = '/schedules'

        elif function == 'set':
            data = {'scene': value}

        elif function == 'save':
            lids = utils.get_group_lids(rid)
            method = 'post'
            endpoint = '/scenes'
            data = {'name': value, 'lights': lids, 'recycle': False}

        else:
            return

        # Make the request
        self.hue_request.request(method, endpoint, json.dumps(data))

        return
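An illustrative call, with a hypothetical executor instance: setting light 1 to 50% brightness sends PUT /lights/1/state with {"bri": 127}, since int((50 / 100.0) * 255) == 127.

executor.execute(['lights', '1', 'bri', '50'])  # hypothetical instance name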
Example #8
body_title = '<h1>There are {} assets that need a Flex Asset ID assigned:</h1>'
no_leader_title = '''<h1><font color="red">
But no asset user is specified.</font></h1>'''
line = '~' * 60
dept_fmt = '&raquo; Department: <strong>{}</strong><br />'
proj_fmt = '&raquo; Project: <strong>{}</strong>'
url = '''More details on <a href='http://{ip}/buys/?routing=accept'>
http://{ip}/buys/?routing=accept</a> or
<a href='http://teasset/buys/?routing=accept'>
http://teasset/buys/?routing=accept</a>.
'''.format(ip=utils.get_ip())
remark = '''This notification will be sent out from Monday to Friday'''

for dept, projects in users.items():
    for proj, num in projects.items():
        groups = utils.get_groups(db, dept, proj, role='asset_user')
        to_list = utils.get_users(db, groups)
        body = [body_title.format(num)]
        if to_list:
            body.append(url)
        else:
            body.append(no_leader_title)
            groups = utils.get_groups(db, dept, proj, role='asset_leader')
            to_list = utils.get_users(db, groups)

        body.append(dept_fmt.format(dept))
        body.append(proj_fmt.format(proj))
        body.append('<br />')
        body.append(line)
        body.append(remark)
        send_mail(subject=subject,
Example #9
    def execute(self, action):
        is_light = action[0] == 'lights'
        is_group = action[0] == 'groups'

        if not is_light and not is_group:
            return

        rid = action[1]
        function = action[2]
        value = action[3] if len(action) > 3 else None
        lights = utils.get_lights()
        groups = utils.get_groups()

        # Default API request parameters
        method = 'put'
        endpoint = '/groups/%s/action' % rid if is_group else '/lights/%s/state' % rid

        if function == 'off':
            data = {'on': False}

        elif function == 'on':
            data = {'on': True}

        elif function == 'bri':
            value = int((float(value) / 100) * 255) if value else 255
            data = {'bri': value}

        elif function == 'shuffle':
            if not is_group:
                print('Shuffle can only be called on groups.'.encode('utf-8'))
                return

            self._shuffle_group(rid)
            return True

        elif function == 'rename':
            endpoint = '/lights/%s' % rid
            data = {'name': value}

        elif function == 'effect':
            data = {'effect': value}

        elif function == 'color':
            if value == 'random':
                if is_group:
                    gamut = colors.GamutA
                    data = {'xy': self._get_random_xy_color(gamut)}
                else:
                    gamut = colors.get_light_gamut(lights[rid]['modelid'])
                    data = {'xy': self._get_random_xy_color(gamut)}
            else:
                try:
                    if is_group:
                        gamut = colors.GamutA
                    else:
                        gamut = colors.get_light_gamut(lights[rid]['modelid'])
                    data = {'xy': self._get_xy_color(value, gamut)}
                except ValueError:
                    print('Error: Invalid color. Please use a 6-digit hex color.'.encode('utf-8'))
                    return

        elif function == 'harmony':
            if not is_group:
                print('Color harmonies can only be set on groups.'.encode('utf-8'))
                return

            root = action[4] if len(action) > 4 else None

            if value not in harmony.MODES:
                print('Invalid harmony mode.'.encode('utf-8'))
                return

            self._set_harmony(rid, value, root)
            return

        elif function == 'reminder':
            try:
                time_delta_int = int(value)
            except ValueError:
                print('Error: Invalid time delta for reminder.'.encode('utf-8'))
                return

            reminder_time = datetime.datetime.utcfromtimestamp(time.time() + time_delta_int)

            method = 'post'
            data = {
                'name': 'Alfred Hue Reminder',
                'command': {
                    'address': self.hue_request.api_path + endpoint,
                    'method': 'PUT',
                    'body': {'alert': 'lselect'},
                },
                'time': reminder_time.replace(microsecond=0).isoformat(),
            }
            endpoint = '/schedules'

        elif function == 'set':
            data = {'scene': value}

        elif function == 'save':
            lids = utils.get_group_lids(rid)
            method = 'post'
            endpoint = '/scenes'
            data = {'name': value, 'lights': lids, 'recycle': False}

        else:
            return

        # Make the request
        self.hue_request.request(method, endpoint, json.dumps(data))

        return
Example #10
    """
    Generates dataset where points are shaped in a line,
    and labels then with y_val.
    """
    noise=0.1
    X = map(lambda p: [p+(random()*noise),p+(random()*noise)], [random() for _ in range(n)])
    return (X, [y_val] * len(X))


if __name__ == '__main__':
    X1,y1 = generate_circles(20, 0)
    X2,y2 = generate_line(20, 1)

    X = np.vstack((X1,X2))
    y = y1 + y2

    classes = ['circles', 'lines']
    Xs = flatten(map(halve_group, get_groups(X, y)))
    Rs = map(get_max_dist, Xs)
    cxs = map(cech, Xs)
    diagrams = [persistence_diagram(cx, R, n_intervals=None) for cx, R in zip(cxs, Rs)]
    titles = flatten([[name + '_train', name + '_test'] for name in classes])
    cluster_distances(map(lambda diagram: [PersistenceDiagram(d, diagram[d]) for d in range(3)], diagrams), ps=[0,1,2],
                      labels=titles, name="toy")
    for i, diagram in enumerate(diagrams):
        diagram, max_j = fix_infs(diagram)
        draw_persistance_diagram(diagram, max_j, titles[i])
        draw_bar_code_graph(diagram, max_j, titles[i])
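These topology snippets rely on Python 2 semantics: map() returns a list there, so len(X) and the later np.vstack work directly on its result. A rough Python 3 port of the line generator, keeping the same jittered-diagonal shape:

from random import random

def generate_line(n, y_val, noise=0.1):
    # Points jittered around the y = x diagonal, all labeled y_val.
    X = [[p + random() * noise, p + random() * noise]
         for p in (random() for _ in range(n))]
    return X, [y_val] * len(X)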
Example #11
from draw import draw_bar_code_graph,draw_persistance_diagram

cx_method = cech
dims = 3
random_split = True
use_default_R = True
n_intervals = None
funcs = []
prepre = StemLemPrepreprocessor

funcs = [word_lengths_funcs, sentence_lengths_funcs, ratio_most_n_common_words,
         ratio_length_of_words_texts, lambda text: ratio_length_of_words_texts(text, 8, ge)]
pp = Preprocessor(prepre, funcs, use_tfidf=50)
folder_names = ['abstracts', 'sports', 'reviews']
X, y = pp.process(['../data/' + fold_n for fold_n in folder_names])
Xs = flatten([halve_group(g, random_split=random_split) for g in get_groups(X, y)])
if cx_method == alpha_shapes:
    Xs = map(lambda X: reduce_n_columns(X, n=dims), Xs)
cxs = map(cx_method, Xs)
if use_default_R:
    Rs = map(get_max_dist, Xs)
else:
    Rs = map(lambda cx: max([sx.data for sx in cx]), cxs)
diagrams = [persistence_diagram(cx, R, X, n_intervals=n_intervals) for cx, R, X in zip(cxs, Rs, Xs)]
titles = flatten([[name + '_train', name + '_test'] for name in folder_names])
cluster_distances(map(lambda diagram: [PersistenceDiagram(d, diagram[d]) for d in range(dims)], diagrams),
                  labels=titles, name="main", ps=[0,1,2])
i = 0
for diagram in diagrams:
    print(diagram)
    diagram, max_j = fix_infs(diagram)
Example #12
    """
    noise = 0.1
    X = map(lambda p: [p + (random() * noise), p + (random() * noise)],
            [random() for _ in range(n)])
    return (X, [y_val] * len(X))


if __name__ == '__main__':
    X1, y1 = generate_circles(20, 0)
    X2, y2 = generate_line(20, 1)

    X = np.vstack((X1, X2))
    y = y1 + y2

    classes = ['circles', 'lines']
    Xs = flatten(map(halve_group, get_groups(X, y)))
    Rs = map(get_max_dist, Xs)
    cxs = map(cech, Xs)
    diagrams = [
        persistence_diagram(cx, R, n_intervals=None) for cx, R in zip(cxs, Rs)
    ]
    titles = flatten([[name + '_train', name + '_test'] for name in classes])
    cluster_distances(map(
        lambda diagram: [PersistenceDiagram(d, diagram[d]) for d in range(3)],
        diagrams),
                      ps=[0, 1, 2],
                      labels=titles,
                      name="toy")
    i = 0
    for diagram in diagrams:
        diagram, max_j = fix_infs(diagram)
Example #13
    def get_items(self):
        query = self._get_active_query()

        if ((query.startswith('lights') or query.startswith('groups')) and len(query.split(':')) >= 3):
            action_filter = HueActionFilter(self.workflow)
            control = query.split(':')
            lights = utils.get_lights(from_cache=True)
            groups = utils.get_groups()
            rid = control[1]

            self.items = action_filter.get_items(
                query=':'.join(control[2:]),  # lights:1:<light_query>
                id=rid,
                type=HueActionFilter.LIGHT_TYPE if query.startswith('lights') else HueActionFilter.GROUP_TYPE,
                resource=lights.get(rid, None) if query.startswith('lights') else groups.get(rid, None))

        else:  # Show index
            if not self.workflow.settings.get('username'):
                self._add_item(
                    'bridge_failed',
                    valid=True,
                    title='Link with Hue bridge',
                    arg='set_bridge:%s' % query)

            else:
                lights = utils.get_lights()
                groups = utils.get_groups()

                if not lights:
                    self._add_item(
                        'bridge_failed',
                        valid=True,
                        arg='set_bridge:%s' % query)

                else:
                    self._add_item('all_lights')

                    for rid, room in groups.items():
                        self._add_item(
                            title=room['name'],
                            autocomplete='groups:%s:' % rid)

                    if query.startswith('lights:') or query.startswith('groups:'):
                        self.partial_query = query.split(':')[1]

                    for lid, light in lights.items():
                        title = light['name']

                        if light['state']['on']:
                            subtitle = []
                            if light['state'].get('hue'):
                                subtitle.append('hue: {hue}'.format(
                                    hue='{0:.0f}°'.format(float(light['state']['hue']) / 65535 * 360)))
                            if light['state'].get('bri') is not None:
                                subtitle.append('bri: {bri}'.format(
                                    bri='{0:.0f}%'.format(float(light['state']['bri']) / 255 * 100)))
                            if light['state'].get('sat') is not None:
                                subtitle.append('sat: {sat}'.format(
                                    sat='{0:.0f}%'.format(float(light['state']['sat']) / 255 * 100)))
                            subtitle = ', '.join(subtitle) or 'on'
                            icon = '%s.png' % lid
                        else:
                            subtitle = 'off'
                            icon = 'off.png'

                        if not light['state'].get('reachable'):
                            title += ' **'
                            subtitle += ' — may not be reachable'

                        self._add_item(
                            title=title,
                            subtitle='({lid}) {subtitle}'.format(
                                lid=lid,
                                subtitle=subtitle,
                            ),
                            icon=icon,
                            autocomplete='lights:%s:' % lid)

                    # self._add_item('help')

        self._filter_items()
        return self.items
Example #14
def main(data_train_path,
         data_validate_path,
         vocabulary_path,
         trainedParamsPath=None,
         mode="TRAIN",
         data_group_len=3,
         batchsize=128,
         n_epoch=512,
         learning_rate=0.001,
         l2_regu_rate=0.001,
         word_dim=80,
         hiden_dim=80,
         keep_rate=0.9):

    # CONSTANT
    RNG = numpy.random.RandomState(220495)
    N_NON_WORD = 1
    TRAINNING = 1.0
    VALIDATING = 0.0
    # collect data for training process
    word_to_index, index_to_word = json.load(open(vocabulary_path, "r"))
    vocab = numpy.array(list(word_to_index.keys()))
    facts, ques, answ = numpy.load(data_train_path)
    data_train = numpy.dstack((facts, ques, answ))[0]

    facts, ques, answ = numpy.load(data_validate_path)
    data_val = numpy.dstack((facts, ques, answ))[0]

    groups_data_train = get_groups(data_train, groups_len=data_group_len)
    groups_data_val = get_groups(data_val, groups_len=data_group_len)

    ####################
    # LOAD MODEL HERE  #
    ####################

    if trainedParamsPath and os.path.isfile(trainedParamsPath):
        with open(trainedParamsPath, 'rb') as handle:
            trainedParams = pickle.load(handle)
    else:
        print("No Trained Model, create new")
        trainedParams = {
            'EMBD': None,
            'INPUT_FUSION_LAYER': None,
            'EPISODIC_MEM_PASS1': None,
            'EPISODIC_MEM_PASS2': None,
            'EPISODIC_MEM_PASS3': None,
            'prediction_layer': None
        }

    ####################
    # BUILD MODEL HERE #
    ####################

    # input tensor
    tensor_facts = T.itensor3()
    tensor_question = T.imatrix()
    train_state = T.dscalar()
    keep_rate = theano.shared(numpy.asarray(keep_rate,
                                            dtype=theano.config.floatX),
                              borrow=True)
    # Word embedding and encoding scheme for sentences (facts), question, etc.
    EMBD = EncodingLayer(num_vocab=len(vocab) - N_NON_WORD,
                         word_dim=word_dim,
                         rng=RNG,
                         embedding_w=trainedParams["EMBD"])
    dropout_layer = DropOut(RNG=RNG)

    # positional encoding scheme for list of facts
    tensor_facts_embded = EMBD.sents_ind_2vec(tensor_facts)
    tensor_question_embded = EMBD.words_ind_2vec(tensor_question)

    tensor_facts_embded = dropout_layer.output(layer_input=tensor_facts_embded,
                                               keep_rate=keep_rate,
                                               train=train_state)

    # INPUT MODULE --- Input Fusion Layer + Bi-Directional GRU
    INPUT_FUSION_LAYER = InputModule(
        RNG_=RNG,
        paramsTrained=trainedParams["INPUT_FUSION_LAYER"],
        input_sents=tensor_facts_embded,
        quesion=tensor_question_embded,
        num_in=word_dim,
        num_out=hiden_dim,
        name="INPUT_FUSION_LAYER")

    # INPUT_FUSION_LAYER output -- Bi-directional GRU facts and GRU of question
    bi_directional_facts, gru_question = INPUT_FUSION_LAYER.output

    # EPISODIC MEMORY NETWORK LAYERS --- Attention gate in GRU plus
    #                                         + Memory Update
    EPISODIC_MEM_PASS1 = EpisodicModule(
        RNG=RNG,
        fact_dim=hiden_dim,
        context_dim=160,
        mem_dim=hiden_dim,
        paramsTrained=trainedParams["EPISODIC_MEM_PASS1"],
        name="EPISODIC_MEM_PASS1_")
    EPISODIC_MEM_PASS2 = EpisodicModule(
        RNG=RNG,
        fact_dim=hiden_dim,
        context_dim=160,
        mem_dim=hiden_dim,
        paramsTrained=trainedParams["EPISODIC_MEM_PASS2"],
        name="EPISODIC_MEM_PASS2_")
    EPISODIC_MEM_PASS3 = EpisodicModule(
        RNG=RNG,
        fact_dim=hiden_dim,
        context_dim=160,
        mem_dim=hiden_dim,
        paramsTrained=trainedParams["EPISODIC_MEM_PASS3"],
        name="EPISODIC_MEM_PASS3_")

    # create initial memory vector
    begin_memory_state = T.identity_like(gru_question)
    begin_memory_state = T.fill(begin_memory_state, gru_question)

    # PASS 1
    episodic_mem_pass1, atten1 = EPISODIC_MEM_PASS1.output(
        bi_directional_facts, gru_question, begin_memory_state)

    # PASS 2
    episodic_mem_pass2, atten2 = EPISODIC_MEM_PASS2.output(
        bi_directional_facts, gru_question, episodic_mem_pass1)

    # PASS 3
    episodic_mem_pass3, atten3 = EPISODIC_MEM_PASS3.output(
        bi_directional_facts, gru_question, episodic_mem_pass2)

    # dropout_layer for answer module
    episodic_mem_pass3 = dropout_layer.output(layer_input=episodic_mem_pass3,
                                              keep_rate=keep_rate,
                                              train=train_state)

    # Prediction layer LogisticRegression
    prediction_layer = LogisticRegression(
        rng=RNG,
        paramsLayer=trainedParams["prediction_layer"],
        layerInput=episodic_mem_pass3,
        n_in=hiden_dim,
        n_out=len(vocab) - N_NON_WORD,
        name="prediction_layer_")

    if (mode == "TRAIN"):
        tensor_answers = T.imatrix()
        # COST FUNCTION --- negative log likelihood function
        n_sample = tensor_answers.shape[0]
        true_label = (tensor_answers - N_NON_WORD).reshape(shape=(n_sample, ))

        # l2 regulation
        L2 = (INPUT_FUSION_LAYER.L2 + EPISODIC_MEM_PASS1.L2 +
              EPISODIC_MEM_PASS2.L2 + EPISODIC_MEM_PASS3.L2 +
              prediction_layer.L2)

        # Negative log likelihood
        NLL_loss = prediction_layer.neg_log_likelihood(true_label)

        # COST FUNCTION
        COST_VALUE = NLL_loss + l2_regu_rate * L2

        # Create list params and updates method with ADAM Optimizer
        PARAMS_LIST = (EMBD.params + INPUT_FUSION_LAYER.params +
                       EPISODIC_MEM_PASS1.params + EPISODIC_MEM_PASS2.params +
                       EPISODIC_MEM_PASS3.params + prediction_layer.params)

        UPDATE_PARAMS = ADAM_OPTIMIZER(loss=COST_VALUE,
                                       all_params=PARAMS_LIST,
                                       learning_rate=learning_rate)

        # Create function call for train and validate
        TRAIN = theano.function(
            inputs=[
                tensor_facts, tensor_question, tensor_answers, train_state
            ],
            outputs=[COST_VALUE,
                     prediction_layer.cal_errors(true_label)],
            updates=UPDATE_PARAMS,
        )

        VALIDATE = theano.function(inputs=[
            tensor_facts, tensor_question, tensor_answers, train_state
        ],
                                   outputs=[
                                       NLL_loss,
                                       prediction_layer.cal_errors(true_label),
                                       atten1, atten2, atten3
                                   ],
                                   on_unused_input="warn")

        def getAllParams():
            paramsTrained = {
                "EMBD": EMBD.get_params(),
                "INPUT_FUSION_LAYER": INPUT_FUSION_LAYER.get_params(),
                "EPISODIC_MEM_PASS1": EPISODIC_MEM_PASS1.get_params(),
                "EPISODIC_MEM_PASS2": EPISODIC_MEM_PASS2.get_params(),
                "EPISODIC_MEM_PASS3": EPISODIC_MEM_PASS3.get_params(),
                "prediction_layer": prediction_layer.get_params()
            }
            return (paramsTrained)

        min_error = 0.9
        ####################
        # TRAIN MODEL HERE #
        ####################
        for epc in range(n_epoch):
            print("################## New Epoch ##################")
            error_train = numpy.zeros(shape=(len(groups_data_train), ))
            for group, data_train in enumerate(groups_data_train):
                n_inter = int(len(data_train) / batchsize) + 1
                list_error = []
                for iter_n in range(n_inter):
                    bz = min(batchsize, len(data_train))
                    sample_batch = get_batch(data_train, size=bz)
                    list_facts, list_ques, list_answ = sample_batch
                    cost, errors = TRAIN(list_facts, list_ques, list_answ,
                                         TRAINNING)
                    print(
                        "Epoch %i groups %i iter %i "
                        "with cost %f and errors %f" %
                        (epc, group, iter_n, cost, errors), "input shape",
                        numpy.array(list_facts).shape)

                paramsTrained = getAllParams()
                list_error.append(errors)
                error_train[group] = numpy.mean(list_error)

            if numpy.mean(error_train) < 0.9:
                error_val = []
                all_data_len = []
                for group, data_val in enumerate(groups_data_val):
                    data_len = len(data_val)
                    sample_validate = get_batch(data_val, size=data_len)
                    list_facts, list_ques, list_answ = sample_validate
                    cost, errors, at1, at2, at3 = VALIDATE(
                        list_facts, list_ques, list_answ, VALIDATING)
                    print("################## VALIDATION ####################")
                    print(
                        "Epoch %i groups %i with cost %f and errors %f "
                        "in %i samples" % (epc, group, cost, errors, data_len),
                        "input shape",
                        numpy.array(list_facts).shape)
                    error_val.append(errors)
                    all_data_len.append(data_len)
                error_val = numpy.asarray(error_val)
                all_error = error_val * numpy.asarray(all_data_len)
                total_error = numpy.sum(all_error) / numpy.sum(all_data_len)
                print("Epoch %i with total error %f" % (epc, total_error))

                if total_error < min_error:
                    print("Save params with new error: %f" % total_error)
                    min_error = total_error
                    model_train = getAllParams()
                    with open(trainedParamsPath, 'wb') as handle:
                        pickle.dump(model_train,
                                    handle,
                                    protocol=pickle.HIGHEST_PROTOCOL)
    # just for play
    else:
        TEST = theano.function(
            inputs=[tensor_facts, tensor_question, train_state],
            outputs=[prediction_layer.y_predict, atten1, atten2, atten3],
            on_unused_input="warn")
        print("##############################################################")
        print("##############################################################")
        print("")
        print(
            "Only use words in this list; other "
            "words might give a different result", list(word_to_index.keys()))
        time.sleep(2)
        while True:
            sents = input("Please input evidence sentences separated by '; ';"
                          " input 'END' to stop: ")
            if sents == "END":
                break
            ques = input("Please input question sentences: ")
            print("Parsing evidence sentences: ..... ")
            sents = sents.split("; ")
            indexed_sents = []
            for sent in sents:
                indexed_sent = []
                for word in sent.split(" "):
                    if word in word_to_index:
                        indexed_sent.append(word_to_index[word])
                    else:
                        indexed_sent.append(word_to_index["PADDING"])
                indexed_sents.append(indexed_sent)

            print("Parsing question sentences: ..... ")
            indexed_question = []
            for word in ques.split(" "):
                if word in word_to_index:
                    indexed_question.append(word_to_index[word])
                else:
                    indexed_question.append(word_to_index["PADDING"])

            max_words = max(map(len, indexed_sents))
            list_facts = padding(indexed_sents,
                                 shape=(len(indexed_sents), max_words))
            list_ques = indexed_question
            anws, att1, att2, att3 = TEST([list_facts], [list_ques], 0.0)

            print("The machine read story the first time and "
                  "gennerate the attention score for each "
                  "sentence as below: ...")
            print([str(round(elem, 3)) for elem in att1[0]])
            print("The machine read story the seccond time and "
                  "gennerate the attention score for each "
                  "sentence as below: ...")
            print([str(round(elem, 3)) for elem in att2[0]])
            print("The machine read story the third time and "
                  "gennerate the attention score for each "
                  "sentence as below: ...")
            print([str(round(elem, 3)) for elem in att3[0]])
            print("Then machine answer: ", index_to_word[str(anws[0] + 1)])