예제 #1
0
    def add_or_update_user(self, uid, updatelist, profileversion, ip, port, profile=None):
        """Create or refresh the user record for uid, then announce presence.

        Fetches a new profile when the stored version differs from
        profileversion; for a reappearing user with an up-to-date profile,
        re-requests the icon and community profiles instead.
        Fix: compare against None with 'is'/'is not' (PEP 8) instead of ==/!=.
        """
        user = get_user(uid)
        newuser = user is None
        if newuser:
            user = create_user(uid)
            if not user:
                warning('community: Unable to create a new user %s\n' % uid)
                return

        # Record the latest known contact address, if any
        if ip is not None:
            user.set('ip', ip)
            user.set('port', port)

        if newuser or user.get('v') != profileversion:
            user.update_attributes(updatelist, user.get('v'))

            if profile is not None:
                self.got_user_profile(user, profile, None)
            elif not user.inprogress:
                debug('Fetching new profile from user %s\n' % user.tag())
                request = {'t': 'uprofile'}
                # Mark the fetch in progress so duplicates are not issued
                if self.fetcher.fetch(user, PLUGIN_TYPE_COMMUNITY, request, self.got_user_profile):
                    user.inprogress = True

        elif not user.present and not user.inprogress:
            # User appears and user profile is already up-to-date
            self.request_user_icon(user)
            self.fetch_community_profiles(user)

        if user.update_presence(True):
            self.announce_user(user)
예제 #2
0
    def __init__(self, profile_dialog, resolution, got_photo_cb):
        """Modal camera dialog: a live preview that takes a photo on click.

        profile_dialog: parent gtk window for the dialog.
        resolution: (width, height) of the preview area in pixels.
        got_photo_cb: callback invoked when a photo has been taken.
        """
        self.cb = got_photo_cb
        self.dialog = gtk.Dialog('Camera', profile_dialog,
            gtk.DIALOG_DESTROY_WITH_PARENT | gtk.DIALOG_MODAL)
        self.dialog.set_has_separator(False)
        # Drawing surface the camera preview is rendered into
        self.image = gtk.DrawingArea()
        self.image.set_size_request(resolution[0], resolution[1])
        self.help_text = gtk.Label('Click to take picture')

        try:
            self.camera = Camera(resolution, self.image)
        except Camera_Exception:
            # No camera available: keep the dialog usable, tell the user
            debug('profile dialog: Unable to initialize camera\n')
            self.camera = None
            self.help_text.set_label('No camera found')

        # Center the preview horizontally between two expanding boxes
        self.image_hbox = gtk.HBox()
        self.image_hbox.pack_start(gtk.HBox())
        self.image_hbox.pack_start(self.image, False, False)
        self.image_hbox.pack_start(gtk.HBox())
        # Only show the preview row when a camera was found
        if self.camera != None:
            self.dialog.vbox.pack_start(self.image_hbox)
        self.dialog.vbox.pack_start(self.help_text, False, True)
        self.close_button = gtk.Button('Close')
        self.dialog.vbox.pack_start(self.close_button, False, True)
        self.close_button.connect('clicked', self.close_clicked)
        self.dialog.connect('response', self.dialog_response)


        # Clicking on the preview takes the picture
        self.image.add_events(gtk.gdk.BUTTON_PRESS_MASK)
        self.image.connect('button-press-event', self.image_clicked)
        self.dialog.show_all()
예제 #3
0
    def connect(self):
        """Open a fetch connection to self.user's known ip/port.

        Returns True when the connection attempt was started, False when no
        usable address exists or the TCP queue refuses to connect.
        Fix: identity comparison with None and truth-testing instead of
        '== False' (which silently mis-handles a None return).
        """
        ip = self.user.get('ip')
        port = self.user.get('port')

        if not community.get_network_state(community.IP_NETWORK):
            # Act as if we were missing the IP network
            warning('fetcher: IP network disabled\n')
            ip = None

        if ip is None or port is None:
            warning('fetcher: No ip/port to open %s\n' % (self.user.tag()))
            return False

        debug('fetcher: open from %s: %s:%s\n' % (self.user.tag(), ip, port))

        if not self.openingconnection or not self.q.connect((ip, port), TP_CONNECT_TIMEOUT):
            return False

        # The first write is seen by opposite side's RPC hander, not TCP_Queue
        prefix = '%s\n' % (TP_FETCH_RECORDS)
        self.q.write(prefix, writelength=False)

        # NOTE(review): firstmsg is not defined in this method; presumably a
        # module-level value -- confirm it exists at this scope.
        self.q.write(fetcher.encode(firstmsg, -1, ''))

        # Close queue that is idle for a period of time. This is also the
        # maximum processing time for pending requests. Requests taking
        # longer than this must use other state tracking mechanisms.
        self.q.set_timeout(TP_FETCH_TIMEOUT)
        return True
예제 #4
0
 def periodic_poll(self, t, ctx):
     # Re-read the address of every known interface and record any change.
     # Always returns True so the periodic timer stays armed.
     for dev, previous in self.interfaces.items():
         current = self.get_ip_address(dev)
         if current != previous:
             debug('IP changed for %s: %s %s\n' % (dev, current[0], current[1]))
             self.interfaces[dev] = current
     return True
예제 #5
0
    def fetchhandler(self, q, msg, parameter):
        """Handle one incoming fetch-protocol message on queue q.

        The first message of a connection must carry the remote user's uid
        and binds the connection to that user; later messages are handed to
        the fetcher. Returns False to drop the connection.
        Fix: 'is None' identity tests and isinstance() instead of type().
        """
        d = fetcher.decode(msg)
        if d is None:
            warning('fetch master: spurious msg\n')
            return False

        if self.user is None:
            # First message: identify and validate the remote user
            uid = d.get('uid')
            if uid is None or not isinstance(uid, str):
                warning('fetch slave: no uid in fetch connection\n')
                return False
            self.user = community.safe_get_user(uid, q.remote[0])
            if self.user is None:
                warning('fetch slave: Invalid uid from master: %s\n' % (uid))
                return False
            queuelist = self.add_connection(self.user)
            if len(queuelist) > MAX_QUEUES_PER_USER:
                warning('Not allowing too many connections from the same user: %s\n' % (self.user.tag()))
                return False
            debug('fetcher: connection from %s\n' % (self.user.tag()))
            return True

        if len(d['rt']) == 0:
            self.reqs.pop(d['rid'], None)  # Remove pending req (master side)

        fetcher.handle_msg(self.user, d)
        return True
예제 #6
0
    def _find_feature(self, feature, feature_cascade):
        """
        Finds the requested feature by its cascade (if exists).
        :param feature: Name of feature to find
        :param feature_cascade: Cascade filename for the requested feature (if exists).
        :return: List of (x, y, w, h) rectangles (two for 'eyes'), or [].
        """
        # For eyebrows and forehead
        if feature_cascade == "":
            return []

        feature_cascade = cv2.CascadeClassifier(
            os.path.join(OPENCV_PATH, feature_cascade))
        # Bugfix: cv2.CV_8U is a depth flag, not a color-conversion code.
        # Haar cascades expect a single-channel grayscale input.
        gray = cv2.cvtColor(self.im, cv2.COLOR_BGR2GRAY)
        # Params are changeable, 1.05 (bigger is stricter), 3 (bigger is stricter)
        features = feature_cascade.detectMultiScale(gray, 1.1, 2)
        if len(features) == 0 or (feature == 'eyes' and len(features) < 2):
            debug("Couldn't find feature - {}".format(feature))
            return []

        # Find both eyes
        if feature == 'eyes':
            # Sort key is (x + w) / 2; it is monotone in x, so it orders
            # detections left-to-right even though it is not the true center.
            eye_center_x = lambda coords: (coords[0] + coords[2]) / 2
            eyes = sorted(features, key=eye_center_x)
            ret = [eyes[0], eyes[-1]]  # Left eye, right eye
        else:
            ret = [features[0]]  # [(x, y, w, h)]

        return ret
예제 #7
0
 def load_config(self):
     # Apply configured window dimensions, clamped to sane minimums.
     width = max(300, self.width_setting.value)
     height = max(200, self.height_setting.value)
     debug("Proximate GUI: Resizing main window to %ix%i\n" % (width, height))
     self.main_window.set_default_size(width, height)
예제 #8
0
 def update_user(self, user, what):
     # Refresh the cached face image when the user's profile icon changed,
     # then redraw the radar view in any case.
     icon_changed = what and what[0] == PROFILE_ICON_CHANGED
     if icon_changed:
         debug('radar: User changed profile picture\n')
         if user in self.users:
             self.users[user][0] = self.scale_user_face(user, 2)
     self.queue_draw()
예제 #9
0
 def key_pressed_F6(self, target, ctx):
     # Toggle fullscreen on F6. NOTE(review): self.fullscreen_mode is not
     # updated here -- presumably a window-state event handler keeps it in
     # sync; confirm elsewhere in the file.
     if not self.fullscreen_mode:
         self.main_window.fullscreen()
         debug("GUI: Fullscreen mode ON\n")
     else:
         self.main_window.unfullscreen()
         debug("GUI: Fullscreen mode OFF\n")
예제 #10
0
 def _load_heat_map(self):
     """
     Loads the heat map that was generated from the grad-CAM into self.heat_map.

     Raises IOError when the heat map image is missing or unreadable.
     """
     heat_map_path = "media/heat_map_{}.png".format(self.top1_label)
     heat_map = cv2.imread(heat_map_path)
     # cv2.imread returns None on a missing/unreadable file; fail with a
     # clear message instead of an opaque error inside cv2.resize.
     if heat_map is None:
         raise IOError("Heat map not found: {}".format(heat_map_path))
     self.heat_map = cv2.resize(heat_map, (NET_WEIGHT, NET_HEIGHT))
     debug("Heat map loaded.")
예제 #11
0
    def find_significant_features(self):
        """
        Finds features in the face, gets their scores and filters the least significant ones.
        In the end, self.s_features will have a list of significant feature names, ordered from high to low.
        """
        # Load the heat map and blend with user image
        self._load_heat_map()
        self._create_blended()

        # Find significant features
        feature_coords = dict()
        for feature in FEATURES_CASCADES:
            feature_coords[feature] = self._find_feature(
                feature, FEATURES_CASCADES[feature])

        # Bugfix: feature_coords always gets one key per cascade, so the
        # old 'not len(feature_coords)' check could never trigger. Report
        # the error when no feature was actually detected instead.
        if not any(feature_coords.values()):
            self.err_code = 6
            self.err_msg = "No significant features found."
            warning(self.err_msg)

        # Find eyebrows and forehead if eyes were found
        if len(feature_coords['eyes']) == 2:
            eyebrows = self._find_eyebrows(feature_coords['eyes'])
            # If found, find forehead and adjust eyes
            if len(eyebrows) == 2:
                x_eb_l, y_eb_l, w_eb_l, h_eb_l = eyebrows[0]
                x_eb_r, y_eb_r, w_eb_r, h_eb_r = eyebrows[1]
                y_eb = min(y_eb_l, y_eb_r)
                x_eb = x_eb_l
                w_eb = x_eb_r + w_eb_r - x_eb_l
                # Forehead spans from the image top down to the eyebrows
                feature_coords['forehead'] = [(x_eb, 0, w_eb, y_eb)]
                # Subtract half of eyebrows height from eyes
                x_e_l, y_e_l, w_e_l, h_e_l = feature_coords['eyes'][0]
                x_e_r, y_e_r, w_e_r, h_e_r = feature_coords['eyes'][1]
                h_e_l = y_e_l + h_e_l - int(y_eb_l + h_eb_l / 2)
                h_e_r = y_e_r + h_e_r - int(y_eb_r + h_eb_r / 2)
                y_e_l = int(y_eb_l + h_eb_l / 2)
                y_e_r = int(y_eb_r + h_eb_r / 2)
                feature_coords['eyes'] = [(x_e_l, y_e_l, w_e_l, h_e_l),
                                          (x_e_r, y_e_r, w_e_r, h_e_r)]
                feature_coords['eyebrows'] = eyebrows

        for feature in feature_coords:
            coords = feature_coords[feature]
            # Feature not found
            if not len(coords):
                continue
            debug("Feature coords: {}".format(coords))
            self.s_features.append((feature, self._get_feature_score(coords)))
        # Sort features from highest to lowest
        self.s_features.sort(reverse=True, key=lambda f: f[1][-1])
        debug("Found features before filtering: {}".format(self.s_features))
        # Filter out un-distinct features and leave only names
        self.s_features = [
            feature[0] for feature in self.s_features
            if self._is_distinct_feature(feature[1])
        ]
예제 #12
0
 def wait_for_temp_key(self):
     # Timer callback: poll until the temporary passphrase is generated.
     # True keeps the timeout alive; False cancels it once we have shown
     # the passphrase page.
     if self.generating_temp:
         return True
     self.keylabel.set_markup('<span foreground="white" size="62000">%s</span>'
         % (self.keymanagement.temp_passphrase))
     self.notebook.set_current_page(1)
     debug('Key management: showing page 2\n')
     return False
예제 #13
0
def get_ip_address(ifname):
    """Return the (ip, broadcast) pair for network interface ifname.

    NOTE(review): only the socket-creation guard is visible in this view;
    the code that actually queries the interface (and uses 'no_conn')
    appears to be truncated. Python 2 'except' syntax.
    """
    fail = (None, None)           # returned on hard errors
    no_conn = ('', None)          # unused in the visible portion

    try:
        sock = socket(AF_INET, SOCK_STREAM)
    except error, (errno, strerror):
        # Unable even to create a socket: give up
        debug('ioutils error (%s): %s\n' %(errno, strerror))
        return fail
예제 #14
0
    def fetch_cb(self, user, req, success):
        # Fetch completion callback: on failure retry while the request
        # allows it, otherwise report failure via the request's callback.
        if success:
            return

        if not req.retry():
            req.call(user, None)
            return

        debug('Retrying fetch to %s\n' % user.tag())
        self.fetch(user, req)
예제 #15
0
    def handle_ack(self, d):
        # Drop every fragment the peer acknowledged; once none remain the
        # whole packet has been delivered and we can clean up.
        acked = [frag for frag in d['ack'] if frag in self.fragments]
        for frag in acked:
            debug('%d acked %d\n' % (self.packet, frag))
            self.fragments.pop(frag)

        if not self.fragments:    # All fragments acked
            debug('%d sent!\n' % (self.packet))
            self.cleanup(True)
예제 #16
0
 def cleanup(self, msg):
     """Detach this connection from its user's queue list and log it.

     Fix: fetchqueues.get() returns None when the user has no queue list,
     which previously raised AttributeError on .remove() -- only
     ValueError was caught.
     """
     if self.user is None:
         return
     queuelist = fetchqueues.get(self.user)
     if queuelist is not None:
         try:
             queuelist.remove(self)
         except ValueError:
             # Already removed; nothing to do
             pass
     debug('fetcher: connection to %s closed: %s\n' % (self.user.tag(), msg))
예제 #17
0
 def close_gui(self):
     # Tear down the key-management page if it is currently shown.
     if not self.is_visible:
         return
     debug("keymanagement: Closing GUI\n")
     self.user = None
     dialog = self.request_dialog
     if dialog:
         dialog.base.destroy()
     self.keymanagement.progress_update(None)
     self.main_gui.hide_page(self)
예제 #18
0
 def plugin_to_gui(self, user, request, isinitiator):
     """Dispatch a key-management protocol event to the GUI.

     user: the peer user object the event concerns.
     request: one of the keymanagement.KM_* event codes.
     isinitiator: True when we started the exchange (affects error text).
     """
     nick = user.get('nick')
     if request == self.keymanagement.KM_REQUEST_KEY:
         # Peer asks to exchange keys: show an approve/deny dialog
         self.user = user
         if not self.keymanagement.community.get_myself().get("key_fname"):
             # we do not have a permanent key to exhange
             self.request_dialog = Approve_Deny_Dialog(
                 self.main_gui.get_main_window(),
                 'Key Management',
                 '%s requests to exchange keys,\nbut you don\'t have a key.' %(nick),
                 self.dialog_response_request, user)
             # Disable the approve button: there is nothing to exchange
             self.request_dialog.base.action_area.get_children()[0].set_property(
                 "sensitive", False)
         else:
             self.request_dialog = Approve_Deny_Dialog(
                 self.main_gui.get_main_window(),
                 'Key Management',
                 '%s requests to exchange keys.\nAccept?' %(nick),
                 self.dialog_response_request, user)
     elif request == self.keymanagement.KM_REQUEST_ACK:
         # Our request reached the peer; wait for their decision
         self.open_gui()
         self.notebook.set_current_page(0)
         self.messagelabel.set_text('Waiting for an answer...')
         debug('Key management: showing page 1\n')
     elif request == self.keymanagement.KM_REQUEST_OK:
         # Poll until the temporary passphrase has been generated
         timeout = timeout_add(100, self.wait_for_temp_key)
     elif request == self.keymanagement.KM_REQUEST_DENIED:
         self.notification.ok_dialog('Key Management', '%s denied your request to exchange keys.' %(nick))
         self.close_gui()
     elif request == self.keymanagement.KM_REQUEST_ANSWER_ACK:
         # Peer accepted: show the passphrase-entry page
         self.open_gui()
         self.notebook.set_current_page(2)
         self.key_eventbox.grab_focus()
         self.entered_key = ''
         self.set_keyentry_text()
         debug('Key management: showing page 3\n')
     elif request == self.keymanagement.KM_TEMP_KEY_ACK:
         # NOTE(review): this branch and KM_TEMP_KEY1 below are identical
         self.messagelabel.set_text('Sending keys...')
         self.notebook.set_current_page(0)
     elif request == self.keymanagement.KM_TEMP_KEY1:
         self.messagelabel.set_text('Sending keys...')
         self.notebook.set_current_page(0)
     elif request == self.keymanagement.KM_FINISHED:
         self.notification.ok_dialog('Key Management', 'Successful key exchange!')
         self.close_gui()
     elif request == self.keymanagement.KM_CANCEL:
         self.notification.ok_dialog('Key Management', 'Key exchange canceled')
         self.close_gui()
     elif request == self.keymanagement.KM_ERROR:
         s = 'Error during key exchange!'
         if not isinitiator:
             s += '\nYou may possibly have written an incorrect code.'
         self.notification.ok_dialog('Key Management', s)
         self.close_gui()
     elif request == self.keymanagement.KM_REQUEST_NACK:
         self.notification.ok_dialog('Key Management', '%s is busy' %(nick))
예제 #19
0
 def send_ack(self):
     # Acknowledge every currently known fragment of this packet.
     frags = self.fragments.keys()
     debug('%d send ack %s\n' % (self.packet, str(frags)))
     payload = {
         't': PACKET_ACK,
         'from': community.get_myuid(),
         'to': self.user.get('uid'),
         'packet': self.packet,
         'ack': frags,
         }
     plugin.send_lowlevel(self.user, bencode(payload))
예제 #20
0
    def request_message(self, user, msgid, max_depth=0):
        """ Request message from user using msgid. Parameter
            'max_depth' defines the maximum amount of ancestors
            that are requested after this message. """

        # Without a chat context there is nowhere to deliver the reply
        if not self.chatcontext:
            return

        debug('Requesting message %s\n' % hexlify(msgid))
        self.fetcher.fetch(user, PLUGIN_TYPE_MESSAGING,
                           {'t': 'request', 'msgid': msgid},
                           self.got_request_reply, ctx=max_depth)
예제 #21
0
def create_tcp_socket(address, port, reuse = False):
    """ If port != 0, create and bind listening socket on the given
    port and address. Otherwise create a socket that can be connected.

    Returns the socket when successful, otherwise None.

    NOTE(review): only the socket-creation guard is visible in this view;
    the bind/listen logic that uses address, port and reuse appears to be
    truncated. Python 2 'except' syntax."""

    try:
        sock = socket(AF_INET, SOCK_STREAM)
    except error, (errno, strerror):
        # Unable even to create a socket: give up
        debug('ioutils error (%s): %s\n' %(errno, strerror))
        return None
예제 #22
0
    def take_photo(self):
        """ This function tells the image_sink to handoff a picture buffer
            to save_buffer_cb.

            The buffer is delivered asynchronously via the 'handoff'
            signal: save_buffer_cb receives the image data. (An earlier
            docstring described a 'cb' parameter; this method takes none.)
        """
        debug("Taking photo!\n")
        # connect handoff signal to give the buffer to a function
        self.buffer_cb_id = self.image_sink.connect("handoff", self.save_buffer_cb)
예제 #23
0
    def icd_reply(self, *args):
        # D-Bus reply from ICd: args[1] is our IP address, args[2] the netmask.
        ip = str(args[1])
        addr = ip_number(ip)
        mask = ip_number(args[2])

        # Assume broadcast address is the highest number address in the subnet
        host_bits = ((1 << 32) - 1) ^ mask
        bcast = ip_string(addr | host_bits)

        debug('ICd: IP reply: %s %s\n' % (ip, bcast))
        self.interfaces[ICD_INTERFACE] = (ip, bcast)
예제 #24
0
 def send_request(self, user, request, payload=''):
     # Deliver a key-management request to the current exchange peer;
     # warn (but still send) when it targets somebody else.
     cuser = self.current['user']
     if cuser == None:
         return
     if user != cuser:
         warning('keymanagement: Sending message to wrong uid %s while current uid is %s\n' % (user.get('uid'), cuser.get('uid')))
     self.current['state'] = request
     debug('Key management: sending request %s to %s\n' %(request, user.get('nick')))
     message = {'t': request, 'uid': self.my_uid, 'param': payload}
     self.fetcher.fetch(user, PLUGIN_TYPE_KEY_MANAGEMENT, message,
         self.request_cb, user.get('uid'))
예제 #25
0
    def run(self, user):
        # Begin a key exchange with 'user' unless one is already running.
        if self.user:
            return
        self.user = user

        nick = user.get('nick')
        debug('Opening GUI to exchange keys with %s\n' % (nick))
        if not self.keymanagement.community.get_myself().get("key_fname"):
            # we do not have a permanent key to exhange
            self.notification.ok_dialog('Key Management', 'You do not have a key to exchange.')
            return

        self.keymanagement.send_exchange_request(user)
        self.notification.notify('Key Management: Sending request to exchange keys with %s...' % (nick))
예제 #26
0
 def run(self):
     # Open the radar for the most recently viewed community, falling
     # back to the default community when no page in history has one.
     chosen = self.community.get_default_community()
     for page in reversed(self.main_gui.page_history):
         candidate = page.get_community()
         if candidate != None:
             chosen = candidate
             break
     cname = chosen.get('name')
     debug('radar: Opening radar view for community %s\n' % cname)
     self.page.set_page_title(cname, sub=True)
     self.user_radar.set_community(chosen)
     self.main_gui.show_page(self.page)
예제 #27
0
def create_udp_socket(address, port, bcast, reuse = False):
    """ If port != 0, create and bind listening socket on port
    and address. If bcast == True, create a broadcast socket.

    Returns the socket when successful, otherwise None.

    NOTE(review): only the argument sanity check and socket creation are
    visible in this view; the bind/broadcast setup using address, port
    and reuse appears to be truncated. Python 2 'except' syntax."""

    # A socket cannot be both a bound listener and a broadcast socket
    if port != 0 and bcast:
        debug('create_udp_socket: both port != 0 and bcast == True may not be true\n')
        return None

    try:
        sock = socket(AF_INET, SOCK_DGRAM)
    except error, (errno, strerror):
        # Unable even to create a socket: give up
        debug('ioutils error (%s): %s\n' %(errno, strerror))
        return None
예제 #28
0
def connect_socket(sock, name, port):
    """Start connecting sock to (name, port).

    Returns True when the connect succeeded or is now in progress
    (non-blocking EINPROGRESS); False on any other error. EINTR retries.
    Fix: a successful (blocking) connect previously fell through the loop
    and reconnected, which then fails with EISCONN -- return True instead.
    """
    while True:
        try:
            sock.connect((name, port))
        except error, (errno, strerror):
            # 1. Connection now in progress (EINPROGRESS)
            # 2. Connection refused (ECONNREFUSED)
            debug('connect_socket: %s %d: %s\n' %(name, port, strerror))
            if errno == EINTR:
                continue
            return errno == EINPROGRESS
        except gaierror, (errno, strerror):
            # Unknown host name
            debug('connect_socket: %s %d: %s\n' %(name, port, strerror))
            return False
        else:
            # Connected without error
            return True
예제 #29
0
    def close(self, status = TCPQ_EOF, msg = ''):
        # Shut this queue down: drop buffers and handlers, close the
        # socket, then notify the registered close handler (if any).
        debug('TCP_Queue closed: status %d (%s)\n' %(status, msg))
        self.inb = ''
        self.outb = ''
        self.send_handler = None
        self.recv_handler = None
        self.throttled = False
        self.status = status
        self.remove_io_notifications()

        sock = self.sock
        if sock != None:
            self.sock = None
            sock.close()

        if self.closehandler != None:
            self.closehandler(self, self.parameter, msg)
예제 #30
0
 def set_results(self, im, pred_id, pred_label, all_preds, hm_lvl):
     """
     Populates the "Results" object.
     These attributes will be used for the results display to the user.
     :param im: User cropped image (ndarray)
     :param pred_id: Top predicted id (name) by the network
     :param pred_label: Top predicted label (number) by the network
     :param all_preds: Other predictions (based on number of results requested)
     :param hm_lvl: Filter level to apply on the heat map
     """
     self.top1_id = pred_id
     self.top1_label = pred_label
     self.top_other_ids = all_preds
     self.hm_lvl = hm_lvl
     # Normalize the image to the network's input size
     self.im = cv2.resize(im, (NET_WEIGHT, NET_HEIGHT))
     debug("Results set.")
예제 #31
0
    def status_changed_handler(self, iap, bearer, state, *args):
        """ Handles connection events received from Internet
            Connectivity daemon. """

        if state == 'DISCONNECTING':
            debug('ICd: disconnecting\n')
            self.interfaces[ICD_INTERFACE] = (None, None)
            return

        if state != 'CONNECTED':
            return

        debug('ICd: connected\n')
        # Address not known yet; ask ICd for it asynchronously
        self.interfaces[ICD_INTERFACE] = (None, None)
        try:
            self.icd.get_ipinfo(dbus_interface='com.nokia.icd',
                                reply_handler=self.icd_reply,
                                error_handler=self.icd_error)
        except DBusException:
            pass
예제 #32
0
    def handle_icon_request(self, user, request):
        """Serve an icon (own face, or a community icon) to a peer.

        request['iconid'] is either 'user' or 'c:<community name>'.
        Returns {} when the request was handled (the icon is pushed back
        asynchronously via the fetcher), None for invalid requests.
        """
        iconid = request.get('iconid')
        if iconid == None or type(iconid) != str:
            return None

        debug('Icon request from %s: %s\n' % (user.get('nick'), iconid))

        if iconid == 'user':
            # Our own profile face
            icon = read_file_contents(seek_face_name(self.myself))
            version = self.myself.get('faceversion')
            limiter = self.iconfetchlimiters['user']

        elif iconid.startswith('c:'):
            # Community icon; only serve communities we are a member of
            cname = iconid[2:]
            if not valid_community(cname):
                return None
            if cname not in self.myself.get('communities'):
                return None
            com = self.get_ordinary_community(cname)
            if com == None:
                return None
            if com.get('myiconversion') != com.get('iconversion'):
                # Do not reply with a old version of the icon!
                return
            icon = read_file_contents(seek_community_icon_name(com))
            version = com.get('iconversion')
            # Lazily create a per-community rate limiter for icon pushes
            limiter = self.iconfetchlimiters.get(iconid)
            if limiter == None:
                limiter = Rate_Limiter(ICON_PUSH_INTERVAL)
                self.iconfetchlimiters[iconid] = limiter
        else:
            return None

        # Normalize missing data so the reply is always well-formed
        if icon == None:
            icon = ''
        if version == None:
            version = 0

        request = {'t': 'iconpush', 'iconid': iconid, 'icon': icon, 'version': version}

        if normal_traffic_mode():
            # Push directly to the requester, unthrottled
            self.fetcher.fetch(user, PLUGIN_TYPE_COMMUNITY, request, None, ack=False)
        elif limiter == None or limiter.check():
            # Low-traffic mode: broadcast to the default community, rate limited
            self.fetcher.fetch_community(self.get_default_community(), PLUGIN_TYPE_COMMUNITY, request, None, ack=False)

        return {}
예제 #33
0
    def got_metadata_for_file(self, metas, ctx):
        # Callback: display a metadata dialog for exactly one matching share.
        (user, guiname) = ctx

        filesharing.progress_update(None)

        if len(metas) == 0:
            notification.ok_dialog('Filesharing',
                                   'No metadata for file: %s' %(guiname))
            return

        if len(metas) != 1:
            debug('FileSharing: Found too many metadatas for file\n')
            return

        meta = metas[0][2]
        Show_Metadata_Dialog(main_gui.get_main_window(), guiname, meta)
예제 #34
0
    def handle_request(self, user, msgid):
        """Serve a stored message (and its children) to a requesting peer.

        Returns the reply dict, or None when the message is unknown or the
        requester is not allowed to read it.
        """
        # Locate the conversation that contains msgid
        conversation = None
        for candidate in self.conversations.values():
            if candidate.has_msgid(msgid):
                conversation = candidate
                break

        if not conversation:
            # No stored conversation with the requesting user
            debug('Unknown user\n')
            return None

        target_addr = conversation.target_addr

        msg = conversation.get_msg(msgid)
        if not msg:
            # We do not have the requested message
            return None

        # Community messages may only be served to community members
        if conversation.is_community():
            if conversation.tag() not in user.get('communities'):
                warning('User \'%s\' is not a member of the target community\n' % user.get('uid'))
                return None

        # check if UID fails
        if msg.get_target_addr() != target_addr and msg.get_sender_addr() != target_addr:
            debug('UID fail\n')
            return None

        if msg.error:
            # Clear the error flag and let listeners refresh their view
            msg.error = False
            for cb in self.change_message_cb:
                cb(conversation, msg)

        children = conversation.get_children(msgid, [])

        # XXX: Remove 't' field after some months
        return {'t': 'request_reply', 'msg': msg.to_list(), 'children': children}
예제 #35
0
 def _get_feature_score(self, coords):
     """
     Returns a sum of weighted average (per channel) of the feature area on the heat map.
     If a channel is to be filtered, based on hm_lvl, it is not calculated in the score.
     The higher the score, the more significant the feature is.
     :param coords: Feature rectangle coordinates.
     :return: A list of weighted averages per channel (non-filtered ones) [..., avg(G), avg(R)]
     """
     # Weighted average pixel of each detected rectangle, per channel.
     # B, G, R channels are weighted by 1, 2, 3 respectively; channels
     # below self.hm_lvl are excluded from the score entirely.
     weighted = []
     for x, y, w, h in coords:
         region = self.heat_map[y:y + h, x:x + w]
         weighted.append([
             np.average(region[:, :, channel]) * (channel + 1)
             for channel in range(self.hm_lvl, NUM_CHANNELS)
         ])
     debug("Weighted channel averages before summation: {}".format(weighted))
     # Alternatives considered: plain (unweighted) channel averages, or
     # weighting channels by (c + 1) ** 2 instead of (c + 1).
     return np.average(weighted, axis=0)
예제 #36
0
def add_task(RNN_model,
             model_name='RNN',
             activation='tanh',
             length=400,
             nb_layers=1,
             nb_hid=128,
             nb_epoch=10,
             batch_size=32,
             learning_rate=0.01,
             clipnorm=1000,
             initializer_func=None,
             hybrid_nunits=0,
             hybrid_type=LSTM,
             recurrent_initializer='orthogonal',
             gate_regularizer=None,
             learn_retention_ratio=False,
             load_model=False,
             N_train=20000,
             N_test=1000,
             model_path=None,
             max_entries=3):
    """Perform the adding task

    Parameters
    ----------
    RNN_model : handle to the class
    model_name: Just a name
    length : int
        sequence length of the adding problem
    nb_layers : int
        number of recurrent layers
    nb_hid : int
        number of hidden units per layer
    nb_epoch : int
        total number of epoch
    batch_size : int
        the batch size
    learning_rate : float
        learning rate of the optimizer
    clipnorm : float
        gradient clipping, if >0
    model_path : str
        where checkpoints are saved/loaded
    max_entries : int
        maximum number of marked entries to add

    Returns
    -------
    The Keras History object returned by model.fit.
    """
    # ----- print mode info -----
    info("Model Name: ", model_name)
    info("Number of epochs: ", nb_epoch)
    info("Batch Size: ", batch_size)
    info("Number of layers: ", nb_layers)
    info("Number of hidden units: ", nb_hid)
    info("Activation: ", activation)
    info("Recurrent initializer: ", recurrent_initializer)

    # ----- prepare data -----
    # identify data format expected by the backend
    if K.backend() == "tensorflow":
        K.set_image_data_format("channels_last")
    else:
        K.set_image_data_format("channels_first")

    X_train, Y_train, X_test, Y_test = load_adding_problem(
        length=length, N_train=N_train, N_test=N_test, max_entries=max_entries)

    info("Basic dataset statistics")
    info("X_train shape:", X_train.shape)
    info("Y_train shape:", Y_train.shape)
    info("X_test shape:", X_test.shape)
    info('Y_test shape:', Y_test.shape)

    # setup sequence shape
    input_shape = X_train.shape[1:]

    # ----- Build Model -----
    img_input = Input(shape=input_shape)

    if initializer_func is None:
        # NOTE(review): initializer_func is defaulted here but never passed
        # to define_model below -- confirm whether it is picked up elsewhere.
        initializer_func = keras.initializers.Identity(gain=1.0)

    x = define_model(RNN_model=RNN_model,
                     hybrid_nunits=hybrid_nunits,
                     h_dim=nb_hid,
                     op_dim=1,
                     num_layers=nb_layers,
                     activation=activation,
                     op_activation='linear',
                     recurrent_initializer=recurrent_initializer,
                     ip=img_input,
                     GRNN=hybrid_type,
                     op_type='sample',
                     learn_retention_ratio=learn_retention_ratio,
                     gate_regularizer=gate_regularizer)

    # compile model
    info('Compiling model...')
    model = Model(img_input, x)
    model.summary()

    # Resume from a checkpoint when one exists
    if not os.path.isfile(model_path):
        if load_model:
            debug('File does not exist. Creating new.')
    else:
        # NOTE(review): weights are loaded whenever the file exists, even
        # when load_model is False -- confirm this is intended.
        model.load_weights(model_path, by_name=True)

    # ----- Configure Optimizer -----
    opt = SGD(lr=learning_rate, clipnorm=clipnorm)
    model.compile(loss='mse', optimizer=opt, metrics=['mse'])

    print("[MESSAGE] Model is compiled.")

    # Callbacks
    early_stop = EarlyStopping(monitor="val_loss", patience=25, verbose=1)
    print_model_name = LambdaCallback(on_epoch_begin=lambda batch, logs: info(
        'Running ' + model_name + ', Add task length = ' + str(length)))
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.9,
                                  patience=2,
                                  verbose=1,
                                  mode='auto',
                                  min_delta=0.0001,
                                  cooldown=0,
                                  min_lr=1e-6)
    stoponloss = StopOnLoss(monitor='loss', value=1e-3, verbose=1)
    checkpoint = MyModelCheckpoint(model_path,
                                   monitor='loss',
                                   verbose=1,
                                   save_best_only=True,
                                   save_weights_only=False,
                                   mode='min')

    # ----- Training Model -----
    # Fit the model on the batches generated by datagen.flow().
    history = model.fit(X_train,
                        Y_train,
                        batch_size=batch_size,
                        epochs=nb_epoch,
                        validation_data=(X_test, Y_test),
                        callbacks=[
                            reduce_lr, early_stop, print_model_name,
                            checkpoint, stoponloss
                        ])

    return history
예제 #37
0
def train_rnn(path_root,
              start_tlen,
              start_slen,
              start_klen,
              Tlen_max,
              Slen_max,
              Klen_max,
              model_name='LSTM',
              **kwargs):
    """Curriculum-train an RNN on the copy task, growing Tlen then Slen.

    Each (Klen, Slen, Tlen) setting is repeated until the final
    categorical accuracy exceeds 0.96; on success Tlen is incremented,
    and when Tlen is exhausted Slen grows.  On failure the learning rate
    is halved; training aborts once it drops below 1e-6.

    Parameters
    ----------
    path_root : str
        Prefix for the pickled per-iteration loss logs.
    start_tlen, start_slen, start_klen : int
        Initial total-length / pattern-length / alphabet-size settings.
    Tlen_max, Slen_max, Klen_max : int
        Upper bounds of the curriculum.
    model_name : str
        Name of the model class; resolved with ``eval``.
    **kwargs
        Forwarded to ``copy_task``; must contain 'learning_rate'.
    """
    import pickle
    # NOTE(review): eval() resolves the class from its name — fine for a
    # trusted config string, never for untrusted input.
    rnn_model = eval(model_name)
    niters = 0
    hist = {}  # fix: was never initialized -> NameError on first assignment

    curr_klen = start_klen
    curr_slen = start_slen
    curr_tlen = start_tlen

    def incr_tlen(l):
        """Next Tlen: grow by ~20%, step clamped to [1, 8]."""
        incr_l = min(8, l / 5)  # cap large steps to 8 (fix: comment said 10)
        incr_l = max(incr_l, 1)  # step at least 1
        nxt_l = int(incr_l) + l
        print(f'Increment Tlen. old_length is {l}. Next length is {nxt_l}')
        if nxt_l <= Tlen_max:
            return nxt_l
        else:
            return Tlen_max + 1  # sentinel: terminates the inner while loop

    def incr_slen(l):
        """Next Slen: grow by ~33%, step clamped to [1, 3]."""
        incr_l = min(3, l / 3)  # cap large steps to 3
        incr_l = max(incr_l, 1)  # avoid fraction
        nxt_l = int(incr_l) + l
        print(f'Increment Slen. old_length is {l}. Next length is {nxt_l}')
        if nxt_l <= Slen_max:
            return nxt_l
        else:
            return Slen_max + 1  # sentinel: terminates the outer while loop

    def incr_klen(l):
        """Next Klen: grow by 50% (not used by the current schedule)."""
        nxt_l = round(l + l / 2)
        print(f'Increment klen. old_length is {l}. Next length is {nxt_l}')
        if nxt_l <= Klen_max:
            return nxt_l
        else:
            return Klen_max + 1

    start_lr = kwargs['learning_rate']

    # not sure stepping through K is a good idea.
    iteration = 0
    logs = {}
    curr_klen = start_klen
    while curr_slen <= Slen_max:
        if curr_tlen > Tlen_max:
            curr_tlen = Tlen_max
        while curr_tlen <= Tlen_max:
            info(' Current Klen = ' + str(curr_klen) + ' Slen = ' +
                 str(curr_slen) + ' Tlen = ' + str(curr_tlen))
            hist[model_name] = copy_task(rnn_model,
                                         model_name=model_name,
                                         Tlen=curr_tlen,
                                         Slen=curr_slen,
                                         Klen=curr_klen,
                                         **kwargs)
            # Persist the full log history each round.
            logs[iteration] = [
                hist[model_name].history['loss'], curr_tlen, curr_slen,
                curr_klen
            ]
            # fix: close the pickle file instead of leaking the handle
            with open(f"{path_root}{model_name}", "wb") as pickle_out:
                pickle.dump(logs, pickle_out)
            iteration += 1

            if hist[model_name].history['categorical_accuracy'][-1] < 0.96:
                debug('Accuracy not high enough yet.')
                debug('Repeat Klen = ' + str(curr_klen) + ' Slen = ' +
                      str(curr_slen) + ' Tlen = ' + str(curr_tlen))
                if kwargs['learning_rate'] > 1e-6:
                    kwargs['learning_rate'] = kwargs['learning_rate'] / 2
                    debug('Lower max learning rate to ',
                          kwargs['learning_rate'])
                else:
                    debug('Convergence failed. Ending')
                    return
                niters = niters + 1
                debug('Number of iterations = ', niters)
            else:
                niters = 0
                printer('Done! Save specs and increase complexity.')
                curr_tlen = incr_tlen(curr_tlen)
                kwargs['learning_rate'] = start_lr
                # NOTE(review): best_specs_path is a free name here —
                # presumably a module-level constant; confirm it is defined
                # at file scope.
                with open(best_specs_path, 'wb') as handle:
                    info('Dumping next iteration specs.')
                    pickle.dump((curr_klen, curr_slen, curr_tlen),
                                handle,
                                protocol=pickle.HIGHEST_PROTOCOL)
        curr_slen = incr_slen(curr_slen)
        printer('Klen = ' + str(curr_klen) + ' Slen = ' + str(curr_slen) +
                ' Tlen = ' + str(curr_tlen))

        if (curr_slen == Slen_max) and (curr_klen
                                        == Klen_max) and (curr_tlen
                                                          == Tlen_max):
            printer('FINISHED!!!!')
            printer('Klen = ' + str(curr_klen) + ' Slen = ' + str(curr_slen) +
                    ' Tlen = ' + str(curr_tlen))
            return
예제 #38
0
def copy_task(RNN_model,
              model_name='RNN',
              activation='tanh',
              nb_layers=1,
              nb_hid=128,
              nb_epoch=3,
              batch_size=32,
              learning_rate=0.1,
              nbatches=100,
              clipnorm=10,
              momentum=0,
              initializer_func=None,
              hybrid_nunits=0,
              hybrid_type=LSTM,
              Tlen=100,
              Slen=20,
              Klen=128,
              NClasses=1024,
              recurrent_initializer='orthogonal',
              learn_retention_ratio=False,
              load_model=False,
              model_path=None):
    """Train an RNN on the copying-memory problem.

    Builds the model, restores weights from ``model_path`` when the file
    exists, and trains with SGD + gradient clipping under callbacks for
    LR decay, early stopping, checkpointing, and accuracy/loss stopping.

    Parameters
    ----------
    RNN_model : class
        Handle to the recurrent-layer class.
    model_name : str
        Display name used in log messages.
    activation : str
        Activation of the recurrent layers.
    nb_layers : int
        Number of recurrent layers.
    nb_hid : int
        Number of hidden units per layer.
    nb_epoch : int
        Total number of epochs.
    batch_size : int
        Training batch size (validation uses half of it).
    learning_rate : float
        Initial SGD learning rate.
    nbatches : int
        Steps (batches) per epoch.
    clipnorm : float
        Gradient clipping, if >0.
    momentum : float
        SGD momentum.
    initializer_func : keras initializer or None
        Defaults to ``Identity(gain=1.0)`` when None (not forwarded to
        ``define_model`` — kept for interface compatibility).
    hybrid_nunits, hybrid_type
        Optional hybrid recurrent block configuration.
    Tlen, Slen, Klen, NClasses : int
        Copy-task geometry: total length, pattern length, alphabet size,
        output classes.
    recurrent_initializer : str
        Initializer for the recurrent kernels.
    learn_retention_ratio : bool
        Forwarded to ``define_model``.
    load_model : bool
        Only affects the log message when ``model_path`` is missing.
    model_path : str
        Checkpoint path.

    Returns
    -------
    keras.callbacks.History
        The training history.
    """
    # ----- prepare data generators -----
    train_gen = data_generator(nsamples=batch_size,
                               Tlen=Tlen,
                               Slen=Slen,
                               Klen=Klen,
                               NClasses=NClasses)
    val_gen = data_generator(nsamples=int(batch_size / 2),
                             Tlen=Tlen,
                             Slen=Slen,
                             Klen=Klen,
                             NClasses=NClasses)
    # One draw just to read off the input shape below.
    X_train, Y_train, _ = next(train_gen)
    X_test, Y_test, _ = next(val_gen)

    # setup sequence shape (drop the batch dimension)
    input_shape = X_train.shape[1:]

    # ----- Build Model -----
    img_input = Input(shape=input_shape)

    if initializer_func is None:  # idiom fix: identity check for None
        initializer_func = keras.initializers.Identity(gain=1.0)

    x = define_model(RNN_model=RNN_model,
                     hybrid_nunits=hybrid_nunits,
                     h_dim=nb_hid,
                     op_dim=NClasses,
                     num_layers=nb_layers,
                     activation=activation,
                     op_activation='softmax',
                     recurrent_initializer=recurrent_initializer,
                     ip=img_input,
                     GRNN=hybrid_type,
                     op_type='seq',
                     learn_retention_ratio=learn_retention_ratio)

    # compile model
    print("[MESSAGE] Compiling model...")
    model = Model(img_input, x)
    model.summary()
    # NOTE(review): weights are restored whenever the file exists,
    # regardless of load_model — confirm this is intended.
    if not os.path.isfile(model_path):
        if load_model:  # idiom fix: was `== True`
            debug('File does not exist. Creating new.')
    else:
        model.load_weights(model_path, by_name=True)

    # ----- Configure Optimizer -----
    # rmsprop = RMSprop(lr=learning_rate, clipnorm=clipnorm)
    opt = SGD(lr=learning_rate, clipnorm=clipnorm, momentum=momentum)
    # temporal sample weights: per-timestep weighting of the seq-to-seq loss
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=[keras.metrics.categorical_accuracy],
                  sample_weight_mode="temporal")
    print("[MESSAGE] Model is compiled.")

    # Callbacks
    early_stop = EarlyStopping(
        monitor="loss",
        patience=10,
        verbose=1,
        min_delta=0.0001,
    )
    print_model_name = LambdaCallback(on_epoch_begin=lambda batch, logs: info(
        'Running ' + model_name + ', Copy task Slen = ' + str(
            Slen) + ', Klen = ' + str(Klen) + ', Tlen = ' + str(Tlen)))

    reduce_lr = ReduceLROnPlateau(monitor='loss',
                                  factor=0.5,
                                  patience=5,
                                  verbose=1,
                                  mode='auto',
                                  min_delta=0.0001,
                                  cooldown=0,
                                  min_lr=1e-7)

    checkpoint = MyModelCheckpoint(model_path,
                                   monitor='loss',
                                   verbose=1,
                                   save_best_only=True,
                                   save_weights_only=False,
                                   mode='min')

    # Stop as soon as the task is effectively solved.
    stoponacc = StopOnAcc(monitor='categorical_accuracy',
                          value=0.99,
                          verbose=1)
    stoponloss = StopOnLoss(monitor='loss', value=1e-5, verbose=1)

    # ----- Training Model -----
    history = model.fit_generator(generator=train_gen,
                                  validation_data=val_gen,
                                  epochs=nb_epoch,
                                  steps_per_epoch=nbatches,
                                  validation_steps=1,
                                  callbacks=[
                                      reduce_lr, early_stop, print_model_name,
                                      checkpoint, stoponacc, stoponloss
                                  ])

    # Single-sample inference, kept for quick visual inspection in a REPL.
    test_gen = data_generator(nsamples=1,
                              Tlen=Tlen,
                              Slen=Slen,
                              Klen=Klen,
                              NClasses=NClasses)
    X_test, Y_test, _ = next(test_gen)
    pred_ = model.predict(X_test)
    prediction = np.argmax(pred_, axis=-1)
    tgt = np.argmax(Y_test, axis=-1)

    return history
예제 #39
0
def train_rnn(path,
              model_name='LSTM',
              max_len=50000,
              start_len=5,
              max_incr=1e100,
              max_entries=3,
              stop_lr=1e-4,
              **kwargs):
    """Curriculum training on the adding task, growing the sequence length.

    The length grows by ~20% (at least 10) whenever the final loss drops
    below 0.01.  On failure the learning rate is halved; when it falls
    below ``stop_lr`` the run either advances anyway (if loss beats
    chance, < 0.16) or stops.

    Parameters
    ----------
    path : str
        Prefix for the pickled loss logs.
    model_name : str
        Name of the model class; resolved with ``eval``.
    max_len : int
        Final sequence length to reach.
    start_len : int
        Initial sequence length.
    max_incr : float
        Cap on a single length increment.
    max_entries : int
        Forwarded to ``add_task``.
    stop_lr : float
        Learning-rate floor that triggers advance-or-stop.
    **kwargs
        Forwarded to ``add_task``; must contain 'learning_rate', and may
        contain 'model_path' / 'nb_hid'.

    Returns
    -------
    dict
        iteration -> [loss history, length], or None when convergence stops.
    """
    import pickle

    logs = {}
    hist = {}  # fix: was never initialized -> NameError
    curr_len = start_len
    # NOTE(review): eval() on a trusted config string only.
    rnn_model = eval(model_name)
    start_lr = kwargs['learning_rate']
    # fix: model_path / nb_hid / rnn_model_name were unbound free names in
    # the original body; read the first two from kwargs (add_task accepts
    # both) and use model_name throughout.
    model_path = kwargs.get('model_path')
    nb_hid = kwargs.get('nb_hid', 128)
    log_path = f"{path}{model_name}_H{nb_hid}_logs_s{start_len}_m53,454_me{max_entries}"

    # Start from scratch only when no checkpoint exists yet.
    if model_path is None or not os.path.isfile(model_path):
        hist[model_name] = add_task(rnn_model,
                                    model_name=model_name,
                                    length=curr_len,
                                    load_model=False,
                                    **kwargs)
    iteration = 0
    while curr_len < max_len:
        curr_len = int(curr_len)
        hist[model_name] = add_task(rnn_model,
                                    model_name=model_name,
                                    length=curr_len,
                                    load_model=True,
                                    max_entries=max_entries,
                                    **kwargs)
        # Save logs (fix: close the pickle file instead of leaking the handle)
        logs[iteration] = [hist[model_name].history['loss'], int(curr_len)]
        with open(log_path, "wb") as pickle_out:
            pickle.dump(logs, pickle_out)
        iteration = iteration + 1

        if hist[model_name].history['loss'][-1] < 0.01:
            printer('Error requirements met, increasing length')
            incr_len = min(max_incr, round(curr_len / 5))
            incr_len = max(incr_len, 10)  # at least increment by 10
            curr_len = curr_len + incr_len
            kwargs['learning_rate'] = start_lr
        else:
            kwargs['learning_rate'] = kwargs['learning_rate'] / 2
            debug(
                f'Error requirements not met, maintain length, lower lr to {kwargs["learning_rate"]}'
            )
            # fix: `and` instead of bitwise `&` for boolean conjunction
            if (hist[model_name].history['loss'][-1] <
                    0.16) and (kwargs['learning_rate'] < stop_lr):
                printer(
                    'Better than chance, but convergence stopped, increasing length'
                )
                incr_len = min(max_incr, round(curr_len / 5))
                incr_len = max(incr_len, 10)  # at least increment by 10
                curr_len = curr_len + incr_len
                kwargs['learning_rate'] = start_lr
            elif kwargs['learning_rate'] < stop_lr:
                printer('Convergence stopped. Ending')
                return

    # One final run exactly at max_len when the loop overshot it.
    if not (curr_len == max_len):
        hist[model_name] = add_task(rnn_model,
                                    model_name=model_name,
                                    length=max_len,
                                    load_model=True,
                                    max_entries=max_entries,
                                    **kwargs)
    return logs
예제 #40
0
    def _find_eyebrows(self, eyes_coords):
        """
        Eyebrows do not have a cascade, so we find them using edge detection.
        :param eyes_coords: A list with rectangle coordinates for both eyes found [left_eye, right_eye].
        :return: A list with eyebrows coordinates found, or empty if found less than 2.
        """

        def in_bounds(edges, x, y):
            # (x, y) lies inside the edge image (rows x cols)?
            return x >= 0 and y >= 0 and x < len(edges[0]) and y < len(edges)

        def trace_bottom(edges, start_x, start_y, step):
            """Follow an edge downward from (start_x, start_y), drifting
            horizontally by `step` (-1 = left, +1 = right); return the
            final y reached.  Factors out the two mirror-image walks that
            were duplicated in the original."""
            x, y = start_x, start_y
            while in_bounds(edges, x, y) and edges[y][x]:
                # Prefer the diagonal step (down + sideways)
                if in_bounds(edges, x + step, y + 1) and edges[y + 1][x + step]:
                    x += step
                    y += 1
                    continue
                # Then straight down
                if in_bounds(edges, x, y + 1) and edges[y + 1][x]:
                    y += 1
                    continue
                # Then sideways only
                if in_bounds(edges, x + step, y) and edges[y][x + step]:
                    x += step
                    continue
                # Can't go further: give some slack, the loop condition
                # then ends the walk.
                x += step
                y += 1
            return y

        ret = []
        for x_e, y_e, w_e, h_e in eyes_coords:
            # Edge map of the eye region only.
            edges = cv2.Canny(self.im[y_e:y_e + h_e, x_e:x_e + w_e], 50, 200)
            edges_ones = edges.nonzero()
            # Check for a minimum of existing edges
            if len(edges_ones) < 2 or len(edges_ones[0]) < 10 or len(
                    edges_ones[1]) < 5:
                debug("Not enough edges found for eyebrows.")
                return []
            # Found enough edges
            x_eb, w_eb = x_e, w_e  # Same as eye coords
            # Top of the eyebrow = topmost edge pixel
            y_eb = min(edges_ones[0])
            # x of that topmost pixel = starting column for both walks
            min_index = np.argmin(edges_ones[0])
            mid_x = edges_ones[1][min_index]

            # Walk down the left and right flanks of the eyebrow edge.
            h_l = trace_bottom(edges, mid_x, y_eb, -1) - y_eb
            h_r = trace_bottom(edges, mid_x, y_eb, +1) - y_eb

            # Eyebrow height = the longer of the two flanks
            h_eb = max(h_r, h_l)
            # y_eb was in eye-section coordinates; shift back to image coords
            y_eb += y_e
            ret += [(x_eb, y_eb, w_eb, h_eb)]

        return ret
예제 #41
0
def add_task(RNN_model,
             model_name='RNN',
             activation='tanh',
             length=400,
             nb_layers=1,
             nb_hid=128,
             initializer_func=None,
             hybrid_nunits=0,
             hybrid_type=LSTM,
             recurrent_initializer='orthogonal',
             load_model=False,
             N_train=1,
             N_test=1,
             model_path=None,
             max_entries=3):
    """Inspect a trained model on the (masked) adding task.

    Rebuilds the model, loads pre-trained weights from ``model_path``
    (returns early when the file is missing), then replays the recurrent
    dynamics in NumPy — for LSTM by manually recomputing the gates step
    by step, for lpRNN from the recorded layer activations — and plots
    the gate/state traces over time.

    Parameters
    ----------
    RNN_model : class
        Handle to the recurrent-layer class (e.g. LSTM or lpRNN).
    model_name : str
        Display/file name for the model.
    activation : str
        Activation name; resolved to a callable with ``eval`` below.
    length : int
        Sequence length of the generated adding-problem samples.
    nb_layers : int
        Number of recurrent layers.
    nb_hid : int
        Number of hidden units per layer.
    initializer_func : keras initializer or None
        Defaults to ``Identity(gain=1.0)`` when None.
    hybrid_nunits, hybrid_type
        Optional hybrid recurrent block configuration.
    recurrent_initializer : str
        Initializer for the recurrent kernels.
    load_model : bool
        Unused here; weights are loaded whenever the file exists.
    N_train, N_test : int
        Number of train/test samples to generate.
    model_path : str
        Path to the weights file.
    max_entries : int
        Maximum number of marked entries in the adding problem.

    Returns
    -------
    numpy.ndarray or None
        Model prediction on ``X_train``, or None when the weights file
        does not exist.
    """
    #    model_path = os.path.join('./results/masked_addition/',
    #                              model_name + '_' + str(nb_hid) + '_' + str(nb_layers) + '.h5')
    # NOTE(review): model_pic is computed but never used in this function.
    model_pic = os.path.join(model_path, model_name + "-model-pic.png")

    # ----- print mode info -----
    info("Model Name: ", model_name)
    info("Number of layers: ", nb_layers)
    info("Number of hidden units: ", nb_hid)
    info("Activation: ", activation)
    info("Recurrent initializer: ", recurrent_initializer)

    # ----- prepare data -----
    # identify data format
    if K.backend() == "tensorflow":
        K.set_image_data_format("channels_last")
    else:
        K.set_image_data_format("channels_first")
    data_format = K.image_data_format()  # NOTE(review): unused below

    X_train, Y_train, _, _ = load_adding_problem(length=length,
                                                 N_train=N_train,
                                                 N_test=N_test,
                                                 max_entries=max_entries,
                                                 save=False,
                                                 load=False)

    info("Basic dataset statistics")
    info("X_train shape:", X_train.shape)
    info("Y_train shape:", Y_train.shape)

    # setup sequence shape (drop the leading batch dimension)
    input_shape = X_train.shape[1:]

    # ----- Build Model -----
    img_input = Input(shape=input_shape)

    if initializer_func == None:
        initializer_func = keras.initializers.Identity(gain=1.0)

    x = define_model(RNN_model=RNN_model,
                     hybrid_nunits=hybrid_nunits,
                     h_dim=nb_hid,
                     op_dim=1,
                     num_layers=nb_layers,
                     activation=activation,
                     op_activation='linear',
                     recurrent_initializer=recurrent_initializer,
                     ip=img_input,
                     GRNN=hybrid_type,
                     op_type='sample',
                     learn_retention_ratio=True)

    # compile model
    info('Compiling model...')
    model = Model(img_input, x)
    model.summary()

    # This is an analysis pass: without pre-trained weights there is
    # nothing to inspect, so bail out instead of training from scratch.
    if not os.path.isfile(model_path):
        debug('ALERT - Model file does not exist. Ending.', model_path)
        return
    else:
        model.load_weights(model_path, by_name=True)

    # ---- Record activations -----
    info(
        '---------------------- Collecting the activations -------------------'
    )
    # Build a sibling model exposing every intermediate layer output.
    layer_outputs = [layer.output
                     for layer in model.layers[1:]]  # 0 is the input layer
    activation_model = Model(img_input, layer_outputs)
    activations = activation_model.predict(X_train)

    # printing weights of the network (name, shape, flat index)
    names = [weight.name for layer in model.layers for weight in layer.weights]
    weights = model.get_weights()
    idx = 0
    for name, weight in zip(names, weights):
        print(name, weight.shape, idx)
        idx += 1

    idx = 0

    if (RNN_model == LSTM):
        print('ALERT: Makes assumptions about format. This may break!')
        # Assumes a single LSTM layer followed by one Dense layer, with
        # weights ordered [kernel, recurrent_kernel, bias, dense_kernel,
        # dense_bias] — presumed layout, see the ALERT above.
        kernel = weights[0]
        recurrent_kernel = weights[1]
        bias = weights[2]
        dense_kernel = weights[3]
        dense_bias = weights[4]

        # ------- Plotting gates ------
        # Slices assume Keras' concatenated gate layout [i | f | c | o]
        # along the last axis — TODO confirm for the installed version.
        units = nb_hid
        kernel_i = kernel[:, :units]
        kernel_f = kernel[:, units:units * 2]
        kernel_c = kernel[:, units * 2:units * 3]
        kernel_o = kernel[:, units * 3:]

        recurrent_kernel_i = recurrent_kernel[:, :units]
        recurrent_kernel_f = recurrent_kernel[:, units:units * 2]
        recurrent_kernel_c = recurrent_kernel[:, units * 2:units * 3]
        recurrent_kernel_o = recurrent_kernel[:, units * 3:]

        bias_i = bias[:units]
        bias_f = bias[units:units * 2]
        bias_c = bias[units * 2:units * 3]
        bias_o = bias[units * 3:]

        # Resolve the activation name to a callable; the gate (recurrent)
        # activation is fixed to hard_sigmoid here.
        activation = eval(activation)
        ractivation = hard_sigmoid
        h_tm1 = np.zeros((1, units), dtype='float32')  # previous hidden state
        c_tm1 = np.zeros((1, units), dtype='float32')  # previous cell state
        op = np.zeros(length, dtype='float32')
        X_train = X_train.astype('float32')
        # Per-timestep logs of gate values and states (units x time).
        fg = np.zeros((units, length))
        ig = np.zeros((units, length))
        og = np.zeros((units, length))
        c_log = np.zeros((units, length))
        h_log = np.zeros((units, length))
        # Manual replay of the LSTM recurrence, one timestep at a time.
        for idx in range(length):
            inputs = X_train[:, idx, :]

            x_i = np.dot(inputs, kernel_i)
            x_f = np.dot(inputs, kernel_f)
            x_c = np.dot(inputs, kernel_c)
            x_o = np.dot(inputs, kernel_o)

            x_i = np.add(x_i, bias_i)
            x_f = np.add(x_f, bias_f)
            x_c = np.add(x_c, bias_c)
            x_o = np.add(x_o, bias_o)

            h_tm1_i = h_tm1
            h_tm1_f = h_tm1
            h_tm1_c = h_tm1
            h_tm1_o = h_tm1

            # Standard LSTM equations: input gate i, forget gate f,
            # cell update c, output gate o, hidden state h.
            i = ractivation(np.add(x_i, np.dot(h_tm1_i, recurrent_kernel_i)))
            f = ractivation(np.add(x_f, np.dot(h_tm1_f, recurrent_kernel_f)))

            c = np.add(
                f * c_tm1,
                i *
                activation(np.add(x_c, np.dot(h_tm1_c, recurrent_kernel_c))))

            o = ractivation(np.add(x_o, np.dot(h_tm1_o, recurrent_kernel_o)))
            h = o * activation(c)

            # `+ 0` forces a copy so next-step state is not aliased to h/c.
            h_tm1 = h + 0
            c_tm1 = c + 0
            fg[:,
               idx], ig[:,
                        idx], og[:,
                                 idx], c_log[:,
                                             idx], h_log[:,
                                                         idx] = f, i, o, c, h

        # Only unit 1 is plotted; uncomment the loop to see all units.
        idx = 1
        # for idx in range(units):
        plt.plot(fg[idx, :], label='Forget gate')
        plt.plot(ig[idx, :], label='Input gate')
        plt.plot(og[idx, :], label='Output gate')
        plt.plot(c_log[idx, :], label='Internal state')
        plt.plot(h_log[idx, :], label='Output')
        legend_properties = {'weight': 'bold'}
        plt.plot(X_train[0, :, 1], '*', label='Mask')
        plt.legend(prop=legend_properties)
        plt.savefig('LSTM_mask_add.pdf', dpi=500)

        # Dense layer computation
        op = np.dot(h_tm1, dense_kernel)
        op = np.add(op, dense_bias)

    if (RNN_model == lpRNN):
        op = np.zeros(length, dtype='float32')
        # NOTE(review): indices 0 (activations) and 4/5 (dense weights)
        # presume a fixed lpRNN layer layout — confirm against the model.
        rnn_activation = activations[0]
        dense_kernel = weights[4]
        dense_bias = weights[5]
        # Dense readout applied at every timestep of the recorded activation.
        for idx in range(length):
            op[idx] = np.dot(rnn_activation[0, idx, :], dense_kernel)
            op[idx] += dense_bias
        plt.plot(X_train[0, :, 1], '*', label='Mask')
        print(op.shape)
        for idx in range(nb_hid):
            plt.plot(rnn_activation[0, :, idx], alpha=0.6, lw=1)
        plt.plot(op, label='Activation', lw=3)

    plt.xlabel(r'Time step', weight='bold')
    plt.ylabel(r'Value', weight='bold')
    plt.show()

    # ----- Inference run -----
    output = model.predict(X_train)
    info(f'Expected output = {Y_train}. Actual output = {output}')

    return output