Example #1
def translate(tokens, translation=None):
    translations = []

    inferred_meanings = None
    if translation is not None:
        inferred_meanings = Infer.infer(tokens, translation)

    for i, token in enumerate(tokens):
        jp = token.word
        last = i == len(tokens) - 1
        if not last:
            if is_sentence_ending_symbol(tokens[i + 1].word):
                last = True

        if is_ending(token):
            translation = translate_ending(token)
            translations.append(Translation(token, translation))
            continue

        translation = match_special(jp, last)
        if translation:
            translations.append(Translation(token, translation))
            continue

        if inferred_meanings is not None:
            translation = inferred_meanings[i]
            if translation is not None:
                translations.append(Translation(token, translation))
                continue

        translation = get_translation_from_dictionary(token)
        if translation:
            translations.append(Translation(token, translation))
            continue

        if is_katakana(jp):
            translations.append(Translation(token, Katakana.translate(jp)))
            continue

        if is_english(jp):
            translations.append(Translation(token, jp))
            continue

        if is_number(jp):
            translations.append(Translation(token, Numbers.convert(jp)))
            continue

        translations.append(Translation(token, token.word))

    return translations
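# The function above resolves each token through a fixed priority chain:
# inferred meaning, special match, dictionary lookup, then script-specific
# fallbacks (katakana/English/number), and finally the raw word itself.
# A minimal, self-contained sketch of that "first non-empty result wins"
# pattern (hypothetical lookups, not the project's real helpers):
def first_match(word, lookups):
    for lookup in lookups:
        result = lookup(word)
        if result:
            return result
    return word  # fall back to the original word, as translate() does

translated = first_match("犬", [lambda w: None,        # no inferred meaning
                                lambda w: None,        # no special match
                                {"犬": "dog"}.get])    # dictionary lookup
print(translated)  # -> "dog"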
Example #2
t = np.linspace(-0.1, 0.1, 300)
t_pred = np.linspace(-0.12, 0.12, 1000)
flux = (Transit_aRs(lc_pars, t) + 0.0005 * np.sin(2 * np.pi * 40 * t)
        + np.random.normal(0, hp[-1], t.size))

#guess parameter values and guess uncertainties
guess_pars = hp + gp
err_pars = np.array([0.0004, 0.1, 0.0001] +
                    [0.00001, 0, 0.2, 0.0003, 0.02, 0.0, 0.0, 0.001, 0.])

#construct the GP
MyGP = Infer.GP(flux,
                np.matrix([
                    t,
                ]).T,
                p=guess_pars,
                mf=Transit_aRs,
                mf_args=t,
                n_hp=3)
#MyGP.logPrior = lambda p: np.log(norm_dist.pdf(p[6],.10,0.02)).sum()

#make plot of the function
pylab.figure(1)
pylab.plot(MyGP.mf_args, MyGP.t, 'k.')
pylab.plot(MyGP.mf_args, MyGP.MeanFunction(), 'g-')

#get optimised parameters
#MyGP.pars = Infer.Optimise(MyGP.logPosterior,guess_pars[:],(),fixed=(np.array(err_pars) == 0)*1)
#pylab.plot(MyGP.mf_args,MyGP.MeanFunction(),'r-')
MyGP.Optimise(fp=(np.array(err_pars) == 0) * 1)
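# The fp argument above uses an idiom that recurs throughout these examples:
# parameters whose guess uncertainty is exactly zero are flagged as fixed.
# A standalone illustration of the mask (plain NumPy, no Infer needed):
import numpy as np
err = np.array([0.0004, 0.1, 0.0, 0.02, 0.0])
fixed_mask = (err == 0) * 1  # boolean comparison cast to 0/1 flags
print(fixed_mask)            # -> [0 0 1 0 1]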
Example #3

gp.logPrior = logPrior

#optimise the free parameters
gp.optimise()
gp.plot()

#can also run an MCMC by using GP.logPosterior()
ch = 10000
conv = 0.4 * ch
lims = (0, conv, 10)
Infer.MCMC_N(gp.logPosterior,
             gp.p, (),
             ch,
             gp.ep,
             N=2,
             adapt_limits=lims,
             glob_limits=lims)
#Infer.AffInvMCMC(gp.logPosterior,gp.p,(),500,ch/500,gp.ep*0.01,n_chains=2)
pylab.figure(2)
Infer.PlotCorrelations(conv, n_chains=2, p=np.where(np.array(gp.ep) > 0)[0])
#pylab.savefig('Correlations.png')

#get the parameters and uncertainties from the MCMC
gp.p, gp.ep = Infer.AnalyseChains(conv, n_chains=2)

#and plot
pylab.figure(1)
gp.plot()
Example #4
def blink_detector(output_textfile, input_video):

    Q = Queue(maxsize=7)
    deque_blinks = deque(maxlen=30)
    drowsy_level = "Calculating drowsiness level... "

    FRAME_MARGIN_BTW_2BLINKS = 3
    MIN_AMPLITUDE = 0.04
    MOUTH_AR_THRESH = 0.35
    MOUTH_AR_THRESH_ALERT = 0.30
    MOUTH_AR_CONSEC_FRAMES = 20

    EPSILON = 0.01  # for discrete derivative (avoiding zero derivative)

    class Blink():
        def __init__(self):

            self.start = 0  #frame
            self.startEAR = 1
            self.peak = 0  #frame
            self.peakEAR = 1
            self.end = 0  #frame
            self.endEAR = 0
            self.amplitude = (self.startEAR + self.endEAR -
                              2 * self.peakEAR) / 2
            self.duration = self.end - self.start + 1
            self.EAR_of_FOI = 0  #FrameOfInterest
            self.values = []
            self.velocity = 0  #Eye-closing velocity
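            # Note: 'peak' is the frame where EAR is lowest (eye most closed);
            # amplitude averages the start-to-peak and end-to-peak drops, and
            # duration counts frames inclusively from start to end.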

    def eye_aspect_ratio(eye):
        # compute the euclidean distances between the two sets of
        # vertical eye landmarks (x, y)-coordinates
        A = dist.euclidean(eye[1], eye[5])
        B = dist.euclidean(eye[2], eye[4])

        # compute the euclidean distance between the horizontal
        # eye landmark (x, y)-coordinates
        C = dist.euclidean(eye[0], eye[3])

        if C < 0.1:  #practical finetuning due to possible numerical issue as a result of optical flow
            ear = 0.3
        else:
            # compute the eye aspect ratio
            ear = (A + B) / (2.0 * C)
        if ear > 0.45:  #practical finetuning due to possible numerical issue as a result of optical flow
            ear = 0.45
        # return the eye aspect ratio
        return ear
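    # Worked example (illustrative numbers only): with vertical gaps A = 5.0,
    # B = 6.0 and horizontal width C = 20.0, EAR = (5 + 6) / (2 * 20) = 0.275.
    # As the eye closes, A and B shrink while C stays roughly constant, so EAR
    # drops toward 0; the guards above cap EAR at 0.45 and substitute 0.3 when
    # C is degenerate (e.g. collapsed optical-flow points).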

    def mouth_aspect_ratio(mouth):

        A = dist.euclidean(mouth[14], mouth[18])

        C = dist.euclidean(mouth[12], mouth[16])

        if C < 0.1:  #practical finetuning
            mar = 0.2
        else:
            # compute the mouth aspect ratio
            mar = (A) / (C)

        # return the mouth aspect ratio
        return mar

    def EMERGENCY(ear, COUNTER):
        if ear < 0.21:
            COUNTER += 1

            if COUNTER >= 50:
                # client.messages.create(to="+16505466275",
                #         from_="+15674434352",
                #         body="This is an emergency!")
                call = client.calls.create(
                    twiml=
                    '<Response><Say>Sunny is very drowsy! This is an emergency!</Say></Response>',
                    to='+16505466275',
                    from_='+15674434352')

                print(call.sid)
                print('EMERGENCY SITUATION (EYES TOO LONG CLOSED)')
                print(COUNTER)
                COUNTER = 0
        else:
            COUNTER = 0
        return COUNTER

    def Linear_Interpolate(start, end, N):
        m = (end - start) / (N + 1)
        x = np.linspace(1, N, N)
        y = m * (x - 0) + start
        return list(y)
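    # For example, Linear_Interpolate(0.2, 0.4, 3) returns [0.25, 0.30, 0.35]:
    # N evenly spaced EAR values strictly between the two endpoints, used when
    # merging two nearby blinks further below.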

    def Ultimate_Blink_Check():
        #Given the input "values", retrieve blinks and their quantities
        retrieved_blinks = []
        MISSED_BLINKS = False
        values = np.asarray(Last_Blink.values)
        THRESHOLD = 0.4 * np.min(values) + 0.6 * np.max(
            values)  # this is to split extrema in highs and lows
        N = len(values)
        Derivative = values[1:N] - values[0:N -
                                          1]  #[-1 1] is used for derivative
        i = np.where(Derivative == 0)
        if len(i[0]) != 0:
            for k in i[0]:
                if k == 0:
                    Derivative[0] = -EPSILON
                else:
                    Derivative[k] = EPSILON * Derivative[k - 1]
        M = N - 1  #len(Derivative)
        ZeroCrossing = Derivative[1:M] * Derivative[0:M - 1]
        x = np.where(ZeroCrossing < 0)
        xtrema_index = x[0] + 1
        XtremaEAR = values[xtrema_index]
        Updown = np.ones(
            len(xtrema_index))  # 1 means high, -1 means low for each extremum
        Updown[
            XtremaEAR <
            THRESHOLD] = -1  #this says if the extremum occurs in the upper/lower half of signal
        #concatenate the beginning and end of the signal as positive high extrema
        Updown = np.concatenate(([1], Updown, [1]))
        XtremaEAR = np.concatenate(([values[0]], XtremaEAR, [values[N - 1]]))
        xtrema_index = np.concatenate(([0], xtrema_index, [N - 1]))
        ##################################################################

        Updown_XeroCrossing = Updown[1:len(Updown)] * Updown[0:len(Updown) - 1]
        jump_index = np.where(Updown_XeroCrossing < 0)
        numberOfblinks = int(len(jump_index[0]) / 2)
        selected_EAR_First = XtremaEAR[jump_index[0]]
        selected_EAR_Sec = XtremaEAR[jump_index[0] + 1]
        selected_index_First = xtrema_index[jump_index[0]]
        selected_index_Sec = xtrema_index[jump_index[0] + 1]
        if numberOfblinks > 1:
            MISSED_BLINKS = True
        if numberOfblinks == 0:
            print(Updown, Last_Blink.duration)
            print(values)
            print(Derivative)
        for j in range(numberOfblinks):
            detected_blink = Blink()
            detected_blink.start = selected_index_First[2 * j]
            detected_blink.peak = selected_index_Sec[2 * j]
            detected_blink.end = selected_index_Sec[2 * j + 1]

            detected_blink.startEAR = selected_EAR_First[2 * j]
            detected_blink.peakEAR = selected_EAR_Sec[2 * j]
            detected_blink.endEAR = selected_EAR_Sec[2 * j + 1]

            detected_blink.duration = detected_blink.end - detected_blink.start + 1
            detected_blink.amplitude = 0.5 * (
                detected_blink.startEAR - detected_blink.peakEAR) + 0.5 * (
                    detected_blink.endEAR - detected_blink.peakEAR)
            detected_blink.velocity = (
                detected_blink.endEAR - selected_EAR_First[2 * j + 1]) / (
                    detected_blink.end - selected_index_First[2 * j + 1] + 1
                )  #eye opening ave velocity
            retrieved_blinks.append(detected_blink)

        return MISSED_BLINKS, retrieved_blinks
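    # Illustration of the extrema logic above: for values = [0.30, 0.25, 0.20,
    # 0.26, 0.31] the discrete derivative changes sign once (at index 2, the
    # minimum), Updown becomes [1, -1, 1] after padding, and a single blink is
    # retrieved with its peak at that index.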

    def Blink_Tracker(EAR, IF_Closed_Eyes, Counter4blinks, TOTAL_BLINKS, skip):
        BLINK_READY = False
        #If the eyes are closed
        if int(IF_Closed_Eyes) == 1:
            Current_Blink.values.append(EAR)
            Current_Blink.EAR_of_FOI = EAR  #Save to use later
            if Counter4blinks > 0:
                skip = False
            if Counter4blinks == 0:
                Current_Blink.startEAR = EAR  #EAR_series[6] is the EAR for the frame of interest(the middle one)
                Current_Blink.start = reference_frame - 6  #reference-6 points to the frame of interest which will be the 'start' of the blink
            Counter4blinks += 1
            if Current_Blink.peakEAR >= EAR:  #deciding the min point of the EAR signal
                Current_Blink.peakEAR = EAR
                Current_Blink.peak = reference_frame - 6

        # otherwise, the eyes are open in this frame
        else:

            if Counter4blinks < 2 and skip == False:  # Wait to approve or reject the last blink
                if Last_Blink.duration > 15:
                    FRAME_MARGIN_BTW_2BLINKS = 8
                else:
                    FRAME_MARGIN_BTW_2BLINKS = 1
                if ((reference_frame - 6) -
                        Last_Blink.end) > FRAME_MARGIN_BTW_2BLINKS:
                    # Check so the prev blink signal is not monotonic or too small (noise)
                    if Last_Blink.peakEAR < Last_Blink.startEAR and Last_Blink.peakEAR < Last_Blink.endEAR and Last_Blink.amplitude > MIN_AMPLITUDE and Last_Blink.start < Last_Blink.peak:
                        if ((Last_Blink.startEAR - Last_Blink.peakEAR) >
                            (Last_Blink.endEAR - Last_Blink.peakEAR) * 0.25 and
                            (Last_Blink.startEAR - Last_Blink.peakEAR) * 0.25 <
                            (Last_Blink.endEAR -
                             Last_Blink.peakEAR)):  # the amplitude is balanced
                            BLINK_READY = True
                            #####THE ULTIMATE BLINK Check

                            Last_Blink.values = signal.convolve1d(
                                Last_Blink.values, [1 / 3.0, 1 / 3.0, 1 / 3.0],
                                mode='nearest')
                            # Last_Blink.values=signal.median_filter(Last_Blink.values, 3, mode='reflect')   # smoothing the signal
                            [MISSED_BLINKS,
                             retrieved_blinks] = Ultimate_Blink_Check()
                            #####
                            TOTAL_BLINKS = TOTAL_BLINKS + len(
                                retrieved_blinks
                            )  # Finally, approving/counting the previous blink candidate
                            ###Now You can count on the info of the last separate and valid blink and analyze it
                            Counter4blinks = 0
                            print("MISSED BLINKS= {}".format(
                                len(retrieved_blinks)))
                            return retrieved_blinks, int(
                                TOTAL_BLINKS
                            ), Counter4blinks, BLINK_READY, skip
                        else:
                            skip = True
                            print('rejected due to imbalance')
                    else:
                        skip = True
                        print('rejected due to noise,magnitude is {}'.format(
                            Last_Blink.amplitude))
                        print(Last_Blink.start < Last_Blink.peak)

            # if the eyes were closed for a sufficient number of frames (2 or more)
            # then this is a valid CANDIDATE for a blink
            if Counter4blinks > 1:
                Current_Blink.end = reference_frame - 7  #reference-7 points to the last frame that eyes were closed
                Current_Blink.endEAR = Current_Blink.EAR_of_FOI
                Current_Blink.amplitude = (Current_Blink.startEAR +
                                           Current_Blink.endEAR -
                                           2 * Current_Blink.peakEAR) / 2
                Current_Blink.duration = Current_Blink.end - Current_Blink.start + 1

                if Last_Blink.duration > 15:
                    FRAME_MARGIN_BTW_2BLINKS = 8
                else:
                    FRAME_MARGIN_BTW_2BLINKS = 1
                if (
                        Current_Blink.start - Last_Blink.end
                ) <= FRAME_MARGIN_BTW_2BLINKS + 1:  #Merging two close blinks
                    print('Merging...')
                    frames_in_between = Current_Blink.start - Last_Blink.end - 1
                    print(Current_Blink.start, Last_Blink.end,
                          frames_in_between)
                    valuesBTW = Linear_Interpolate(Last_Blink.endEAR,
                                                   Current_Blink.startEAR,
                                                   frames_in_between)
                    Last_Blink.values = Last_Blink.values + valuesBTW + Current_Blink.values
                    Last_Blink.end = Current_Blink.end  # update the end
                    Last_Blink.endEAR = Current_Blink.endEAR
                    if Last_Blink.peakEAR > Current_Blink.peakEAR:  #update the peak
                        Last_Blink.peakEAR = Current_Blink.peakEAR
                        Last_Blink.peak = Current_Blink.peak
                        #update duration and amplitude
                    Last_Blink.amplitude = (Last_Blink.startEAR +
                                            Last_Blink.endEAR -
                                            2 * Last_Blink.peakEAR) / 2
                    Last_Blink.duration = Last_Blink.end - Last_Blink.start + 1
                else:  #Should not Merge (a Separate blink)

                    Last_Blink.values = Current_Blink.values  #update the EAR list

                    Last_Blink.end = Current_Blink.end  # update the end
                    Last_Blink.endEAR = Current_Blink.endEAR

                    Last_Blink.start = Current_Blink.start  #update the start
                    Last_Blink.startEAR = Current_Blink.startEAR

                    Last_Blink.peakEAR = Current_Blink.peakEAR  #update the peak
                    Last_Blink.peak = Current_Blink.peak

                    Last_Blink.amplitude = Current_Blink.amplitude
                    Last_Blink.duration = Current_Blink.duration

            # reset the eye frame counter
            Counter4blinks = 0
        retrieved_blinks = 0
        return retrieved_blinks, int(
            TOTAL_BLINKS), Counter4blinks, BLINK_READY, skip
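    # Blink_Tracker returns (retrieved_blinks, TOTAL_BLINKS, Counter4blinks,
    # BLINK_READY, skip). retrieved_blinks is the list produced by
    # Ultimate_Blink_Check only when BLINK_READY is True; otherwise it is 0
    # and the caller should ignore it.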

    # initialize the frame counters and the total number of yawnings
    COUNTER = 0
    MCOUNTER = 0
    TOTAL = 0
    MTOTAL = 0
    TOTAL_BLINKS = 0
    Counter4blinks = 0
    skip = False  # to make sure a blink is not counted twice in the Blink_Tracker function
    Last_Blink = Blink()

    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    #Load the Facial Landmark Detector
    predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
    #Load the Blink Detector
    loaded_svm = pickle.load(
        open('Trained_SVM_C=1000_gamma=0.1_for 7kNegSample.sav', 'rb'))
    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    (mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
    print("[INFO] starting video stream thread...")

    lk_params = dict(winSize=(13, 13),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))
    EAR_series = np.zeros([13])
    # Frame_series=np.linspace(1,13,13)
    reference_frame = 0
    First_frame = True
    # top = tk.Tk()
    # frame1 = Frame(top)
    # frame1.grid(row=0, column=0)
    # fig = plt.figure()
    # ax = fig.add_subplot(111)
    # plot_frame =FigureCanvasTkAgg(fig, master=frame1)
    # plot_frame.get_tk_widget().pack(side=tk.BOTTOM, expand=True)
    # plt.ylim([0.0, 0.5])
    # line, = ax.plot(Frame_series,EAR_series)
    # plot_frame.draw()

    # loop over frames from the video stream

    stream = cv2.VideoCapture(input_video)
    start = datetime.datetime.now()
    number_of_frames = 0
    while True:
        (grabbed, frame) = stream.read()
        if not grabbed:
            print('not grabbed')
            print(number_of_frames)
            break

        frame = imutils.resize(frame, width=450)

        # To rotate by 90 degrees
        # rows=np.shape(frame)[0]
        # cols = np.shape(frame)[1]
        # M = cv2.getRotationMatrix2D((cols / 2, rows / 2),-90, 1)
        # frame = cv2.warpAffine(frame, M, (cols, rows))

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        reference_frame = reference_frame + 1
        gray = adjust_gamma(gray, gamma=1.5)  #brighten the image (gamma correction)
        Q.put(frame)
        end = datetime.datetime.now()
        ElapsedTime = (end - start).total_seconds()

        # detect faces in the grayscale frame
        rects = detector(gray, 0)
        if (np.size(rects) != 0):
            number_of_frames = number_of_frames + 1  # we only count frames in which a face is detected
            First_frame = False
            old_gray = gray.copy()
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray, rects[0])
            shape = face_utils.shape_to_np(shape)

            ###############YAWNING##################
            #######################################
            Mouth = shape[mStart:mEnd]
            MAR = mouth_aspect_ratio(Mouth)
            MouthHull = cv2.convexHull(Mouth)
            #cv2.drawContours(frame, [MouthHull], -1, (255, 0, 0), 1)

            if MAR > MOUTH_AR_THRESH:
                MCOUNTER += 1

            elif MAR < MOUTH_AR_THRESH_ALERT:
                if MCOUNTER >= MOUTH_AR_CONSEC_FRAMES:
                    MTOTAL += 1
                MCOUNTER = 0

            ##############YAWNING####################
            #########################################

            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes

            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0
            #EAR_series[reference_frame]=ear
            EAR_series = shift(EAR_series, -1, cval=ear)

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            #cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            #cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            ############ Show drowsiness level ########################
            ###########################################################

            # cv2.putText(frame,f"Drowsy Level:{drowsy_level}",
            #             (10,250),
            #             cv2.FONT_HERSHEY_SIMPLEX,
            #             0.5,
            #             [0,0,255],
            #             1
            #             )

            ############HANDLING THE EMERGENCY SITUATION################
            ###########################################################
            ###########################################################
            COUNTER = EMERGENCY(ear, COUNTER)

            # EMERGENCY SITUATION (EYES TOO LONG CLOSED) ALERT THE DRIVER IMMEDIATELY
            ############HANDLING THE EMERGENCY SITUATION################
            ###########################################################
            ###########################################################

            if Q.full() and (
                    reference_frame > 15
            ):  #to make sure the frame of interest for the EAR vector is in the middle
                EAR_table = EAR_series
                IF_Closed_Eyes = loaded_svm.predict(EAR_series.reshape(1, -1))
                if Counter4blinks == 0:
                    Current_Blink = Blink()
                retrieved_blinks, TOTAL_BLINKS, Counter4blinks, BLINK_READY, skip = Blink_Tracker(
                    EAR_series[6], IF_Closed_Eyes, Counter4blinks,
                    TOTAL_BLINKS, skip)
                if (BLINK_READY == True):
                    reference_frame = 20  #reset to a small fixed value to avoid overflow on long videos
                    skip = True
                    #####
                    BLINK_FRAME_FREQ = TOTAL_BLINKS / number_of_frames
                    for detected_blink in retrieved_blinks:
                        print(detected_blink.amplitude, Last_Blink.amplitude)
                        print(detected_blink.duration, detected_blink.velocity)
                        print('-------------------')
                        deque_blinks.append([
                            BLINK_FRAME_FREQ * 100, detected_blink.amplitude,
                            detected_blink.duration, detected_blink.velocity
                        ])
                        print(f"len(deque_blinks)={len(deque_blinks)}")
                        if len(deque_blinks) == 30:
                            deque_blinks_reshaped = np.array(
                                deque_blinks).reshape(1, -1, 4)
                            drowsy_level = Infer.how_drowsy(
                                deque_blinks_reshaped)
                            np_array_to_list = deque_blinks_reshaped.tolist()
                            json_file = "file.json"
                            json.dump(np_array_to_list,
                                      codecs.open(json_file,
                                                  'w',
                                                  encoding='utf-8'),
                                      sort_keys=True,
                                      indent=4)

                            print(f"Drowsy Level={drowsy_level}")

                        if (detected_blink.velocity > 0):
                            with open(output_textfile, 'ab') as f_handle:
                                f_handle.write(b'\n')
                                np.savetxt(f_handle, [
                                    TOTAL_BLINKS, BLINK_FRAME_FREQ * 100,
                                    detected_blink.amplitude,
                                    detected_blink.duration,
                                    detected_blink.velocity
                                ],
                                           delimiter=', ',
                                           newline=' ',
                                           fmt='%.4f')
                    Last_Blink.end = -10  # re initialization
                    #####

                # line.set_ydata(EAR_series)
                # print(EAR_series)
                # plot_frame.draw()
                frameMinus7 = Q.get()
                # cv2.imshow("Frame", frameMinus7)
            elif Q.full(
            ):  #just to make way for the new input of the Q when the Q is full
                junk = Q.get()

            key = cv2.waitKey(1) & 0xFF

            # break out of the loop if any key was pressed
            if key != 0xFF:
                break
        #Does not detect any face
        else:
            ###################Using Optical Flow############
            ###################    (Optional)    ############
            st = 0
            st2 = 0
            if (First_frame == False):
                leftEye = leftEye.astype(np.float32)
                rightEye = rightEye.astype(np.float32)
                p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, gray, leftEye,
                                                       None, **lk_params)
                p2, st2, err2 = cv2.calcOpticalFlowPyrLK(
                    old_gray, gray, rightEye, None, **lk_params)

            if np.sum(st) + np.sum(st2) == 12 and First_frame == False:

                p1 = np.round(p1).astype(int)
                p2 = np.round(p2).astype(int)
                #print(p1)

                leftEAR = eye_aspect_ratio(p1)
                rightEAR = eye_aspect_ratio(p2)

                ear = (leftEAR + rightEAR) / 2.0
                EAR_series = shift(EAR_series, -1, cval=ear)
                #EAR_series[reference_frame] = ear
                leftEyeHull = cv2.convexHull(p1)
                rightEyeHull = cv2.convexHull(p2)
                # cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
                # cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
                old_gray = gray.copy()
                leftEye = p1
                rightEye = p2
                ############HANDLING THE EMERGENCY SITUATION################
                ###########################################################
                ###########################################################
                COUNTER = EMERGENCY(ear, COUNTER)
                ############HANDLING THE EMERGENCY SITUATION################
                ###########################################################
                ###########################################################

            ###################Using Optical Flow############
            ###################                  ############

            if Q.full() and (reference_frame > 15):
                EAR_table = EAR_series
                IF_Closed_Eyes = loaded_svm.predict(EAR_series.reshape(1, -1))
                if Counter4blinks == 0:
                    Current_Blink = Blink()
                retrieved_blinks, TOTAL_BLINKS, Counter4blinks, BLINK_READY, skip = Blink_Tracker(
                    EAR_series[6], IF_Closed_Eyes, Counter4blinks,
                    TOTAL_BLINKS, skip)
                if (BLINK_READY == True):
                    reference_frame = 20  #reset to a small fixed value to avoid overflow on long videos
                    skip = True
                    #####
                    BLINK_FRAME_FREQ = TOTAL_BLINKS / number_of_frames
                    for detected_blink in retrieved_blinks:
                        print(detected_blink.amplitude, Last_Blink.amplitude)
                        print(detected_blink.duration, Last_Blink.duration)
                        print('-------------------')
                        with open(output_textfile, 'ab') as f_handle:
                            f_handle.write(b'\n')
                            np.savetxt(f_handle, [
                                TOTAL_BLINKS, BLINK_FRAME_FREQ * 100,
                                detected_blink.amplitude,
                                detected_blink.duration,
                                detected_blink.velocity
                            ],
                                       delimiter=', ',
                                       newline=' ',
                                       fmt='%.4f')

                    Last_Blink.end = -10  # re initialization

                    #####

                # line.set_ydata(EAR_series)
                # plot_frame.draw()
                # print(EAR_series)
                frameMinus7 = Q.get()
                # cv2.imshow("Frame", frameMinus7)
            elif Q.full():
                junk = Q.get()

            key = cv2.waitKey(1) & 0xFF

            if key != 0xFF:
                break

    # do a bit of cleanup
    stream.release()
    cv2.destroyAllWindows()
Example #5
#
gp = [0.7,1.7,3.2]
ep = [0.01,0.01,0.01]

print "means =", m
print "errs =", np.sqrt(np.diag(K))

#define MCMC parameters
chain_len = 40000
conv = 20000
thin = 10
no_ch=2

#run the MCMC
Infer.MCMC(LogNormalDist,gp,(m,invK,logdetK),chain_len,ep,n_chains=no_ch,adapt_limits=(5000,20000,5),glob_limits=(5000,20000,5),thin=thin)
#Get parameters values/errors from chains
par,par_err = Infer.AnalyseChains(conv/thin,n_chains=no_ch)
bf_par = Infer.GetBestFit(n_chains=no_ch)
print "Best Fit log p =", LogNormalDist(bf_par,m,invK,logdetK)

#plot the chains and correlations
#pylab.figure(2)
#Infer.PlotChains(conv/thin,n_chains=no_ch,p=[0,2,3,4],labels=lab)
pylab.figure(3)
Infer.PlotCorrelations(conv/thin,n_chains=no_ch)

############ Importance sampling stage #################
#get mean and covariance from the MCMC chains
m,K = Infer.NormalFromMCMC(conv/thin,n_chains=no_ch)
#or use 'perfect' mean and K
Example #6
#create the data set (ie training data)
time = np.arange(-0.1, 0.1, 0.001)
flux = Transit_aRs(lc_pars, time) + np.random.normal(0, wn, time.size)

#guess parameter values and guess uncertainties
guess_pars = lc_pars + [wn]
err_pars = [0.00001, 0, 0.2, 0.0003, 0.02, 0.0, 0.0, 0.001, 0.0001, 0.0001]

#plot the light curve + guess function
pylab.figure(1)
pylab.errorbar(time, flux, yerr=wn, fmt='.')
pylab.plot(time, Transit_aRs(guess_pars[:-1], time), 'r--')

#first optimise the function
guess_pars = Infer.Optimise(LogLikelihood_iid_mf,
                            guess_pars[:], (Transit_aRs, time, flux),
                            fixed=(np.array(err_pars) == 0) * 1)

#run a standard MCMC
chain_len = 40000
conv = 10000
thin = 10
no_ch = 2
adapt_lims = (2000, conv, 5)
glob_lims = (2000, conv, 5)
glob_lims = (0, 0, 0)
Infer.MCMC(LogLikelihood_iid_mf,
           guess_pars[:], (Transit_aRs, time, flux),
           chain_len,
           err_pars,
           n_chains=no_ch,
Example #7
File: Aff_test2.py  Project: apooja19/Infer
#multivariate - sum of p/2 coupled Rosenbrock functions
def InvNRosenbrock(p):
    R = 0.
    for i in range(len(p) // 2):
        R += 100. * (p[2 * i]**2 - p[2 * i + 1])**2. + (p[2 * i] - 1.)**2.
    return -0.4 * R
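# Sanity check: every Rosenbrock term vanishes at p_i = 1, so the global
# maximum of InvNRosenbrock is 0.0 at p = [1.]*len(p); the MCMC below should
# converge toward that point.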


gp = [1., 1., 1., 1., 1., 1., 1., 1.]
ep = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]

gp = np.array(gp) + np.random.normal(0, 1, len(gp))

#first optimise the function
gp = Infer.Optimise(InvNRosenbrock, gp[:], (), fixed=(np.array(ep) == 0) * 1)

#run a normal MCMC
chain_len = 50000
conv = 2000
thin = 1
no_ch = 3
adapt_lims = (200, conv, 10)
glob_lims = (200, conv, 10)
Infer.MCMC(InvNRosenbrock,
           gp[:], (),
           chain_len,
           ep,
           n_chains=no_ch,
           adapt_limits=adapt_lims,
           glob_limits=glob_lims,
Example #8
pylab.figure(1)
pylab.errorbar(time, flux, yerr=wn, fmt='.')
pylab.plot(time, Transit_aRs(guess_pars[:-1], time), 'r--')

#define MCMC parameters
chain_len = 20000
conv = 10000
thin = 10
no_ch = 2
adapt_lims = (2000, 10000, 3)
glob_lims = (2000, 20000, 10)

#first optimise the function and get parameter errors from the conditionals
guess_pars, err_pars = Infer.ConditionalErrors(LogLikelihood_iid_mf,
                                               guess_pars,
                                               err_pars,
                                               (Transit_aRs, time, flux),
                                               plot=0,
                                               opt=True)
Infer.MCMC(LogLikelihood_iid_mf,
           guess_pars, (Transit_aRs, time, flux),
           chain_len,
           err_pars,
           n_chains=no_ch,
           adapt_limits=adapt_lims,
           glob_limits=glob_lims,
           thin=thin)
par, par_err = Infer.AnalyseChains(conv / thin, n_chains=no_ch)

#plot the chains and correlations
#pylab.figure(2)
#Infer.PlotChains(conv/thin,n_chains=no_ch,p=[0,2,3,4],labels=lab)
Example #9
#guess parameter values and guess uncertainties
guess_pars = lc_pars + [wn]
err_pars = [0.00001, 0, 0.2, 0.0003, 0.02, 0.0, 0.0, 0.00001, 0.00001, 0.00001]

#plot the light curve + guess function
pylab.figure(1)
pylab.errorbar(time, flux, yerr=wn, fmt='.')
pylab.plot(time, Transit_aRs(guess_pars[:-1], time), 'r--')

#first optimise the function
#guess_pars = Infer.Optimise(LogLikelihood_iid_mf,guess_pars[:],(Transit_aRs,time,flux),fixed=(np.array(err_pars) == 0)*1)

#find the conditional errors to seed an MCMC
pylab.figure(2)
p, e = Infer.ConditionalErrors(LogLikelihood_iid_mf,
                               guess_pars,
                               err_pars, (Transit_aRs, time, flux),
                               plot=1)

#run the MCMC
#define MCMC parameters
chain_len = 100000
conv = 50000
thin = 10
no_ch = 2
adapt_lims = (0, 0, 0)
glob_lims = (2000, 100000, 4)
Infer.MCMC(LogLikelihood_iid_mf,
           p, (Transit_aRs, time, flux),
           chain_len,
           e,
           n_chains=no_ch,
Example #10
# MyMCMC.Ballpark(lf,pl,pu,(f,x,y),5000,filename="testfile",log_proposal=[1,1,1,1])
# pg = MyMCMC.ExtractBallpark(filename="testfile")

#plot test function
pylab.figure(1)
pylab.plot(x,y,'k.')
pylab.plot(x,f(p[:3],x),'r-')
pylab.plot(x,f(pg[:3],x),'g--') #plot guess function

#run the MCMC (using C version)
MyMCMC.MCMC(lf,pg,(f,x,y),chain_len,dp,n_chains=no_ch,glob_limits=limits,adapt_limits=limits)
#Get parameters values/errors from chains
par,par_err = MyMCMC.AnalyseChains(conv,n_chains=no_ch)
print()

#run the PyMCMC
Infer.MCMC(lf,pg,(f,x,y),chain_len,dp,n_chains=no_ch,glob_limits=limits,adapt_limits=limits,thin=thin)
#Get parameters values/errors from chains
par,par_err = Infer.AnalyseChains(conv/thin,n_chains=no_ch)
print()

#plot fitted function
pylab.figure(1)
pylab.plot(x,f(par[:3],x),'b-')
# pylab.figure(2)
# Infer.PlotChains(conv/thin,n_chains=no_ch)
pylab.figure(3)
Infer.PlotCorrelations(conv/thin,n_chains=no_ch,n_samples=1000)

input()
Example #11
lc_pars = [.0,2.5,11.,.1,0.6,0.2,0.3,1.,0.]
hp = [0.0003,0.01,0.0003]

#create the data set (ie training data)
t = np.linspace(-0.1,0.1,300)
t_pred = np.linspace(-0.12,0.12,1000)
flux = Transit_aRs(lc_pars,t) + np.random.normal(0,hp[-1],t.size)

#guess parameter values and guess uncertainties
guess_pars = hp + lc_pars
err_pars = np.array([0.00001,1,0.0001] + [0.00001,0,0.2,0.0003,0.02,0.0,0.0,0.001,0.0001])

X = np.matrix([t,]).T
X_pred = np.matrix([t_pred,]).T

MyGP = Infer.GP(flux,X,p=guess_pars,mf=Transit_aRs,mf_args=t,n_hp=3)

#make plot of the function
pylab.figure(1)
pylab.plot(MyGP.mf_args,MyGP.t,'k.')
pylab.plot(MyGP.mf_args,MyGP.mf(lc_pars,MyGP.mf_args),'g-')

start = time.time() 
for i in range(100):
  gp = np.array(guess_pars) + np.random.normal(0,1) * err_pars
#  print gp
  lik = MyGP.logPosterior(gp)
#  print lik
print " t = %.2f s" % (time.time()-start)

MyGP.pars = Infer.Optimise(MyGP.logPosterior,guess_pars[:],(),fixed=(np.array(err_pars) == 0)*1)
Example #12
File: Aff_test1.py  Project: apooja19/Infer
#create the data set (ie training data)
time = np.linspace(-0.1,0.1,300)
flux = Transit_aRs(lc_pars,time) + np.random.normal(0,wn,time.size)

#guess parameter values and guess uncertainties
guess_pars = lc_pars + [wn]
err_pars = [0.0001,0,0.05,0.003,0.002,0.0,0.0,0.01,0.0001,0.00010]

#plot the light curve + guess function
pylab.figure(1)
pylab.errorbar(time,flux,yerr=wn,fmt='.')
pylab.plot(time,Transit_aRs(guess_pars[:-1],time),'r--')

#first optimise the function
guess_pars = Infer.Optimise(Posterior,guess_pars[:],(Transit_aRs,time,flux),fixed=(np.array(err_pars) == 0)*1,method='BFGS')

#run a normal MCMC
chain_len = 60000
conv = 30000
thin = 10
no_ch=3
adapt_lims = (2000,conv,10)
glob_lims = (2000,conv,10)
# Infer.MCMC(Posterior,guess_pars[:],(Transit_aRs,time,flux),chain_len,err_pars,n_chains=no_ch,adapt_limits=adapt_lims,glob_limits=glob_lims,thin=thin)
# #Get parameters values/errors from chains
# par,par_err = Infer.AnalyseChains(conv/thin,n_chains=no_ch)
# bf_par = Infer.GetBestFit(n_chains=no_ch)
# print "Best Fit log p =", LogLikelihood_iid_mf(bf_par,Transit_aRs,time,flux)
# pylab.figure(3)
# Infer.PlotCorrelations(conv/thin,n_chains=no_ch,p=np.where(np.array(par_err)>0.)[0])
Example #13
chain_len = 100000
conv = 50000
thin = 10
no_ch = 2
adapt_lims = (10000, 40000, 3)
glob_lims = (2000, 100000, 4)

#first optimise the function
#guess_pars = Infer.Optimise(LogLikelihood_iid_mf,guess_pars[:],(Transit_aRs,time,flux),fixed=(np.array(err_pars) == 0)*1)

#run the MCMC with orthogonal steps
Infer.MCMC(LogLikelihood_iid_mf,
           guess_pars[:], (Transit_aRs, time, flux),
           chain_len,
           err_pars,
           n_chains=no_ch,
           adapt_limits=adapt_lims,
           glob_limits=glob_lims,
           thin=thin,
           orth=True)
#Get parameters values/errors from chains
par, par_err = Infer.AnalyseChains(conv / thin, n_chains=no_ch)

#run the MCMC with covariant steps
Infer.MCMC(LogLikelihood_iid_mf,
           guess_pars[:], (Transit_aRs, time, flux),
           chain_len,
           err_pars,
           n_chains=no_ch,
           adapt_limits=adapt_lims,
           glob_limits=glob_lims,
Example #14
pylab.plot(x, Model3(par3[:-1], x), 'r--')

#MCMC params
chain_len = 25000
conv = 10000
thin = 10
no_ch = 2
limits = (conv / 4, conv, 3)

#first try the linear model
gp = par1[:]
ep = [0.001, 0.001, 0.001]
Infer.MCMC(LogLikelihood_iid_mf,
           gp, (Model1, x, y),
           chain_len,
           ep,
           n_chains=no_ch,
           adapt_limits=limits,
           glob_limits=limits,
           thin=thin)
par, par_err = Infer.AnalyseChains(conv / thin, n_chains=no_ch)
pylab.plot(x, Model1(par[:-1], x), 'b')
m, K = Infer.NormalFromMCMC(conv / thin, n_chains=no_ch)
E1, par, par_err = Infer.ImportanceSamp(LogLikelihood_iid_mf, (Model1, x, y),
                                        m, K, chain_len)
E1, par, par_err = Infer.ImportanceSamp(LogLikelihood_iid_mf, (Model1, x, y),
                                        m, 3. * K, chain_len)

#now try the quadratic model
gp = par2[:]
ep = [0.001, 0.001, 0.001, 0.001]
Infer.MCMC(LogLikelihood_iid_mf,
Example #15
#plot the light curve + guess function
pylab.figure(1)
pylab.errorbar(time, flux, yerr=wn, fmt='.')
pylab.plot(time, Transit_aRs(guess_pars[:-1], time), 'r--')

#define MCMC parameters
chain_len = 100000
conv = 20000
thin = 10
no_ch = 5

#run the MCMC
Infer.MCMC(LogLikelihood_iid_mf,
           guess_pars, (Transit_aRs, time, flux),
           chain_len,
           err_pars,
           n_chains=no_ch,
           adapt_limits=(5000, 20000, 3),
           glob_limits=(5000, 20000, 3),
           thin=thin)

#Get parameters values/errors from chains
par, par_err = Infer.AnalyseChains(conv / thin, n_chains=no_ch)

#plot the chains and correlations
lab = [r'$T_0$', r'$a/R\star$', r'$\rho$', r'$b$']
#pylab.figure(2)
#Infer.PlotChains(conv/thin,n_chains=no_ch,p=[0,2,3,4],labels=lab)
pylab.figure(3)
Infer.PlotCorrelations(conv / thin, n_chains=no_ch, p=[0, 2, 3, 4], labels=lab)

#plot fitted function
Example #16
    optimizer = RAdam.RAdam(t.parameters(), lr=args.lr)
elif args.optimizer == 'Adam':
    optimizer = torch.optim.Adam(t.parameters(), lr=args.lr)

t = t.double()
train_mse = []
test_mse = [10000]

for ij in range(epochs):
    loss_list = []
    for i, batch in enumerate(train_data_loader):
        optimizer.zero_grad()
        in_batch = batch['in'].to(device)
        out = t(in_batch)
        loss = lossfn(batch['out'].to(device), out)
        loss_list.append(loss.item())  # store the scalar loss, not the graph-holding tensor
        loss.backward()
        optimizer.step()
    print('Avg. Training Loss in ' + str(ij) + 'th epoch :- ',
          sum(loss_list) / len(loss_list))
    train_mse.append(sum(loss_list) / len(loss_list))
    loss_list = []
    test_mse.append(
        Infer.evaluate(t,
                       loss=args.loss,
                       test_dataset=test_dataset,
                       args_from_train=args))
    if test_mse[-1] == min(test_mse):
        print('saving:- ', test_mse[-1])
        torch.save(t.state_dict(), args.param_file)
Example #17
#gpar = tpar
#epar = np.array([0.000,0.0,0.0,0.01,0.0,0.0,0.0,0.0001,0.00001,0.0000])

#create arrays for bound and fixed parameters (optional)
fixed = (epar == 0) * 1
bounds = np.array([(None, None) for i in range(len(tpar))])
bounds[4][0] = 0.

#create data (time and flux)
t = np.linspace(-0.05, 0.05, 1000)
f = MF.Transit_aRs(tpar[:-1], t) + np.random.normal(0, tpar[-1], t.size)

#perform LM fit to the data
p, pe, wn, K, logE = Infer.LevMar(MF.Transit_aRs,
                                  gpar[:-1], (t, ),
                                  f,
                                  fixed=fixed[:-1],
                                  bounds=None)
print(K)
print(K.shape)
print(np.sqrt(K.diagonal()))
print(pe)
print(np.sqrt(K.diagonal()) == np.hstack([pe, 0.]))

#get residuals
resid = f - MF.Transit_aRs(p, t)

#compare with MCMC fit - use LM values as inputs
lims = (0, 4000, 4)
MCMC_p = list(p) + [
    wn,
Example #18
#test the N parallel chain code
chain_len = 11000
conv = 10000
thin = 1
lims = (0, conv, 4)
ext_len = 1000
max_ext = 50
N = 4
import time as timer

start = timer.time()
Infer.MCMC_N(LogLikelihood_iid_mf,
             guess_pars, (Transit_aRs, time, flux),
             chain_len,
             err_pars,
             N=N,
             adapt_limits=lims,
             glob_limits=lims,
             thin=thin,
             ext_len=ext_len,
             max_ext=max_ext)
ts = timer.time() - start
st1 = "t = %dm %.2fs" % (ts // 60., ts % 60.)
#MCMCParallelCPU.MCMCParallel(LogLikelihood_iid_mf,guess_pars,(Transit_aRs,time,flux),chain_len,err_pars,N=N,adapt_limits=lims,glob_limits=lims,thin=thin,ext_len=ext_len,max_ext=100)
par, par_err = Infer.AnalyseChains(conv, n_chains=N)
pylab.figure(2)
Infer.PlotCorrelations(conv, n_chains=N)

#check against normal version
# chain_len = 10000
# conv = 4000
# lims = (0,conv,4)
Example #19
import Infer

#create example with some noise/systematics
tpar = [0,3.0,10,0.1,0.2,0.2,0.2,1.0,0.0]
time = np.linspace(-0.1,0.1,300)
flux = MF.Transit_aRs(tpar,time) + 0.001*np.sin(2*np.pi*40*time) + np.random.normal(0.,0.0005,time.size)

#construct the GP
gp = GeePea.GP(time,flux,p=tpar+[0.1,0.01,0.001],mf=MF.Transit_aRs)
gp.opt() #optimise

#run quick MCMC to test predictions:
ch_len = 10000
lims = (0,5000,10)
epar = [0,0,0,0.001,0,0,0,0,0,] + [0.001,0.01,0.001]
Infer.MCMC_N(gp.logPosterior,gp.p,(),ch_len,epar,adapt_limits=lims,glob_limits=lims,chain_filenames=['test_chain'])
p,perr = Infer.AnalyseChains(lims[1],chain_filenames=['test_chain'])
X = Infer.GetSamples(5000,100,chain_filenames=['test_chain']) #get samples from the chains
os.remove('test_chain.npy')

#standard plot
pylab.figure()
gp.plot()
#pylab.savefig('test.pdf')

#density plot for single parameter set
pylab.figure()
f,ferr = gp.predict()
GeePea.PlotDensity(time,f,ferr)
pylab.plot(time,flux,'ro',ms=3)
Example #20
epar = [
    par_noise,
] * 7 + [
    0.0,
]
epar[2] = 0
fp = [0, 0, 0, 0, 0, 0, 0, 1]
wn = 0.6
f = mf(par, time) + np.random.normal(0, wn, time.size)

##########################################################################################

#fit with Levenberg-Marquardt
NM_temp = Infer.Optimise(MF.LogLikelihood_iid_mf,
                         np.concatenate([par_guess, [
                             wn,
                         ]]), (mf, time, f),
                         fixed=fp)
LM1 = opt.leastsq(LM_ErrFunc,
                  NM_temp[:-1],
                  args=(mf, time, f, 1),
                  full_output=1)
rescale1 = (f - mf(LM1[0], time)).std()
LM1_par = LM1[0]
LM1_epar = np.sqrt(np.diag(LM1[1])) * rescale1

#fit with least_squares using the 'trf' (Trust Region Reflective) method, similar in spirit to Levenberg-Marquardt
LM2 = opt.least_squares(LM_ErrFunc,
                        par_guess,
                        args=(mf, time, f, 1),
                        method='trf',