Code Example #1
File: analyze.py  Project: mikehankey/fireball_camera
import sys
import cv2
import numpy as np
import iproc  # project-local helper module (provides getAlpha)

event = sys.argv[1]
# the original line here was truncated ('event.replace(".') and its result was
# discarded; stripping the .avi extension matches how these names are used elsewhere
event = event.replace(".avi", "")
file = "/var/www/html/out/" + sys.argv[1]
tstamp_prev = None
image_acc = None
m_image_acc = None
cap = cv2.VideoCapture(file)
cv2.namedWindow('pepe')

open("jpgs/" + event + ".txt")
while True:
    _ , frame = cap.read()
    if (_ is True):
        print ("Count: ", count)

        alpha, tstamp_prev = iproc.getAlpha(tstamp_prev)
        if image_acc is None:
            # note: np.empty starts the accumulator with garbage; diffs are
            # unreliable until accumulateWeighted has blended in a few frames
            image_acc = np.empty(np.shape(frame))
        image_diff = cv2.absdiff(image_acc.astype(frame.dtype), frame,)
        hello = cv2.accumulateWeighted(frame, image_acc, alpha)

        mframe = frame
        mframe = cv2.cvtColor(mframe, cv2.COLOR_BGR2GRAY)
        mframe = cv2.GaussianBlur(mframe, (21, 21), 0)
        if m_image_acc is None:
            m_image_acc = np.empty(np.shape(mframe))
        m_image_diff = cv2.absdiff(m_image_acc.astype(mframe.dtype), mframe,)
        hello = cv2.accumulateWeighted(mframe, m_image_acc, alpha)

        sframe = cv2.convertScaleAbs(image_acc)
        cv2.imshow('pepe', sframe)
        cv2.waitKey(1)  # required for the imshow window to refresh
    else:
        break  # end of stream
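
Every example on this page is built around the same OpenCV motion-detection core: cv2.accumulateWeighted maintains a floating-point running average of the scene, cv2.absdiff against that average highlights whatever changed, and threshold/dilate/findContours turn the difference image into countable blobs. A minimal, self-contained sketch of that core follows (standard OpenCV and NumPy APIs only; the frames iterable and the alpha default are placeholders, not values from this project). Note that the three-value (_, cnts, xx) unpacking of cv2.findContours used throughout these examples is the OpenCV 3.x signature; OpenCV 4.x returns just (contours, hierarchy).

import cv2
import numpy as np

def detect_motion(frames, alpha=0.1):
    """Yield (index, contours) for frames that differ from the running average."""
    acc = None
    for i, frame in enumerate(frames):
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)
        if acc is None:
            acc = gray.astype(np.float64)  # seed from the first frame instead of np.empty
            continue
        diff = cv2.absdiff(cv2.convertScaleAbs(acc), gray)
        cv2.accumulateWeighted(gray, acc, alpha)  # blend this frame into the average
        _, thresh = cv2.threshold(diff, 30, 255, cv2.THRESH_BINARY)
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 4.x signature
        if cnts:
            yield i, cnts
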
Code Example #2
def view(file, show = 0):

   # before processing dump frames from this file for 24 hour time lapse
   os.system("./dump-frames-tl.py " + file )

   config_file = ""
   try:
      (file_date, cam_num) = file.split("-")
      cam_num = cam_num.replace(".avi", "")
   except:
      cam_num = ""

   if cam_num == "":
      config = read_config(config_file)
   else: 
      #cam_num = sys.argv[1]
      config_file = "conf/config-" + cam_num + ".txt"
      config = read_config(config_file)



   #config = read_config(config_file)
   frame_time_data = []
   values = {}
   dir_name = os.path.dirname(file)
   file_name = file.replace(dir_name + "/", "")
   summary_file_name = file_name.replace(".avi", "-summary.txt")
   data_file_name = file_name.replace(".avi", ".txt")
   screen_cap_file_name = file_name.replace(".avi", ".jpg")
   object_file_name = file_name.replace(".avi", "-objects.jpg")
   time_file_name = file_name.replace(".avi", "-time.txt")
   capture_date = parse_file_date(file_name)
   #last_cal_date = # Get last / closest calibration date
   file_base_name = file_name.replace(".avi", "") 
   status = day_or_night(config, capture_date)

   # read in time file if it exists
   
   if os.path.isfile(dir_name + "/" + time_file_name):
      frame_time_data = read_time_file(dir_name + "/" + time_file_name)
      print ("FRAME TIME DATA LENGTH:", len(frame_time_data))
      time.sleep(1)
   else: 
      print ("no frame time data! " + dir_name + "/" + time_file_name ) 
      for x in range(0, 225): 
         frame_time_data.append("|||")


   fps_t = 0
   for ftd in frame_time_data:
      print ("FRAMEdata", ftd)
      fps, tc, tt, tx = ftd.split("|")
      if fps == "":
         fps = 0
      fps_t = int(float(fps_t)) + int(float(fps))
   if len(frame_time_data) > 0:
      avg_fps = fps_t / len(frame_time_data)
   else :
      avg_fps = 0

   

   print ("Viewing file: " + file)
   print ("Directory: " + dir_name)
   print ("File Name: " + file_name)
   print ("Summary File Name: " + summary_file_name)
   print ("Data File Name: " + data_file_name)
   print ("Screen Cap File Name: " + screen_cap_file_name)
   print ("Object File Name: " + object_file_name)
   print ("Capture Date: " + capture_date)
   print ("FPS: " + str(avg_fps))

   # make sure the file exists
   if os.path.isfile(file) is False:
      print("This file does not exist. Exiting.")
      return(0)
   else:
      print ("The file is ok.")

   #process video

   tstamp_prev = None
   image_acc = None
   last_frame = None
   nice_image_acc = None
   final_image = None
   cur_image = None
   frame_count = 0

   # open data log file
   fp = open(dir_name + "/" + data_file_name, "w")
   fp2 = open(dir_name + "/" + summary_file_name, "w")
   fp.write("frame|contours|x|y|w|h|color|fps|adjusted_unixtime|unixtime|time_offset\n")
   fp2.write("frame|contours|x|y|w|h|color|fps|adjusted_unixtime|unixtime|time_offset\n")


   #if show == 1:
   #   cv2.namedWindow('pepe') 


   cap = cv2.VideoCapture(file)
   time.sleep(2)
   xs = []
   ys = []
   motion_frames = []
   frames = []
   colors = []

   noise = 0

   while True:
      _ , frame = cap.read()
      frame_count = frame_count + 1
      frames.extend([frame])
      if frame is None:
         if frame_count <= 1:
            print("Bad file.")
            return(0)
         else:
            print("Processed ", frame_count, "frames.")
            # finish processing file and write output files

            total_motion = len(motion_frames)
            if total_motion < 3 :
               #this a BS capture. abort
               os.system("mv " + dir_name + "/" + file_base_name + "* " + "/var/www/html/out/dist/") 
               return(0)
 
            half_motion = int(round(total_motion/2,0))
            print ("key frame #1 : ", 1) 
            print ("key frame #2 : ", half_motion) 
            print ("key frame #3 : ", total_motion -1) 
            print ("Xs", xs)
            print ("Ys", ys)
            print ("MF", motion_frames)
            avg_color = sum(colors) / float(len(colors))
     
            print ("CL", colors)
            print ("Avg Color: ", avg_color)

            #print (motion_frames[1])
            #print (motion_frames[half_motion])
            #print (motion_frames[total_motion -1])

            #print(frames[motion_frames[1]])
            #print(frames[motion_frames[half_motion]])
            #print(frames[motion_frames[total_motion - 1]])

            object_file_image = (frames[motion_frames[1]] * .33) + (frames[motion_frames[half_motion]] * .33) + (frames[motion_frames[total_motion-2]] * .33) 
          
     
            x1 = xs[1]
            y1 = ys[1]  # bug fix: the original read all three y values from xs
            x2 = xs[half_motion]
            y2 = ys[half_motion]
            x3 = xs[total_motion-2]
            y3 = ys[total_motion-2]

            xmax = max(xs)
            ymax = max(ys)
            xmin = min(xs)
            ymin = min(ys)
            skip = 0

            if xmax - xmin == 0 and ymax - ymin == 0:
               skip = 1

            straight_line = compute_straight_line(x1,y1,x2,y2,x3,y3)
            if (straight_line < 1 and straight_line > 0) or avg_color > 190:
               meteor_yn = "Y"
            else:
               meteor_yn = "N"

            # note: this check overrides the straight-line/color test just above
            if status == 'night':
               meteor_yn = "Y"
            else:
               meteor_yn = "N"

       

            #meteor_yn = "Y"
            if skip == 1:
               meteor_yn = "N"
               print ("Skipping not enough x,y movement!", xmax, xmin, ymax, ymin)
            if noise >= 5:
               meteor_yn = "N"
               print ("Skipping to much noise!", noise)
            if avg_fps < 20:
               meteor_yn = "N"
               print ("Skipping calibration file!", avg_fps)


            print ("Status:", status)
            print ("Straight Line:", straight_line)
            print ("Likely Meteor:", meteor_yn)


            obj_outfile = dir_name + "/" + object_file_name
            sc_outfile = dir_name + "/" + screen_cap_file_name 
            cv2.imwrite(obj_outfile, object_file_image)
            cv2.imwrite(sc_outfile, object_file_image)


            #write summary & data files

            fp.close()
            fp2.close()

            # prep event or capture for upload to AMS
            values['datetime'] = capture_date 
            values['motion_frames'] = total_motion 
            values['cons_motion'] = total_motion
            values['color'] = avg_color
            values['straight_line'] = straight_line
            values['meteor_yn'] = meteor_yn
            values['bp_frames'] = total_motion

            if meteor_yn == 'Y':
               try:
                  values['best_caldate'] = config['best_caldate']
               except:
                  config['best_caldate'] = '0000-00-00 00:00:00';
                  values['best_caldate'] = config['best_caldate']
               try:
                  log_fireball_event(config, file, dir_name + "/" + summary_file_name, dir_name + "/" + object_file_name, values)
               except:
                  print ("failed to upload event file.")
                  return(0)
               #move files to maybe dir
               print ("Move to maybe dir!")
               os.system("mv " + dir_name + "/" + file_base_name + "* " + "/var/www/html/out/maybe/") 
            else:
               # bug fix: the original called log_motion_capture twice, once
               # unguarded and once inside the try block
               try:
                  log_motion_capture(config, dir_name + "/" + object_file_name, values)
               except:
                  print ("failed to upload capture file.")
                  return(0)
               print ("Move to false dir!")
               if (skip == 1 or noise >= 5) and status == 'night': 
                  os.system("mv " + dir_name + "/" + file_base_name + "* " + "/var/www/html/out/dist/") 
               elif avg_fps < 20:
                  os.system("mv " + dir_name + "/" + file_base_name + "* " + "/var/www/html/out/calvid/") 
               else:
                  os.system("mv " + dir_name + "/" + file_base_name + "* " + "/var/www/html/out/false/") 

            return(1)
      nice_frame = frame

      alpha, tstamp_prev = iproc.getAlpha(tstamp_prev)
      frame = cv2.resize(frame, (0,0), fx=0.5, fy=0.5)
      frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
      gray_frame = frame
      frame = cv2.GaussianBlur(frame, (21, 21), 0)
      if last_frame is None:
         last_frame = nice_frame
      if image_acc is None:
         image_acc = np.empty(np.shape(frame))
      image_diff = cv2.absdiff(image_acc.astype(frame.dtype), frame,)
      hello = cv2.accumulateWeighted(frame, image_acc, alpha)
      _, threshold = cv2.threshold(image_diff, 30, 255, cv2.THRESH_BINARY)
      thresh= cv2.dilate(threshold, None , iterations=2)
      (_, cnts, xx) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
      data = str(frame_count) + "|"

      color = 0
      contours = len(cnts)
      x,y,w,h = 0,0,0,0

      if contours > 3:
         noise = noise + 1

      if contours > 0:
          x,y,w,h = cv2.boundingRect(cnts[0])
          mx = x + w
          my = y + h
          cx = int(x + (w/2))
          cy = int(y + (h/2))
          color = gray_frame[cy,cx]
          xs.extend([x])
          ys.extend([y])
          colors.extend([color])
          motion_frames.extend([frame_count])
         
      line_data = str(frame_count) + "|" + str(contours) + "|" + str(x) + "|" + str(y) + "|" + str(w) + "|" + str(h) + "|" + str(color) + "|" + frame_time_data[frame_count-1] + "\n"

      fp.write(line_data)
      fp2.write(line_data)
      print (frame_count, contours, x,y,w,h,color)
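
compute_straight_line is defined elsewhere in the project and is not shown on this page; judging from how its result is used above (values strictly between 0 and 1 count as a straight track), it is presumably a normalized collinearity measure over the three key points. A hypothetical stand-in, for illustration only:

import math

def compute_straight_line(x1, y1, x2, y2, x3, y3):
    # Hypothetical stand-in: perpendicular distance of the middle point from
    # the line through the first and last points; 0.0 means perfectly collinear.
    dx, dy = x3 - x1, y3 - y1
    length = math.hypot(dx, dy)
    if length == 0:
        return -1  # degenerate: start and end coincide
    return abs(dx * (y1 - y2) - dy * (x1 - x2)) / length
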
Code Example #3
def show_loop(pipe_child):
    config = read_config()
    print(config['cam_ip'])

    device_lat = config['device_lat']
    device_lng = config['device_lng']
    device_operator = config['last_name'] + config['first_name']
    device_id = config['device_id']

    image_acc = None
    nice_image_acc = None
    tstamp_prev = None
    count = 0
    #time_start = datetime.datetime.now()
    time_start = time.time()
    frame = pipe_child.recv()
    frames = deque(maxlen=200)
    frame_times = deque(maxlen=200)
    frame_data = deque(maxlen=200)

    motion_on = 0
    motion_off = 0
    cnts = []
    lc = 1
    calibrate_now = 0
    calibrate_start = 0
    #sense_up = 0

    while True:
        frame = pipe_child.recv()
        frame_time = time.time()

        frames.appendleft(frame)
        frame_times.appendleft(frame_time)

        #frame_info =  str(motion_on) + "|" + str(motion_off) + "|" + str(len(cnts)) + "|"
        #frame_data.appendleft(frame_info)

        #if (count == 0):
        #calibrate on start (only if dark)
        #sys command to sense up
        #    calibrate_start = 1
        #if calibrate_start >= 0:
        #    calibrate_start = calibrate_start + 1

        if (lc % 11 == 0 and count == 5):
            dec_sec = datetime.datetime.fromtimestamp(
                int(frame_time)).strftime("%f")
            dec_sec_f = dec_sec[:2]
            cframe = frame
            format_time = datetime.datetime.fromtimestamp(
                int(frame_time)).strftime("%Y-%m-%d %H:%M:%S.")
            cv2.putText(
                cframe, "AMSMeteors.org / " + device_operator + " " +
                format_time + dec_sec_f + " UTC " + device_id + " " +
                device_lat + " " + device_lng, (10, frame.shape[0] - 10),
                cv2.FONT_HERSHEY_SIMPLEX, .4, (255, 255, 255), 1)
            cv2.imwrite("/var/www/html/out/latest.jpg", cframe)

        if count % 100 == 0:
            time_diff = frame_time - time_start
            #time_diff_seconds = time_diff.total_seconds()
            fps = count / time_diff
            print("FPS: " + str(fps))
            #ff.write("FPS: " + str(fps) + "\n")
            count = 1
            lc = lc + 1
            print("LC:" + str(lc))
            time_start = frame_time

        if count % 3 == 0:
            #if sense_up > 1:
            #   print ("Sense Up: ", sense_up)
            #if sense_up >= 100 and sense_up < 103:
            #   print ("Take calibration...")
            #   dec_sec = datetime.datetime.fromtimestamp(int(frame_time)).strftime("%f")
            #   dec_sec_f = dec_sec[:2]
            #   cframe = frame
            #   format_time = datetime.datetime.fromtimestamp(int(frame_time)).strftime("%Y-%m-%d %H:%M:%S.")
            #   cv2.putText(cframe, "AMSMeteors.org / " + device_operator + " " + format_time + dec_sec_f + " UTC " + device_id + " " + device_lat + " " + device_lng,
            #   (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, .4, (255, 255, 255), 1)
            #   format_time = datetime.datetime.fromtimestamp(int(frame_time)).strftime("%Y%m%d%H%M%S")
            #   cal_file = "/var/www/html/out/cal" + format_time + ".jpg"
            #   cv2.imwrite(cal_file, cframe)
            #   motion_on = 0
            #   motion_off = 0
            #if sense_up == 106:
            #os.system("/var/www/html/write-serial.py sense_down")
            #    print ("Sense down.")
            #   r = requests.get("http://" + config['cam_ip'] + "/webs/btnSettingEx?flag=1000&paramchannel=0&paramcmd=1058&paramctrl=50&paramstep=0&paramreserved=0&")
            #if sense_up > 200:
            #   calibrate_now = 0
            #   motion_on = 0
            #   motion_on = 0
            #   sense_up = 0

            alpha, tstamp_prev = iproc.getAlpha(tstamp_prev)

            frame = cv2.resize(frame, (0, 0), fx=0.8, fy=0.8)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            frame = cv2.GaussianBlur(frame, (21, 21), 0)
            if image_acc is None:
                image_acc = np.empty(np.shape(frame))
            image_diff = cv2.absdiff(
                image_acc.astype(frame.dtype),
                frame,
            )
            hello = cv2.accumulateWeighted(frame, image_acc, alpha)

            _, threshold = cv2.threshold(image_diff, 30, 255,
                                         cv2.THRESH_BINARY)
            thresh = cv2.dilate(threshold, None, iterations=2)
            (_, cnts, xx) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                             cv2.CHAIN_APPROX_SIMPLE)

            if len(cnts) == 0:
                motion_off = motion_off + 1
            elif len(cnts) > 2 and lc > 3:
                print("dropped frame", len(cnts))
                motion_off = motion_off + 1
                dropped = frames.pop()
                #frames.appendleft(last_frame)
            elif lc > 2:
                motion_on = motion_on + 1
                motion_off = 0
            if motion_off > 5 and motion_on < 3:
                motion_on = 0
            #if calibrate_now == 1:
            #   sense_up = sense_up + 1

            #if motion_off == 3 and motion_on >= 3:
            #r = requests.get("http://" + config['cam_ip'] + "/webs/btnSettingEx?flag=1000&paramchannel=0&paramcmd=1058&paramctrl=25&paramstep=0&paramreserved=0&")

            if motion_off > 20 and motion_on >= 3 and calibrate_now == 0:
                #ff.write("RECORD BUFFER NOW!\n")
                motion_off = 0
                motion_on = 0
                i = 1000

                format_time = datetime.datetime.fromtimestamp(
                    int(frame_time)).strftime("%Y%m%d%H%M%S")
                outfile = "{}/{}.avi".format("/var/www/html/out", format_time)
                outfile_text = "{}/{}.txt".format("/var/www/html/out",
                                                  format_time)

                if record == 1:  # 'record' is presumably a module-level flag set elsewhere

                    df = open(outfile_text, 'w', 1)
                    dql = len(frame_times) - 1
                    time_diff = frame_times[1] - frame_times[dql]
                    fps = 200 / time_diff
                    print("FPS: ", fps)
                    writer = cv2.VideoWriter(
                        outfile, cv2.VideoWriter_fourcc(*'MJPG'), fps,
                        (frames[0].shape[1], frames[0].shape[0]), True)
                    while frames:
                        img = frames.pop()
                        ft = frame_times.pop()
                        format_time = datetime.datetime.fromtimestamp(
                            int(ft)).strftime("%Y-%m-%d %H:%M:%S.")
                        # bug fix: the original formatted int(frame_time), so
                        # %f was always "000000"; use this frame's own time ft
                        dec_sec = datetime.datetime.fromtimestamp(
                            ft).strftime("%f")
                        format_time = format_time + dec_sec
                        #img_data  = frame_data.pop()
                        #fts = ft.strftime("%Y%m%d %H:%M:%S.%f|")
                        df.write(format_time + "\n")
                        #cv2.putText(img, img_time.strftime("AMSMeteors.org / " + device_operator + " %Y%m%d %H:%M:%S." + dec_sec_f + " UTC " + device_id + ": " + device_lat + " " + device_lng),
                        #(10, img.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1)
                        #cv2.imwrite("out/" + str(i) + ".jpg", img)
                        writer.write(img)
                        i = i + 1
                    writer.release()
                    df.close()
                    #r = requests.get("http://" + config['cam_ip'] + "/webs/btnSettingEx?flag=1000&paramchannel=0&paramcmd=1058&paramctrl=50&paramstep=0&paramreserved=0&")
                    #calibrate_now = 1
                    #sense_up = sense_up + 1

            if motion_on > 0:
                #ff.write("motion_on " + str(motion_on) + "\n")
                print("motion_on " + str(motion_on) + "\n")
            #cv2.imshow('pepe', frame)

        #last_frame = frame
        count = count + 1
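
The deque(maxlen=200) buffers in this show_loop implement pre-event recording: the newest 200 frames are always held in memory, so once motion is confirmed the footage leading up to the event can still be written out. A compact sketch of the flush step is below; cv2.VideoWriter is the real API, while the 25 fps fallback is an assumption. Unlike the fps = 200 / time_diff expression above, it derives the rate from however many frames are actually buffered.

import cv2
from collections import deque

def flush_buffer(frames, frame_times, outfile):
    """Write buffered frames, oldest first, to an MJPG AVI file."""
    if len(frame_times) < 2:
        return
    span = frame_times[0] - frame_times[-1]  # newest minus oldest (appendleft order)
    fps = (len(frame_times) - 1) / span if span > 0 else 25.0  # assumed fallback rate
    h, w = frames[0].shape[:2]
    writer = cv2.VideoWriter(outfile, cv2.VideoWriter_fourcc(*'MJPG'), fps, (w, h), True)
    while frames:
        writer.write(frames.pop())  # pop() returns the oldest buffered frame
    writer.release()
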
Code Example #4
def cam_loop(pipe_parent):
    #cv2.namedWindow("pepe")
    lc = 0
    tstamp_prev = None
    motion_on = 0
    motion_off = 0
    config = read_config()
    print(config['cam_ip'])

    cap = cv2.VideoCapture("rtsp://" + config['cam_ip'] +
                           "/av0_1&user=admin&password=admin")

    cv2.setUseOptimized(True)
    image_acc = None

    time.sleep(5)
    frames = deque(maxlen=200)
    frame_times = deque(maxlen=200)
    time_start = datetime.datetime.now()
    count = 0
    while True:
        _, frame = cap.read()
        if _ is True:
            frame_time = datetime.datetime.now()
            frames.appendleft(frame)
            frame_times.appendleft(frame_time)
            #pipe_parent.send(frame)

        if count % 300 == 0:
            time_diff = frame_time - time_start
            fps = count / time_diff.total_seconds()
            print("FPS: " + str(fps))
            count = 1
            lc = lc + 1
            print("LC:" + str(lc))
            time_start = frame_time

        if count % 3 == 0:
            #alpha = .25
            alpha, tstamp_prev = iproc.getAlpha(tstamp_prev)
            frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            frame = cv2.GaussianBlur(frame, (21, 21), 0)
            if image_acc is None:
                image_acc = np.empty(np.shape(frame))
            image_diff = cv2.absdiff(
                image_acc.astype(frame.dtype),
                frame,
            )
            hello = cv2.accumulateWeighted(frame, image_acc, alpha)
            _, threshold = cv2.threshold(image_diff, 30, 255,
                                         cv2.THRESH_BINARY)
            thresh = cv2.dilate(threshold, None, iterations=2)
            (_, cnts, xx) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                             cv2.CHAIN_APPROX_SIMPLE)
            #cv2.imshow("pepe", frame)
            #cv2.imshow("pepe", image_diff)
            #cv2.waitKey(5)
            if len(cnts) == 0:
                motion_off = motion_off + 1
                print(len(cnts), motion_on, motion_off)
            elif len(cnts) < 30:
                print(len(cnts), motion_on, motion_off)
                motion_on = motion_on + 1
                motion_off = 0
                #cv2.imshow("pepe", cv2.convertScaleAbs(image_diff))
            else:
                print("CNTS:", len(cnts))
            if motion_off > 5 and motion_on < 5:
                motion_on = 0
            if lc < 3:
                motion_on = 0
            if motion_off > 10 and motion_on >= 5:
                r = requests.get(
                    "http://" + config['cam_ip'] +
                    "/webs/btnSettingEx?flag=1000&paramchannel=0&paramcmd=1058&paramctrl=25&paramstep=0&paramreserved=0&"
                )

            if motion_off > 30 and motion_on >= 5:
                r = requests.get(
                    "http://" + config['cam_ip'] +
                    "/webs/btnSettingEx?flag=1000&paramchannel=0&paramcmd=1058&paramctrl=50&paramstep=0&paramreserved=0&"
                )
                print("RECORD BUFFER NOW!\n")
                motion_on = 0
                format_time = frame_time.strftime("%Y%m%d%H%M%S")
                outfile = "{}/{}.avi".format("/var/www/html/out", format_time)
                outfile_text = "{}/{}.txt".format("/var/www/html/out",
                                                  format_time)

                df = open(outfile_text, 'w', 1)
                dql = len(frame_times) - 1
                time_diff = frame_times[1] - frame_times[dql]
                fps = 200 / time_diff.total_seconds()
                print("FPS: ", fps)
                writer = cv2.VideoWriter(
                    outfile, cv2.VideoWriter_fourcc(*'MJPG'), fps,
                    (frames[0].shape[1], frames[0].shape[0]), True)
                while frames:
                    img = frames.pop()
                    ft = frame_times.pop()
                    format_time = ft.strftime("%Y-%m-%d %H:%M:%S.")
                    dec_sec = ft.strftime("%f")
                    format_time = format_time + dec_sec
                    df.write(format_time + "\n")
                    writer.write(img)
                    #i = i + 1
                writer.release()
                df.close()
        count = count + 1
Code Example #5
def cam_loop(pipe_parent):
    #cv2.namedWindow("pepe")
    lc = 0
    tstamp_prev = None
    motion_on = 0
    motion_off = 0
    config = read_config()
    print (config['cam_ip'])

    cap = cv2.VideoCapture("rtsp://" + config['cam_ip'] + "/av0_1&user=admin&password=admin")

    #cv2.setUseOptimized(True)
    image_acc = None

    time.sleep(5)
    frames = deque(maxlen=200)
    frame_times = deque(maxlen=200)
    time_start = datetime.datetime.now()
    count = 0
    while True:
        _ , frame = cap.read()
        #if _ is True:
        #    frame_time = datetime.datetime.now()
        #    frames.appendleft(frame)
        #    frame_times.appendleft(frame_time)
        #    #pipe_parent.send(frame)

        if True:
            #alpha = .25
            alpha, tstamp_prev = iproc.getAlpha(tstamp_prev)
            #frame = cv2.resize(frame, (0,0), fx=0.25, fy=0.25)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            frame = cv2.GaussianBlur(frame, (21, 21), 0)
            if image_acc is None:
                image_acc = np.empty(np.shape(frame))
            image_diff = cv2.absdiff(image_acc.astype(frame.dtype), frame,)
            hello = cv2.accumulateWeighted(frame, image_acc, alpha)
            _,threshold = cv2.threshold(image_diff, 30, 255, cv2.THRESH_BINARY)
            thresh= cv2.dilate(threshold, None , iterations=2)
            (_, cnts, xx) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            if len(cnts) == 0:
               motion_off = motion_off + 1
            elif len(cnts) < 30 :
               motion_on = motion_on + 1
               motion_off = 0
               print (len(cnts), motion_on)
            if motion_off > 3 and motion_on < 3:
               motion_on = 0
            if count < 100:
               motion_on = 0
            #if motion_off > 10 and motion_on >=5:
            #   r = requests.get("http://" + config['cam_ip'] + "/webs/btnSettingEx?flag=1000&paramchannel=0&paramcmd=1058&paramctrl=25&paramstep=0&paramreserved=0&")

            if motion_off > 10 and motion_on >= 3: 
            #   r = requests.get("http://" + config['cam_ip'] + "/webs/btnSettingEx?flag=1000&paramchannel=0&paramcmd=1058&paramctrl=50&paramstep=0&paramreserved=0&")
               print("RECORD BUFFER NOW!\n")
               os.system("touch /home/pi/fireball_camera/write_buffer");
               time.sleep(2)
               os.system("rm /home/pi/fireball_camera/write_buffer");
               motion_on = 0
               #motion_off = 0
               count = 0
            count = count + 1
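
Rather than writing video itself, this variant raises a flag file that a separate recorder process watches: touch creates /home/pi/fireball_camera/write_buffer, the watcher dumps its buffer while the flag exists, and rm clears it. The same signal can be raised without shelling out; a sketch using only the standard library (Path.unlink(missing_ok=True) needs Python 3.8+):

import time
from pathlib import Path

def signal_write_buffer(flag=Path("/home/pi/fireball_camera/write_buffer"), hold=2.0):
    """Raise the write_buffer flag file, hold it briefly, then clear it."""
    flag.touch()
    try:
        time.sleep(hold)  # give the watching process time to notice the flag
    finally:
        flag.unlink(missing_ok=True)
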
Code Example #6
def show_loop(pipe_child, shared_dict):
    #cv2.namedWindow("pepe")
    #config = read_config()

    print(config['cam_ip'])

    device_lat = config['device_lat']
    device_lng = config['device_lng']
    device_operator = config['first_name'] + " " + config['last_name']
    device_id = config['device_id']

    image_acc = None
    nice_image_acc = None
    tstamp_prev = None
    count = 0
    #time_start = datetime.datetime.now()
    time_start = time.time()
    frame = pipe_child.recv()
    frames = deque(maxlen=200)
    frame_times = deque(maxlen=200)
    #frame_data = deque(maxlen=200)

    motion_on = 0
    motion_off = 0
    cnts = []
    lc = 1
    calibrate_now = 0
    calibrate_start = 0
    #sense_up = 0

    while True:
        frame = pipe_child.recv()
        alpha, tstamp_prev = iproc.getAlpha(tstamp_prev)
        #lock.acquire()
        #print ("SHOW LOOP:", count)

        #frame = cv2.resize(frame, (0,0), fx=0.8, fy=0.8)
        #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frame = cv2.GaussianBlur(frame, (21, 21), 0)
        if image_acc is None:
            image_acc = np.empty(np.shape(frame))
        image_diff = cv2.absdiff(
            image_acc.astype(frame.dtype),
            frame,
        )
        hello = cv2.accumulateWeighted(frame, image_acc, alpha)

        _, threshold = cv2.threshold(image_diff, 30, 255, cv2.THRESH_BINARY)
        thresh = cv2.dilate(threshold, None, iterations=2)
        (_, cnts, xx) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
        #print(len(cnts), motion_on, motion_off)
        #print(cnts)

        if len(cnts) > 1:
            shared_dict['noise'] = shared_dict['noise'] + 1

        if len(cnts) == 0:
            shared_dict['motion_off'] = shared_dict['motion_off'] + 1
            #middle_pixel = 0
            #avg_color = 0
        else:
            # if the total cnts are over 1 then cat to the log!
            dist_time = datetime.datetime.now()
            # cam_num is not defined in this function; presumably a module-level value
            log_entry = str(cam_num) + "|" + str(
                len(cnts)) + "|" + str(dist_time)
            cmd = "/bin/echo \"" + log_entry + "\" >> /tmp/distortion.log"
            os.system(cmd)

            #area = cv2.contourArea(cnts[0])
            #perim = cv2.arcLength(cnts[0], True)
            #print ("Perim:", perim)
            #x,y,w,h = cv2.boundingRect(cnts[0])
            #x2 = x+w
            #y2 = y+h
            #mx = int(x + (w/2))
            #my = int(y + (h/2))
            #print ("XY:", x,x2,y,y2)
            #middle_pixel = frame[my,mx]
            #middle_sum = np.sum(middle_pixel)
            #crop_frame = frame[y:y2,x:x2]
            #avg_color_per_row = np.average(crop_frame, axis=0)
            #avg_color = np.average(avg_color_per_row, axis=0)

            #shared_dict['xywh'] = (x,y,w,h)
            #shared_dict['area'] = area
            #shared_dict['perim'] = perim
            #shared_dict['middel_pixel'] = middle_pixel
            #shared_dict['avg_color'] = avg_color

            shared_dict['motion_on'] = shared_dict['motion_on'] + 1
            shared_dict['motion_off'] = 0
            shared_dict['cnts'] = len(cnts)

            print("MMO:", shared_dict['cnts'], shared_dict['motion_on'],
                  shared_dict['motion_off'])

        if shared_dict['motion_off'] > 5 and shared_dict['motion_on'] < 3:
            shared_dict['motion_on'] = 0
        #cv2.imshow('pepe', image_diff)
        #cv2.waitKey(5)
        count = count + 1
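
This show_loop reports its motion counters through shared_dict so another process can react to them. The dict is presumably a multiprocessing.Manager().dict(); a minimal wiring sketch under that assumption, seeding the keys the loop touches:

import multiprocessing as mp

def start_detector(show_loop):
    """Run show_loop in a child process, sharing a managed dict and a pipe (assumed setup)."""
    manager = mp.Manager()
    shared_dict = manager.dict(motion_on=0, motion_off=0, cnts=0, noise=0)
    parent_conn, child_conn = mp.Pipe()
    proc = mp.Process(target=show_loop, args=(child_conn, shared_dict))
    proc.start()
    return parent_conn, shared_dict, proc
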
Code Example #7
def stack_calibration_video(outfile):
    frames = deque(maxlen=200)
    count = 0
    show = None
    #cv2.namedWindow('pepe')

    file_exists = Path(outfile)
    if (file_exists.is_file()):
        print("File found.")
    else:
        print("File not found.", outfile)
        exit()

    cap = cv2.VideoCapture(outfile)
    time.sleep(2)
    count = 0
    tstamp_prev = None
    image_acc = None
    dst = None
    dst_x = None
    while count < 89:
        _, frame = cap.read()
        if frame is None:
            print("Frame is none.")
            break  # bug fix: 'continue' here loops forever once the stream ends (count never advances)
        frames.appendleft(frame)

        alpha, tstamp_prev = iproc.getAlpha(tstamp_prev)
        if count == 80:
            sframe = frame

        #alpha = .23
        alpha = .5
        nice_frame = frame
        #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #frame = cv2.GaussianBlur(frame, (21, 21), 0)
        if show is None:
            show = np.empty(np.shape(frame))
        if image_acc is None:
            image_acc = np.empty(np.shape(frame))
        # note: the original pre-initialized dst with np.empty here, which made
        # the dst-is-None branch below unreachable and blended garbage into the
        # first pass; dst is now left as None until it is seeded from image_acc
        image_diff = cv2.absdiff(
            image_acc.astype(frame.dtype),
            frame,
        )
        hello = cv2.accumulateWeighted(frame, image_acc, alpha)
        abs_frame = cv2.convertScaleAbs(frame)
        abs_image_acc = cv2.convertScaleAbs(image_acc)
        if dst is None:
            dst = abs_image_acc
        else:
            dst = cv2.convertScaleAbs(dst)
        dst = cv2.addWeighted(abs_frame, alpha, dst, alpha, 0)
        #nice_avg = cv2.convertScaleAbs(image_dst)
        #cv2.imshow('pepe', dst)
        #cv2.waitKey(1)
        count = count + 1

    image_acc = np.empty(np.shape(frame))

    framex = frames[45]
    framey = frames[88]
    image_diff = cv2.absdiff(
        framex.astype(framey.dtype),
        framey,
    )
    #cv2.imshow("pepe",image_diff)
    #cv2.waitKey(0)

    for i in range(1, 5):
        k = i * 5 + 30
        frame = frames[i + 25]
        #cv2.imshow('pepe', frame)
        #cv2.waitKey(0)
        image_diff = cv2.absdiff(
            image_acc.astype(frame.dtype),
            frame,
        )
        #cv2.imshow('pepe', image_diff)
        #cv2.waitKey(0)
        hello = cv2.accumulateWeighted(frame, image_acc, alpha)
        abs_frame = cv2.convertScaleAbs(frame)
        abs_image_acc = cv2.convertScaleAbs(image_acc)
        if dst_x is None:
            dst_x = abs_image_acc
        else:
            dst_x = cv2.convertScaleAbs(dst_x)
        #cv2.imshow('pepe', dst_x)
        #cv2.waitKey(0)

    print("Writing out files.")
    jpg_file = outfile.replace(".avi", ".jpg")
    print(jpg_file)
    print(dst)
    cv2.imwrite(jpg_file, dst)

    jpg_file_x = outfile.replace(".avi", "-x.jpg")
    cv2.imwrite(jpg_file_x, dst_x)
    jpg_file = jpg_file.replace(".jpg", "-single.jpg")
    sframe = cv2.convertScaleAbs(sframe)
    cv2.imwrite(jpg_file, sframe)
    print("Done")
Code Example #8
def view(file, show=0):
    nostars = 0
    skip = 0
    avg_color = 0
    max_cons_motion = 0
    straight_line = -1
    final_image = None
    start_time = int(time.time())

    el = file.split("/")
    act_file = el[-1]
    dir = file.replace(act_file, "")

    acc_file = act_file.replace(".mp4", "-acc.jpg")
    star_file = act_file.replace(".mp4", "-stars.jpg")

    acc_file = dir + "stars/" + acc_file
    star_file = dir + "stars/" + star_file

    # before processing dump frames from this file for 24 hour time lapse
    #os.system("./dump-frames-tl.py " + file )

    config_file = ""
    try:
        (file_date, cam_num) = file.split("-")
        cam_num = cam_num.replace(".avi", "")
    except:
        cam_num = ""

    if cam_num == "":
        config = read_config(config_file)
    else:
        #cam_num = sys.argv[1]
        config_file = "conf/config-" + cam_num + ".txt"
        config = read_config(config_file)

    #config = read_config(config_file)
    frame_time_data = []
    values = {}
    dir_name = os.path.dirname(file) + "/"
    file_name = file.replace(dir_name, "")
    summary_file_name = file_name.replace(".mp4", "-summary.txt")
    data_file_name = file_name.replace(".mp4", ".txt")
    screen_cap_file_name = file_name.replace(".mp4", ".jpg")
    object_file_name = file_name.replace(".mp4", "-objects.jpg")
    time_file_name = file_name.replace(".mp4", "-time.txt")
    capture_date = parse_file_date(file_name)
    #last_cal_date = # Get last / closest calibration date
    file_base_name = file_name.replace(".mp4", "")
    print(capture_date)
    status = day_or_night(config, capture_date)

    if status == "day":
        print("Skip this daytime file.")
        move_file(dir_name + file_name, "day")
        return ()

    # read in time file if it exists

    #if os.path.isfile(dir_name + "/" + time_file_name):
    #   frame_time_data = read_time_file(dir_name + "/" + time_file_name)
    #   print ("FRAME TIME DATA LENGTH:", len(frame_time_data))
    #   time.sleep(1)
    #else:
    #   print ("no frame time data! " + dir_name + "/" + time_file_name )
    #   for x in range(0, 225):
    #      frame_time_data.append("|||")

    #fps_t = 0
    #for ftd in frame_time_data:
    #   print ("FRAMEdata", ftd)
    #   fps, tc, tt, tx = ftd.split("|")
    #   if fps == "":
    #      fps = 0
    #   fps_t = int(float(fps_t)) + int(float(fps))
    #if len(frame_time_data) > 0:
    #   avg_fps = fps_t / len(frame_time_data)
    #else :
    #   avg_fps = 0

    avg_fps = 25

    print("Viewing file: " + file)
    print("Directory: " + dir_name)
    print("File Name: " + file_name)
    print("Summary File Name: " + summary_file_name)
    print("Data File Name: " + data_file_name)
    print("Screen Cap File Name: " + screen_cap_file_name)
    print("Object File Name: " + object_file_name)
    print("Capture Date: " + capture_date)
    #print ("FPS: " + str(avg_fps))

    # make sure the file exists
    if os.path.isfile(file) is False:
        print("This file does not exist. Exiting.")
        return (0)
    else:
        print("The file is ok.")

    #process video

    tstamp_prev = None
    image_acc = None
    last_frame = None
    last_gray_frame = None
    nt_acc = None
    nice_image_acc = None
    final_image = None
    cur_image = None
    frame_count = 0

    # open data log file
    #fp = open(dir_name + "/" + data_file_name, "w")
    #fp2 = open(dir_name + "/" + summary_file_name, "w")
    #fp.write("frame|contours|x|y|w|h|color|fps|adjusted_unixtime|unixtime|time_offset\n")
    #fp2.write("frame|contours|x|y|w|h|color|fps|adjusted_unixtime|unixtime|time_offset\n")

    show = 1  # note: unconditionally overrides the 'show' argument
    if show == 1:
        cv2.namedWindow('pepe')

    #cap = skvideo.io.VideoCapture(file)
    cap = cv2.VideoCapture(file)
    time.sleep(2)
    xs = []
    ys = []
    motion_frames = []
    frames = []
    colors = []

    show_sw = 0

    noise = 0
    prev_motion = 0
    cons_motion = 0
    motion_events = []

    while True:
        _, frame = cap.read()
        if frame is not None:
            frame[680:720, 0:620] = [0, 0, 0]  # black out the bottom-left strip (presumably an on-screen overlay)
        frame_count = frame_count + 1
        #frames.extend([frame])

        if frame is None or nostars > 5:
            if frame_count <= 1:
                print("Bad file.")
                return (0)
            else:
                end_time = int(time.time())
                elapsed = end_time - start_time
                print("Processed ", frame_count, "frames. in ", elapsed,
                      "seconds")
                # finish processing file and write output files

                total_motion = len(motion_frames)

                if total_motion > 3:
                    half_motion = int(round(total_motion / 2, 0))
                    print("key frame #1 : ", 1)
                    print("key frame #2 : ", half_motion)
                    print("key frame #3 : ", total_motion - 1)
                    print("Xs", xs)
                    print("Ys", ys)
                    print("MF", motion_frames)
                    avg_color = sum(colors) / float(len(colors))

                    print("CL", colors)
                    print("Avg Color: ", avg_color)

                    #object_file_image = (frames[motion_frames[1]] * .33) + (frames[motion_frames[half_motion]] * .33) + (frames[motion_frames[total_motion-2]] * .33)

                    x1 = xs[1]
                    y1 = ys[1]  # bug fix: the original read all three y values from xs
                    x2 = xs[half_motion]
                    y2 = ys[half_motion]
                    x3 = xs[total_motion - 2]
                    y3 = ys[total_motion - 2]

                    xmax = max(xs)
                    ymax = max(ys)
                    xmin = min(xs)
                    ymin = min(ys)
                    skip = 0

                    if xmax - xmin == 0 and ymax - ymin == 0:
                        skip = 1

                    straight_line = compute_straight_line(
                        x1, y1, x2, y2, x3, y3)
                    if len(motion_events) > 3:
                        max_cons_motion = max(motion_events)
                    if (straight_line < 1
                            and straight_line >= 0) or avg_color > 190:
                        meteor_yn = "Y"
                    else:
                        meteor_yn = "N"

                # note: this check overrides the straight-line/color test above
                if status == 'night':
                    meteor_yn = "Y"
                else:
                    meteor_yn = "N"

                if total_motion <= 3:
                    meteor_yn = "N"

                #meteor_yn = "Y"
                if skip == 1:
                    meteor_yn = "N"
                    print("Skipping not enough x,y movement!", xmax, xmin,
                          ymax, ymin)
                if noise >= 5:
                    meteor_yn = "N"
                    print("Skipping to much noise!", noise)
                if avg_fps < 20:
                    meteor_yn = "N"
                    print("Skipping calibration file!", avg_fps)

                # write out the stacked image_acc and the stacked star file
                # nt_acc

                print("Writing:", acc_file)
                print("Writing:", star_file)

                #image_acc_cl = cv2.cvtColor(image_acc, cv2.COLOR_GRAY2BGR)
                #en_image_acc = Image.fromarray(cv2.convertScaleAbs(image_acc))

                enhancer = ImageEnhance.Brightness(final_image)
                enhanced_image = enhancer.enhance(1)
                np_enhanced_image = np.asarray(enhanced_image)
                #cv2.imwrite(acc_file, np_enhanced_image)

                print("Total Stars: ", total_stars)
                if total_stars == 0:
                    star_file = star_file.replace("/stars/", "/clouds/")
                    acc_file = acc_file.replace("/stars/", "/clouds/")
                cv2.imwrite(acc_file, np_enhanced_image)

                if total_stars > 0:
                    cv2.imwrite(star_file, star_image)
                #cv2.imshow('pepe', star_image)
                #cv2.waitKey(100)

                print("Status:", status)
                print("Total Motion:", total_motion)
                print("Cons Motion:", cons_motion)
                print("Straight Line:", straight_line)
                print("Likely Meteor:", meteor_yn)

                #obj_outfile = dir_name + "/" + object_file_name
                #sc_outfile = dir_name + "/" + screen_cap_file_name
                #cv2.imwrite(obj_outfile, object_file_image)
                #cv2.imwrite(sc_outfile, object_file_image)

                #write summary & data files

                #fp.close()
                #fp2.close()
                # prep event or capture for upload to AMS
                values['datetime'] = capture_date
                values['motion_frames'] = total_motion
                values['cons_motion'] = max_cons_motion
                values['color'] = avg_color
                values['straight_line'] = straight_line
                values['meteor_yn'] = meteor_yn
                values['bp_frames'] = total_motion
                print("ending here...")
                exit()

                if meteor_yn == 'Y':
                    try:
                        values['best_caldate'] = config['best_caldate']
                    except:
                        config['best_caldate'] = '0000-00-00 00:00:00'
                        values['best_caldate'] = config['best_caldate']
                    #try:
                    #log_fireball_event(config, file, dir_name + "/" + summary_file_name, dir_name + "/" + object_file_name, values)
                    #except:
                    #   print ("failed to upload event file.")
                    #   return(0)
                    #move files to maybe dir
                    print("Move to maybe dir!")
                    #os.system("mv " + dir_name + "/" + file_base_name + "* " + "/var/www/html/out/maybe/")

                    move_file(dir_name + file_name, "maybe")

                else:
                    #log_motion_capture(config, dir_name + "/" + object_file_name, values)
                    #try:
                    #log_motion_capture(config, dir_name + "/" + object_file_name, values)
                    #except:
                    #   print ("failed to upload capture file.")
                    #   return(0)
                    #print ("Move to false dir!")
                    if (skip == 1 or noise >= 5) and status == 'night':
                        #os.system("mv " + dir_name + "/" + file_base_name + "* " + "/var/www/html/out/dist/")
                        move_file(dir_name + file_name, "dist")

                    elif avg_fps < 20:
                        move_file(dir_name + file_name, "calvid")
                    else:
                        if total_motion >= 3:
                            move_file(dir_name + file_name, "false")
                        else:
                            move_file(dir_name + file_name, "no_motion_night")

                return (1)

        # main video processing loop here
        if status == "day":
            mod_skip = 5
        else:
            mod_skip = 5

        if frame_count % mod_skip == 0:
            nice_frame = frame

            alpha, tstamp_prev = iproc.getAlpha(tstamp_prev)
            frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
            #print ("AFTER:", np.shape(frame))

            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray_frame = frame
            # if last_gray_frame is None:
            #    last_gray_frame = gray_frame
            # else:
            #    gray_frame_cmb = gray_frame + last_gray_frame
            #    last_gray_frame = gray_frame
            #    gray_frame = gray_frame_cmb

            frame = cv2.GaussianBlur(frame, (21, 21), 0)
            if last_frame is None:
                last_frame = nice_frame
            if image_acc is None:
                image_acc = np.empty(np.shape(frame))

            image_diff = cv2.absdiff(
                image_acc.astype(frame.dtype),
                frame,
            )
            alpha = .076
            hello = cv2.accumulateWeighted(frame, image_acc, alpha)
            _, threshold = cv2.threshold(image_diff, 30, 255,
                                         cv2.THRESH_BINARY)
            thresh = cv2.dilate(threshold, None, iterations=2)
            (_, cnts, xx) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                             cv2.CHAIN_APPROX_SIMPLE)

            frame_img = Image.fromarray(frame)
            if final_image is None:
                final_image = frame_img
            final_image = ImageChops.lighter(final_image, frame_img)

            if frame_count > 5:
                np_final_image = np.asarray(final_image)
                gray_nice_frame = cv2.cvtColor(nice_frame, cv2.COLOR_BGR2GRAY)
                total_stars, star_image = find_stars(gray_nice_frame)
                if total_stars == 0:
                    print("Cloudy...???")
                    nostars = nostars + 1
                else:
                    nostars = 0
            # if you want to stack the accumulated frames..
            #image_acc_pil = Image.fromarray(cv2.convertScaleAbs(image_acc))
            #final_image=ImageChops.lighter(final_image,image_acc_pil)

            data = str(frame_count) + "|"

            color = 0
            contours = len(cnts)
            x, y, w, h = 0, 0, 0, 0

            if contours > 3:
                noise = noise + 1

            if contours > 0 and frame_count > 5:
                x, y, w, h = cv2.boundingRect(cnts[0])
                mx = x + w
                my = y + h
                cx = int(x + (w / 2))
                cy = int(y + (h / 2))
                color = gray_frame[cy, cx]
                xs.extend([x])
                ys.extend([y])
                colors.extend([color])

                motion_frames.extend([frame_count])
                prev_motion = 1
                cons_motion = cons_motion + 1
            else:
                if cons_motion > 0:
                    motion_events.append(cons_motion)
                prev_motion = 0

            line_data = ""
            #line_data = str(frame_count) + "|" + str(contours) + "|" + str(x) + "|" + str(y) + "|" + str(w) + "|" + str(h) + "|" + str(color) + "|" + frame_time_data[frame_count-1] + "\n"

            #fp.write(line_data)
            #fp2.write(line_data)
            print(frame_count, contours, x, y, w, h, color)
            if frame_count % 10 == 0:
                np_final_image = np.asarray(final_image)
                np_star_image = np.asarray(star_image)
                small_star_image = cv2.resize(star_image, (0, 0),
                                              fx=0.5,
                                              fy=0.5)
                #cv2.imshow('pepe', small_star_image)
                cv2.imshow('pepe', np_final_image)
                #cv2.imshow('pepe', cv2.convertScaleAbs(image_acc))
                cv2.waitKey(1)
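
The final_image loop above uses PIL's ImageChops.lighter, which keeps the per-pixel maximum across frames: the classic max stack used for star-trail and meteor composites. The same operation in plain NumPy, avoiding the PIL round-trips:

import numpy as np

def max_stack(frames):
    """Per-pixel maximum over frames; equivalent to chained ImageChops.lighter calls."""
    stacked = frames[0].copy()
    for f in frames[1:]:
        np.maximum(stacked, f, out=stacked)
    return stacked
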
Code Example #9
def show_loop(pipe_child):
    image_acc = None
    cal_image = None
    nice_image_acc = None
    stack = None
    tstamp_prev = None
    cv2.namedWindow('pepe')
    count = 0
    time_start = datetime.datetime.now()
    Q = None
    frame = pipe_child.recv()
    #frames = deque(np.empty(np.shape(frame)), maxlen=256)
    frames = deque(maxlen=256)
    firstFrame = None
    motion_on = 0
    motion_off = 0

    while True:
        frame = pipe_child.recv()
        frames.appendleft(frame)
        if firstFrame is None:
            firstFrame = frame 
            stack = frame 
            continue

        if count % 1 == 0: 
            nice_frame = frame
            alpha, tstamp_prev = iproc.getAlpha(tstamp_prev)
            if nice_image_acc is None:
                nice_image_acc = np.empty(np.shape(frame))
            nice_image_diff = cv2.absdiff(nice_image_acc.astype(frame.dtype), nice_frame,)
            hello = cv2.accumulateWeighted(nice_frame, nice_image_acc, alpha)
            #hellp = cv2.addWeighted(nice_image_acc,.5, nice_frame, .5, .5 )
            stack = stack + frame
            if count % 30 == 0:
               stack = frame
            nice_avg = cv2.convertScaleAbs(nice_image_acc)

            frame = cv2.resize(frame, (0,0), fx=0.5, fy=0.5)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            frame = cv2.GaussianBlur(frame, (21, 21), 0)
            if image_acc is None:
                image_acc = np.empty(np.shape(frame))
            image_diff = cv2.absdiff(image_acc.astype(frame.dtype), frame,)
            hello = cv2.accumulateWeighted(frame, image_acc, alpha)

            if cal_image is None:
               cal_image = nice_image_acc

            if count % 30 == 0:
               cal_image = cal_image + nice_image_acc

            cal_image_show = cv2.convertScaleAbs(cal_image)

            if count % 100 == 0:
               cal_image = nice_image_acc  # bug fix: original assigned to misspelled 'cal_iamge'

            #avg = cv2.convertScaleAbs(image_acc)
            _, threshold = cv2.threshold(image_diff, 30, 255, cv2.THRESH_BINARY)
            thresh= cv2.dilate(threshold, None , iterations=2)
            (_, cnts, xx) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            if len(cnts) == 0:
               motion_off = motion_off + 1
            else:
               motion_on = motion_on + 1
               motion_off = 0

            #for c in cnts:
            ##   if cv2.contourArea(c) < 100:
            #       continue
            #   motion_on = motion_on + 1
            #   motion_off = 0
            if motion_off > 10 and motion_on < 10:
               motion_on = 0
               motion_off = 0

            if motion_off > 20 and motion_on > 10:
               print ("RECORD BUFFER NOW!")
               motion_off = 0
               motion_on = 0
               #write_buffer(frames)
               i = 1000 
               timestamp = datetime.datetime.now()
               outfile = "{}/{}.avi".format("out",
               timestamp.strftime("%Y%m%d-%H%M%S"))
               if record == 1:  # 'record' is presumably a module-level flag set elsewhere
                   writer = cv2.VideoWriter(outfile, cv2.VideoWriter_fourcc(*'MJPG'), 25, (frames[0].shape[1], frames[0].shape[0]), True)
                   while frames:
                       img = frames.pop()

                       #cv2.putText(img, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S.%f%p"),
                       #(10, img.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1)
                       #cv2.imwrite("out/" + str(i) + ".jpg", img)
                       writer.write(img)
                       i = i + 1
                   writer.release()

            print ("motion_on, motion off, contours", motion_on, motion_off, len(cnts))
            #(x, y, w, h) = cv2.boundingRect(c)
            #cv2.rectangle(frame, (x, y), (x + w, y + w), (0, 255, 0), 2)

            #closed = cv2.morphologyEx(dilated, cv2.MORPH_CLOSE, MORPH_KERNEL)
            cv2.imshow('pepe', nice_frame)
            #cv2.imshow('pepe', cal_image_show)
            #cv2.imshow('pepe', cv2.convertScaleAbs(image_diff))
            cv2.waitKey(1)

        count = count + 1

        now = datetime.datetime.now()
        delta = now - time_start 
        limit = delta.total_seconds()