Esempio n. 1
0
def restart_cam(cam_num):
    """Ask camera `cam_num` to reboot via its CGI restart endpoint.

    Reads the camera IP and admin password from conf/config-<cam_num>.txt.
    Accepts int or str camera numbers — every sibling helper in this file
    wraps cam_num in str(); the original here did not and raised TypeError
    for int arguments.
    """
    cfile = "conf/config-" + str(cam_num) + ".txt"
    config = read_config(cfile)
    url = "http://" + config[
        'cam_ip'] + "/cgi-bin/restart_cgi?user=admin&pwd=" + config['cam_pwd']
    print("RESTART: ", url)
    # Fire-and-forget: the response body is not used.
    r = requests.get(url)
Esempio n. 2
0
def ping_cam(cam_num):
    """Ping the camera once; return 1 when it answers, 0 otherwise."""
    cfg = read_config("conf/config-" + str(cam_num) + ".txt")
    reachable = os.system("ping -c 1 " + cfg['cam_ip']) == 0
    if reachable:
        print ("Cam is up!")
        return 1
    print ("Cam is down!")
    return 0
def focus(config_file, cam_num):
    """Interactive focus aid for camera `cam_num`.

    Streams frames from the camera's RTSP feed, thresholds just below the
    brightest pixel to find the brightest blob, and shows a zoomed crop of
    it in the 'pepe' window so the lens can be focused by eye.  Runs for at
    most 10000 frames.  The original leaked the capture handle, the window,
    and the mask file handle; all three are now released/closed.
    """
    config = {}
    cv2.namedWindow('pepe')
    config = read_config(config_file)

    # Optional per-camera mask: each line of the file is executed as Python
    # and is expected to define `still_mask`.  NOTE(review): exec() on a
    # config file is unsafe if that file can be written by an untrusted party.
    mask_file = "conf/mask-" + str(cam_num) + ".txt"
    print("MASK FILE: ", mask_file)
    mask_exists = 0
    if Path(mask_file).is_file():
        print("File found.")
        with open(mask_file) as ms:
            for lines in ms:
                line, jk = lines.split("\n")
                exec(line)
        # Mask application is intentionally disabled here (mask_exists stays
        # 0); diff_stills() contains the same block with it enabled.
        #mask_exists = 1
        #(sm_min_x, sm_max_x, sm_min_y, sm_max_y) = still_mask

    #config['cam_ip'] = ip
    cap = cv2.VideoCapture("rtsp://" + config['cam_ip'] + "/av0_0")
    try:
        for fc in range(0, 10000):
            _, frame = cap.read()
            if frame is None:
                continue
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Black out the on-screen timestamp / banner regions.
            frame[680:720, 0:620] = [0]
            frame[580:720, 0:1280] = [0]
            if mask_exists == 1:
                frame[sm_min_y:sm_max_y, sm_min_x:sm_max_x] = [0]
            # Threshold 5 levels below the brightest pixel to isolate it.
            max_px = np.amax(frame)
            _, threshold = cv2.threshold(frame, max_px - 5, 255,
                                         cv2.THRESH_BINARY)
            (_, cnts, xx) = cv2.findContours(threshold.copy(),
                                             cv2.RETR_EXTERNAL,
                                             cv2.CHAIN_APPROX_SIMPLE)
            if len(cnts) > 0:
                x, y, w, h = cv2.boundingRect(cnts[0])
                cv2.rectangle(frame, (x - 5, y - 5), (x + w + 5, y + h + 5),
                              (255), 1)
                crop = frame[y - 20:y + 20, x - 20:x + 20]
                _, crop_threshold = cv2.threshold(crop, max_px - 15, 255,
                                                  cv2.THRESH_BINARY)

                # Print the blob size every 10th frame as a focus metric.
                if (fc % 10 == 0):
                    print(w, h)

                showme = cv2.resize(crop, (0, 0), fx=2.5, fy=2.5)
                cv2.imshow('pepe', showme)
                cv2.waitKey(1)
    finally:
        # Bug fix: the original never released the stream or the window.
        cap.release()
        cv2.destroyWindow('pepe')
Esempio n. 4
0
def is_it_clear(cam_num):
    """Report whether the sky looks clear based on overall frame color."""
    config = read_config("conf/config-" + str(cam_num) + ".txt")
    image = get_cap(config)

    # Average down the rows first, then across the row-averages, leaving one
    # mean per channel.  NOTE(review): OpenCV frames are BGR, so r/g/b here
    # may actually be b/g/r — confirm against get_cap() before relying on it.
    avg_color_per_row = np.average(image, axis=0)
    r, g, b = np.average(avg_color_per_row, axis=0)

    if all(100 < channel < 145 for channel in (r, g, b)):
        print(str(cam_num) + " Looks clear :)")
    else:
        print(str(cam_num) + " Looks cloudy :(")
    print(r, g, b)
Esempio n. 5
0
def get_latest_pic(cam_num, cap_ip):
    """Grab one frame from the camera and save it as the 'latest' JPEG.

    When `cap_ip` is empty, the camera IP comes from conf/config-<cam_num>.txt
    and the output file is named after cam_num; otherwise the given IP is used
    directly and the output is named after the IP's last octet.
    """
    # Bug fix: the body referenced `cam_ip`, which was never defined (the
    # parameter is spelled `cap_ip`), so every call raised NameError.  The
    # parameter name is kept for backward compatibility and aliased here.
    cam_ip = cap_ip
    if cam_ip == "":
        config_file = "conf/config-" + str(cam_num) + ".txt"
        outfile = "/var/www/html/out/latest-" + str(cam_num) + ".jpg"
        config = read_config(config_file)
    else:
        config = {}
        config['cam_ip'] = cam_ip
        el = cam_ip.split(".")
        outfile = "/var/www/html/out/latest-" + str(el[-1]) + ".jpg"

    cap = cv2.VideoCapture("rtsp://" + config['cam_ip'] + "/av0_0")
    cv2.setUseOptimized(True)
    try:
        _, frame = cap.read()
        # Guard against a failed read (cv2.imwrite on None raises).
        if frame is not None:
            cv2.imwrite(outfile, frame)
        else:
            print("Failed to read a frame from camera", config['cam_ip'])
    finally:
        # Bug fix: the original leaked the capture handle.
        cap.release()
Esempio n. 6
0
def setup_cam(cam):
    """Provision a new camera: apply defaults, record its MAC, switch to DHCP.

    `cam` is the camera ID used to name the caminfo/<cam>.txt record.  The
    camera is expected to be reachable at the factory address 192.168.1.88.
    Exits the process when no cam ID is supplied.
    """
    if cam == '':
        print ("ERROR NO CAM ID!")
        exit()
    else:
        print ("Turning on cam# ", cam)
        #os.system("./pizero-relay.py cam_on " + str(cam))
    #time.sleep(15)
    cont = input("Now is your time to focus. Press enter once done. ")

    config = read_config()
    cam_ip = "192.168.1.88"

    # set the camera defaults, wait for reboot, then do it again
    os.system("./camera_defaults.py " + cam_ip)
    time.sleep(35)
    os.system("./camera_defaults.py " + cam_ip)
    os.system("./camera-settings.py " )

    # Fetch the camera's system parameters and record its MAC address.
    # The request now happens before the file is opened, and `with` closes
    # the handle even on error (the original opened first and leaked it if
    # the HTTP request raised).
    url = "http://" + str(cam_ip) + "/cgi-bin/sysparam_cgi?user=admin&pwd=" + config['cam_pwd']
    print (url)
    r = requests.get(url)
    with open("caminfo/" + str(cam) + ".txt", "w") as caminfo:
        for line in r.text.split("\n"):
            if "MACAddress" in line:
                caminfo.write("<CamID>" + str(cam) + "</CamID>\n")
                caminfo.write(line.replace("\t", ""))

    # set boot proto to dhcp
    url = "http://" + str(cam_ip) + "/cgi-bin/network_cgi?user=admin&pwd=" + config['cam_pwd'] + "&action=set&BootProto=dhcp"
    print (url)
    r = requests.get(url)
    print (r.text)
Esempio n. 7
0
   def __init__(self, master):
      """Build the calibration tool UI inside `master`.

      Lays out four stacked frames: a top toolbar, the main image canvas,
      an action-button row, and a bottom panel with tuning fields, sliders,
      cursor readouts and a thumbnail canvas.  Observer location is loaded
      from camera 1's config file.
      """
      self.master = master
      print ("Cal class init")
      # IMAGE FILE PATHS AND NAMES     
      self.image_path = None
      self.wcs_path = None
      self.man_sources = []
      self.data_list = []
      self.image = None
      self.new_image = None
      self.starmap_image = None
      self.fireball_image = None
      self.active_image = None
      self.mask_on = None
      self.mask_image = None

      self.starlist_array = None
      self.padding = 0
      self.image_width = 640 + (self.padding * 2)  # 840
      self.image_height = 360 + (self.padding * 2) # 560

      # Observer location is read from camera 1's config file.
      cam_num = 1 
      config_file = "/home/ams/fireball_camera/conf/config-" + str(cam_num) + ".txt"
      config = read_config(config_file)
     
      self.cal_time = None
      self.obs_code = None
      self.loc_lat = config['device_lat']
      self.loc_lon = config['device_lng'] 
      self.loc_alt = config['device_alt'] 
      self.fb_dt = None

      # Plate-solving tuning defaults (mirrored by the entry fields below).
      self.star_thresh = 7
      self.odds_to_solve = 10000
      self.code_tolerance = .3


      # FRAME 1
      # Build Calibration Layout 
      self.cal_frame1 = tk.Frame(self.master, borderwidth=1, bg='black', height=50, width=650)
      # info at top of page
      self.cal_frame1.pack_propagate(0)
      self.button_open = tk.Button(self.cal_frame1, text="Open File", command=self.OpenImage)
      self.button_open.pack(side=tk.LEFT)
      # NOTE(review): .pack() returns None, so this line rebinds the
      # attribute self.button_set_location_time — which is also the handler
      # method passed as command= — to None.  Confirm and fix (assign the
      # Button, then pack it on a separate line).
      self.button_set_location_time = tk.Button(self.cal_frame1, text='Set Loc/Time', command=self.button_set_location_time).pack(padx=1, pady=1, side=tk.LEFT)

      self.filename_label_value = tk.StringVar()
      # NOTE(review): chained .pack() means self.filename_label is None
      # (same pattern on most labels below).
      self.filename_label = tk.Label(self.cal_frame1, textvariable=self.filename_label_value).pack(padx=1,pady=1,side=tk.LEFT)
      self.filename_label_value.set("nofile2")

      self.cal_time_label_value = tk.StringVar()
      # NOTE(review): textvariable=self.cal_time is None here — presumably
      # this was meant to be self.cal_time_label_value; verify before fixing.
      self.cal_time_label = tk.Label(self.cal_frame1, textvariable=self.cal_time).pack(padx=1,pady=1,side=tk.LEFT)
      self.cal_time_label_value.set("No cal date set yet. ")


      self.cal_frame1.pack(side=tk.TOP)

      # FRAME 2
      # image canvas 
      canvas_width = self.image_width
      canvas_height = self.image_height
      self.cal_frame2 = tk.Frame(self.master, bg='white', borderwidth=1, width=canvas_width+1, height=canvas_height+1, )
      self.image_canvas = tk.Canvas(self.cal_frame2, width=canvas_width, height=canvas_height, cursor="cross")
      self.image_canvas.extra = "image_canvas"
      self.image_canvas.bind('<Motion>', self.motion)
      self.image_canvas.pack()
      self.cal_frame2.extra = 'image_canvas_frame'

      # Left click / right click handlers for source selection.
      self.image_canvas.bind('<Button-1>', self.mouseClick)
      self.image_canvas.bind('<Button-3>', self.mouseRightClick)
      self.cal_frame2.pack(side=tk.TOP)

      # FRAME 3 - Action Buttons
      self.cal_frame3 = tk.Frame(self.master, bg='blue', height=50, width=self.image_width)
      self.cal_frame3_unsolved()
      self.cal_frame3.pack(side=tk.TOP)
      self.cal_frame3.pack_propagate(0)

      # FRAME 4 

      self.cal_frame4 = tk.Frame(self.master, bg='blue', height=300, width=self.image_width)

      self.container_far_left = tk.Frame(self.cal_frame4)

      self.fcfl = tk.Frame(self.container_far_left)
      self.star_thresh_label = tk.Label(self.fcfl, text="Star Threshold" ).pack(padx=1,pady=1,side=tk.TOP)
      # NOTE(review): textvariable expects a tkinter Variable; self.star_thresh
      # is a plain int, so this binding is ineffective (the insert below is
      # what actually populates the field).
      self.e1 = tk.Entry(self.fcfl, textvariable=self.star_thresh)
      self.e1.insert(0, self.star_thresh)
      self.e1.pack()
      self.fcfl.pack(side=tk.TOP)

      self.fcfl2 = tk.Frame(self.container_far_left)
      self.odds_to_solve_label = tk.Label(self.fcfl2, text="Odds To Solve" ).pack(padx=1,pady=1,side=tk.TOP)
      self.e2 = tk.Entry(self.fcfl2, textvariable=self.odds_to_solve)

      self.e2.insert(0, self.odds_to_solve)
      self.e2.pack(side=tk.TOP)
      self.fcfl2.pack(side=tk.TOP)

      self.fcfl3 = tk.Frame(self.container_far_left)
      # NOTE(review): this clobbers the numeric self.code_tolerance (0.3)
      # with None (the .pack() result) and parents the label under fcfl2
      # rather than fcfl3 — both look unintentional; confirm before fixing.
      self.code_tolerance = tk.Label(self.fcfl2, text="Code Tolerance" ).pack(padx=1,pady=1,side=tk.TOP)
      self.e3 = tk.Entry(self.fcfl3, textvariable=self.code_tolerance)

      self.e3.insert(0, ".3")
      self.e3.pack(side=tk.TOP)
      self.fcfl3.pack(side=tk.TOP)


      self.container_far_left.pack(side=tk.LEFT)


      self.container_left = tk.Frame(self.cal_frame4)
      # Sliders
      self.field_container = tk.Frame(self.container_left)
      self.brightness_label = tk.Label(self.field_container, text="Brightness" ).pack(padx=1,pady=1,side=tk.TOP)
      self.brightness_slider = tk.Scale(self.field_container, from_=-100, to=100, orient=tk.HORIZONTAL, command=self.updateBrightness).pack(padx=1, pady=1, side=tk.BOTTOM)
      self.field_container.pack(side=tk.TOP)

      self.field_container2 = tk.Frame(self.container_left)
      contrast_label = tk.Label(self.field_container2, text="Contrast" ).pack(padx=1, pady=1, side=tk.TOP)
      contrast_slider = tk.Scale(self.field_container2, from_=-100, to=100, orient=tk.HORIZONTAL, command=self.updateContrast ).pack(padx=1, pady=1, side=tk.BOTTOM)

      self.field_container2.pack(side=tk.TOP)
      self.container_left.pack(side=tk.LEFT)

      # Middle Container
      # Live readouts under the cursor: pixel x/y, RA/Dec, Alt/Az.
      self.container_middle = tk.Frame(self.cal_frame4)

      self.xy_label_value = tk.StringVar()
      self.xy_label_value.set("x,y,fa,bp")
      self.xy_label= tk.Label(self.container_middle, textvariable=self.xy_label_value).pack(padx=1,pady=1,side=tk.TOP)

      self.ra_label_value = tk.StringVar()
      self.ra_label_value.set("ra,dec")
      self.ra_label= tk.Label(self.container_middle, textvariable=self.ra_label_value).pack(padx=1,pady=1,side=tk.TOP)

      self.az_label_value = tk.StringVar()
      self.az_label_value.set("alt,az")
      self.az_label= tk.Label(self.container_middle, textvariable=self.az_label_value).pack(padx=1,pady=1,side=tk.TOP)


      self.container_middle.pack(side=tk.LEFT)


      # Right side container! 
      self.container_right = tk.Frame(self.cal_frame4)
      self.thumbnail_canvas = tk.Canvas(self.container_right, width=100, height=100, cursor="cross")
      self.thumbnail_canvas.extra = "thumbnail_canvas"
      self.thumbnail_canvas.pack()
      self.container_right.pack(side=tk.LEFT)


      # NOTE(review): `root` is a module-level global, not self.master —
      # confirm the intended window is destroyed on Exit.
      self.del_button = tk.Button(self.cal_frame4, text='Delete File', command=self.button_delete).pack(padx=1, pady=1, side=tk.LEFT)
      self.exit_button = tk.Button(self.cal_frame4, text='Exit', command=root.destroy).pack(padx=1, pady=1, side=tk.BOTTOM)

      self.cal_frame4.pack_propagate(0)
      self.cal_frame4.pack(side=tk.TOP)
Esempio n. 8
0
def diff_stills(sdate, cam_num):
    """Scan one night's stacked still images for moving-object detections.

    Loads every *cam<cam_num>-stacked.jpg under /mnt/ams2/SD/proc/<sdate>/,
    masks known-bad regions, runs inspect_image() on each frame against a
    median/background model, writes per-image and per-night report files,
    builds a master stack image, and prints a summary of candidate hits.

    Side effects: writes <sdate>-cam<N>-report.txt, per-image
    *-stack-report.txt files, and <sdate>-cam<N>-master_stack.jpg.
    """
    med_last_objects = []
    last_objects = deque(maxlen=5)
    diffed_files = []
    # NOTE(review): config is always read from config-1.txt even though
    # cam_num is a parameter — confirm whether this is intentional.
    config = read_config("conf/config-1.txt")
    video_dir = "/mnt/ams2/SD/"
    images = []
    images_orig = []
    images_blend = []
    images_info = []
    count = 0
    last_image = None
    last_thresh_sum = 0
    hits = 0
    avg_cnt = 0
    avg_tot = 0
    avg_pts = 0
    count = 0

    glob_dir = video_dir + "proc/" + sdate + "/" + "*cam" + str(
        cam_num) + "-stacked.jpg"
    report_file = video_dir + "proc/" + sdate + "/" + sdate + "-cam" + str(
        cam_num) + "-report.txt"
    master_stack_file = video_dir + "proc/" + sdate + "/" + sdate + "-cam" + str(
        cam_num) + "-master_stack.jpg"

    #cv2.namedWindow('pepe')
    # Optional per-camera mask: each line of the file is executed as Python
    # and is expected to define `still_mask`.  NOTE(review): exec() on a
    # config file is unsafe if that file is writable by others.
    mask_file = "conf/mask-" + str(cam_num) + ".txt"
    file_exists = Path(mask_file)
    mask_exists = 0
    still_mask = [0, 0, 0, 0]
    if (file_exists.is_file()):
        print("File found.")
        ms = open(mask_file)
        for lines in ms:
            line, jk = lines.split("\n")
            exec(line)
        ms.close()
        mask_exists = 1
        (sm_min_x, sm_max_x, sm_min_y, sm_max_y) = still_mask
    diffs = 0
    image_list = []
    file_list = []
    sorted_list = []
    print("Loading still images from ", glob_dir)
    fp = open(report_file, "w")
    # Keep only nighttime frames (sun at least 5 degrees below horizon).
    for filename in (glob.glob(glob_dir)):
        capture_date = parse_file_date(filename)
        sun_status, sun_alt = day_or_night(config, capture_date)
        if sun_status != 'day' and int(sun_alt) <= -5:
            #print("NIGHTTIME", capture_date, filename, sun_status)
            file_list.append(filename)
        else:
            print("This is a daytime or dusk file")

    sorted_list = sorted(file_list)
    # Load each frame as grayscale and black out masked regions.
    for filename in sorted_list:
        open_cv_image = cv2.imread(filename, 0)
        orig_image = open_cv_image
        images_orig.append(orig_image)
        print(filename)
        # Blank the on-screen timestamp banner.
        open_cv_image[440:480, 0:640] = [0]
        if mask_exists == 1:
            open_cv_image[sm_min_y:sm_max_y, sm_min_x:sm_max_x] = [0]
        images.append(open_cv_image)

    #exit()
    #time.sleep(5)
    height, width = open_cv_image.shape
    master_stack = None
    # Define the codec and create VideoWriter object
    #fourcc = cv2.VideoWriter_fourcc(*'H264')
    #out = cv2.VideoWriter(outfile,fourcc, 5, (width,height),1)
    #med_stack_all = np.median(np.array(images[50:150]), axis=0)
    # Whole-night median frame: the static sky background.
    med_stack_all = np.median(np.array(images), axis=0)
    #cv2.imshow('pepe', cv2.convertScaleAbs(med_stack_all))
    #cv2.waitKey(1000)
    objects = None
    last_line_groups = []
    last_points = []
    for filename in sorted_list:
        hit = 0
        detect = 0
        el = filename.split("/")
        fn = el[-1]
        #this_image = cv2.imread(filename,1)
        this_image = images[count]

        # Neighbouring frames for temporal comparison.
        # NOTE(review): for count == 0 the "before" frame is actually two
        # frames ahead — confirm this fallback is intentional.
        if count >= 1:
            before_image = images[count - 1]
        else:
            before_image = images[count + 2]

        if count >= len(file_list) - 1:
            after_image = images[count - 2]
        else:
            after_image = images[count + 1]

        # Rolling 25-frame median around the current frame.
        # NOTE(review): the elif and else branches are identical — the
        # elif looks like dead/redundant code.
        if count < 25:
            median = np.median(np.array(images[0:count + 25]), axis=0)

        elif len(images) - count < 25:
            median = np.median(np.array(images[count - 25:count]), axis=0)
        else:
            median = np.median(np.array(images[count - 25:count]), axis=0)

        # Exponentially weighted background from 10 neighbouring frames.
        if count < 10:
            background = images[count + 1]
            for i in range(0, 10):
                background = cv2.addWeighted(background, .8, images[count + i],
                                             .2, 0)
        else:
            background = images[count - 1]
            for i in range(0, 10):
                background = cv2.addWeighted(background, .8, images[count - i],
                                             .2, 0)

        img_rpt_file = filename.replace("-stacked.jpg", "-stack-report.txt")
        img_report = open(img_rpt_file, "w")

        (blend, points, line_groups, stars, obj_points,
         big_cnts) = inspect_image(med_stack_all, background, median,
                                   before_image, this_image, after_image,
                                   avg_cnt, avg_tot, avg_pts, filename)
        master_stack = stack_stack(blend, master_stack)
        img_report.write("points=" + str(points) + "\n")
        img_report.write("line_groups=" + str(line_groups) + "\n")
        img_report.write("stars=" + str(stars) + "\n")
        img_report.write("obj_points=" + str(obj_points) + "\n")
        img_report.write("big_cnts=" + str(big_cnts) + "\n")
        img_report.close()
        images_blend.append(blend)
        images_info.append((points, line_groups, stars, obj_points, big_cnts))
        # block out the detections in the master image to remove it from the running mask
        # NOTE(review): last_line_group is assigned but never used; each
        # entry of points is assumed to be an (x, y, w, h) tuple — confirm
        # against inspect_image().
        last_line_group = line_groups
        last_points = points
        for x, y, w, h in last_points:
            images[count][y:y + h, x:x + w] = 5

        count = count + 1
        if len(big_cnts) > 0 or len(obj_points) >= 3:
            hits = hits + 1
        #cv2.imshow('pepe', blend)
        #if len(line_groups) >= 1 or len(obj_points) > 3 or len(big_cnts) > 0:
        #cv2.waitKey(1)
        #   while(1):
        #      k = cv2.waitKey(33)
        #      if k == 32:
        #         break
        #      if k == 27:
        #         exit()
        #else:
        #cv2.waitKey(1)
        data = filename + "," + str(len(line_groups)) + "," + str(
            len(obj_points)) + "," + str(len(big_cnts)) + "\n"
        fp.write(data)

    print("TOTAL: ", len(file_list))
    print("HITS: ", hits)
    fp.close()

    if master_stack is not None:
        print("saving", master_stack_file)
        master_stack.save(master_stack_file, "JPEG")
    else:
        print("Failed.")

    # Second pass: print a detailed summary for each candidate detection.
    # NOTE(review): range stops at len(sorted_list) - 1, so the final image
    # is never summarized — confirm whether that is intentional.
    hits = 1
    for count in range(0, len(sorted_list) - 1):
        file = sorted_list[count]
        el = file.split("/")
        st = el[-1]
        report_str = st.replace("-stacked.jpg", "-report.txt")
        video_str = st.replace("-stacked.jpg", ".mp4")
        video_file = file.replace("-stacked.jpg", ".mp4")
        (points, line_groups, stars, obj_points, big_cnts) = images_info[count]
        if len(obj_points) > 3 or len(big_cnts) > 0:
            # Fold big-contour corners into obj_points so the bounding box
            # below covers them too.
            for bc in big_cnts:
                (x, y, w, h) = bc
                obj_points.append((x, y, 5, 5))
                obj_points.append((x + w, y + h, 5, 5))
            np_obj_points = np.array(obj_points)
            max_x = np.max(np_obj_points[:, 0])
            max_y = np.max(np_obj_points[:, 1])
            min_x = np.min(np_obj_points[:, 0])
            min_y = np.min(np_obj_points[:, 1])
            myimg = cv2.imread(sorted_list[count], 0)
            cv2.rectangle(myimg, (min_x, min_y), (max_x, max_y), (255), 1)
            #cv2.imshow('pepe', myimg)
            #cv2.waitKey(1)

            print("-------")
            print("Count:", count)
            print("Hit:", hits)
            print("File:", sorted_list[count])
            print("Points:", str(len(points)))
            print("Line Groups:", str(len(line_groups)))
            gc = 1
            for line_group in line_groups:
                for dist, ang, x1, y1, w1, h1 in line_group:
                    print("GROUP: ", gc, dist, ang, x1, y1, w1, h1)
                gc = gc + 1
            print("Stars:", str(len(stars)))
            print("Obj Points:", str(len(obj_points)))
            print("Big Cnts:", str(len(big_cnts)))
            print("Min/Max X/Y:", str(min_x), str(min_y), str(max_x),
                  str(max_y))
            print("-------")

            hits = hits + 1
            video_report = video_file.replace(".mp4", "-report.txt")
            file_exists = Path(video_report)
            if (file_exists.is_file()):
                print("Already processed the video.")
            #else:
            #   print("./PV.py " + video_file + " " + cam_num)
            #   os.system("./PV.py " + video_file + " " + cam_num)
        else:
            min_x = min_y = max_x = max_y = 0
Esempio n. 9
0
def view(file, show = 0):
   """Process one captured AVI for motion/meteor detection.

   Reads per-frame timing data, runs frame differencing over the whole
   video, classifies the event (meteor Y/N) from motion geometry, average
   color, day/night status, FPS and noise, writes data/summary report
   files and a composite object image, uploads the result, and moves the
   capture files into maybe/false/dist/calvid directories accordingly.

   Returns 1 on full processing, 0 on any early abort.  `show` is accepted
   but its display path is commented out.
   """

   # before processing dump frames from this file for 24 hour time lapse
   os.system("./dump-frames-tl.py " + file )

   # Derive the camera number from the "<date>-<cam>.avi" filename.
   config_file = ""
   try:
      (file_date, cam_num) = file.split("-")
      cam_num = cam_num.replace(".avi", "")
   except:
      cam_num = ""

   if cam_num == "":
      config = read_config(config_file)
   else: 
      #cam_num = sys.argv[1]
      config_file = "conf/config-" + cam_num + ".txt"
      config = read_config(config_file)



   #config = read_config(config_file)
   # Derive all companion file names from the AVI name.
   frame_time_data = []
   values = {}
   dir_name = os.path.dirname(file)
   file_name = file.replace(dir_name + "/", "")
   summary_file_name = file_name.replace(".avi", "-summary.txt")
   data_file_name = file_name.replace(".avi", ".txt")
   screen_cap_file_name = file_name.replace(".avi", ".jpg")
   object_file_name = file_name.replace(".avi", "-objects.jpg")
   time_file_name = file_name.replace(".avi", "-time.txt")
   capture_date = parse_file_date(file_name)
   #last_cal_date = # Get last / closest calibration date
   file_base_name = file_name.replace(".avi", "") 
   status = day_or_night(config, capture_date)

   # read in time file if it exists
   
   if os.path.isfile(dir_name + "/" + time_file_name):
      frame_time_data = read_time_file(dir_name + "/" + time_file_name)
      print ("FRAME TIME DATA LENGTH:", len(frame_time_data))
      time.sleep(1)
   else: 
      # No timing data: synthesize 225 empty "fps|tc|tt|tx" records.
      print ("no frame time data! " + dir_name + "/" + time_file_name ) 
      for x in range(0, 225): 
         frame_time_data.append("|||")


   # Average FPS across all frame time records.
   fps_t = 0
   for ftd in frame_time_data:
      print ("FRAMEdata", ftd)
      fps, tc, tt, tx = ftd.split("|")
      if fps == "":
         fps = 0
      fps_t = int(float(fps_t)) + int(float(fps))
   if len(frame_time_data) > 0:
      avg_fps = fps_t / len(frame_time_data)
   else :
      avg_fps = 0

   

   print ("Viewing file: " + file)
   print ("Directory: " + dir_name)
   print ("File Name: " + file_name)
   print ("Summary File Name: " + summary_file_name)
   print ("Data File Name: " + data_file_name)
   print ("Screen Cap File Name: " + screen_cap_file_name)
   print ("Object File Name: " + object_file_name)
   print ("Capture Date: " + capture_date)
   print ("FPS: " + str(avg_fps))

   # make sure the file exists
   if os.path.isfile(file) is False:
      print("This file does not exist. Exiting.")
      return(0)
   else:
      print ("The file is ok.")

   #process video

   tstamp_prev = None
   image_acc = None
   last_frame = None
   nice_image_acc = None
   final_image = None
   cur_image = None
   frame_count = 0

   # open data log file
   fp = open(dir_name + "/" + data_file_name, "w")
   fp2 = open(dir_name + "/" + summary_file_name, "w")
   fp.write("frame|contours|x|y|w|h|color|fps|adjusted_unixtime|unixtime|time_offset\n")
   fp2.write("frame|contours|x|y|w|h|color|fps|adjusted_unixtime|unixtime|time_offset\n")


   #if show == 1:
   #   cv2.namedWindow('pepe') 


   cap = cv2.VideoCapture(file)
   time.sleep(2)
   xs = []
   ys = []
   motion_frames = []
   frames = []
   colors = []

   noise = 0

   # Main loop: read every frame; when the stream ends (frame is None) the
   # whole post-processing/classification pass runs inside this loop body
   # and returns.  NOTE(review): frames also accumulates the final None.
   while True:
      _ , frame = cap.read()
      frame_count = frame_count + 1
      frames.extend([frame])
      if frame is None:
         if frame_count <= 1:
            print("Bad file.")
            return(0)
         else:
            print("Processed ", frame_count, "frames.")
            # finish processing file and write output files

            total_motion = len(motion_frames)
            if total_motion < 3 :
               #this a BS capture. abort
               os.system("mv " + dir_name + "/" + file_base_name + "* " + "/var/www/html/out/dist/") 
               return(0)
 
            half_motion = int(round(total_motion/2,0))
            print ("key frame #1 : ", 1) 
            print ("key frame #2 : ", half_motion) 
            print ("key frame #3 : ", total_motion -1) 
            print ("Xs", xs)
            print ("Ys", ys)
            print ("MF", motion_frames)
            avg_color = sum(colors) / float(len(colors))
     
            print ("CL", colors)
            print ("Avg Color: ", avg_color)

            #print (motion_frames[1])
            #print (motion_frames[half_motion])
            #print (motion_frames[total_motion -1])

            #print(frames[motion_frames[1]])
            #print(frames[motion_frames[half_motion]])
            #print(frames[motion_frames[total_motion - 1]])

            # Blend three key motion frames into one composite image.
            object_file_image = (frames[motion_frames[1]] * .33) + (frames[motion_frames[half_motion]] * .33) + (frames[motion_frames[total_motion-2]] * .33) 
          
     
            # NOTE(review): y1/y2/y3 are taken from xs, not ys — this looks
            # like a copy/paste bug that skews the straight-line test;
            # confirm before fixing.
            x1 = xs[1]
            y1 = xs[1]
            x2 = xs[half_motion]
            y2 = xs[half_motion]
            x3 = xs[total_motion-2]
            y3 = xs[total_motion-2]

            xmax = max(xs)
            ymax = max(ys)
            xmin = min(xs)
            ymin = min(ys)
            skip = 0

            # No x/y movement at all -> treat as a static false positive.
            if xmax - xmin == 0 and ymax - ymin == 0:
               skip = 1

            straight_line = compute_straight_line(x1,y1,x2,y2,x3,y3)
            # NOTE(review): this straight-line/color classification is
            # immediately overwritten by the day/night check below, so its
            # result is dead — confirm which rule is intended.
            if (straight_line < 1 and straight_line > 0) or avg_color > 190:
               meteor_yn = "Y"
            else:
               meteor_yn = "N"

            if status == 'night':
               meteor_yn = "Y"
            else:
               meteor_yn = "N"

       

            #meteor_yn = "Y"
            # Veto rules: no movement, too noisy, or calibration-rate video.
            if skip == 1:
               meteor_yn = "N"
               print ("Skipping not enough x,y movement!", xmax, xmin, ymax, ymin)
            if noise >= 5:
               meteor_yn = "N"
               print ("Skipping to much noise!", noise)
            if avg_fps < 20:
               meteor_yn = "N"
               print ("Skipping calibration file!", avg_fps)


            print ("Status:", status)
            print ("Straight Line:", straight_line)
            print ("Likely Meteor:", meteor_yn)


            obj_outfile = dir_name + "/" + object_file_name
            sc_outfile = dir_name + "/" + screen_cap_file_name 
            cv2.imwrite(obj_outfile, object_file_image)
            cv2.imwrite(sc_outfile, object_file_image)


            #write summary & data files

            fp.close()
            fp2.close()

            # prep event or capture for upload to AMS
            values['datetime'] = capture_date 
            values['motion_frames'] = total_motion 
            values['cons_motion'] = total_motion
            values['color'] = avg_color
            values['straight_line'] = straight_line
            values['meteor_yn'] = meteor_yn
            values['bp_frames'] = total_motion

            if meteor_yn == 'Y':
               try:
                  values['best_caldate'] = config['best_caldate']
               except:
                  config['best_caldate'] = '0000-00-00 00:00:00';
                  values['best_caldate'] = config['best_caldate']
               try:
                  log_fireball_event(config, file, dir_name + "/" + summary_file_name, dir_name + "/" + object_file_name, values)
               except:
                  print ("failed to upload event file.")
                  return(0)
               #move files to maybe dir
               print ("Move to maybe dir!")
               os.system("mv " + dir_name + "/" + file_base_name + "* " + "/var/www/html/out/maybe/") 
            else:
               # NOTE(review): log_motion_capture is called twice here —
               # once unguarded and again inside the try — so every
               # non-meteor capture is logged twice; confirm and remove one.
               log_motion_capture(config, dir_name + "/" + object_file_name, values)
               try:
                   log_motion_capture(config, dir_name + "/" + object_file_name, values)
               except:
                  print ("failed to upload capture file.")
                  return(0)
               print ("Move to false dir!")
               if (skip == 1 or noise >= 5) and status == 'night': 
                  os.system("mv " + dir_name + "/" + file_base_name + "* " + "/var/www/html/out/dist/") 
               elif avg_fps < 20:
                  os.system("mv " + dir_name + "/" + file_base_name + "* " + "/var/www/html/out/calvid/") 
               else:
                  os.system("mv " + dir_name + "/" + file_base_name + "* " + "/var/www/html/out/false/") 

            return(1)
      # Per-frame processing: downscale, grayscale, blur, then diff against
      # a running accumulated background to find motion contours.
      nice_frame = frame

      alpha, tstamp_prev = iproc.getAlpha(tstamp_prev)
      frame = cv2.resize(frame, (0,0), fx=0.5, fy=0.5)
      frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
      gray_frame = frame
      frame = cv2.GaussianBlur(frame, (21, 21), 0)
      if last_frame is None:
         last_frame = nice_frame
      if image_acc is None:
         image_acc = np.empty(np.shape(frame))
      image_diff = cv2.absdiff(image_acc.astype(frame.dtype), frame,)
      hello = cv2.accumulateWeighted(frame, image_acc, alpha)
      _, threshold = cv2.threshold(image_diff, 30, 255, cv2.THRESH_BINARY)
      thresh= cv2.dilate(threshold, None , iterations=2)
      (_, cnts, xx) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
      data = str(frame_count) + "|"

      color = 0
      contours = len(cnts)
      x,y,w,h = 0,0,0,0

      # More than 3 simultaneous contours counts as noise.
      if contours > 3:
         noise = noise + 1

      if contours > 0:
          x,y,w,h = cv2.boundingRect(cnts[0])
          mx = x + w
          my = y + h
          cx = int(x + (w/2))
          cy = int(y + (h/2))
          color = gray_frame[cy,cx]
          xs.extend([x])
          ys.extend([y])
          colors.extend([color])
          motion_frames.extend([frame_count])
         
      line_data = str(frame_count) + "|" + str(contours) + "|" + str(x) + "|" + str(y) + "|" + str(w) + "|" + str(h) + "|" + str(color) + "|" + frame_time_data[frame_count-1] + "\n"

      fp.write(line_data)
      fp2.write(line_data)
      print (frame_count, contours, x,y,w,h,color)
Esempio n. 10
0
        date_dir = "/mnt/ams2/SD/proc/" + xyear + "-" + xmonth + "-" + xday
        file_exists = Path(date_dir)
        skip = 0
        if (file_exists.is_dir() is False):
            os.system("mkdir " + date_dir)
        if sun_status == "day":
            cmd = "mv " + file + " /mnt/ams2/SD/proc/daytime/" + file_name
            print(cmd)
            os.system(cmd)
            cmd = "mv " + video_file + " /mnt/ams2/SD/proc/daytime/" + video_file_name
            print(cmd)
            os.system(cmd)
        else:
            if "-stacked" not in file_name:
                file_name = file_name.replace("stack", "stacked")
            cmd = "mv " + file + " " + date_dir + "/" + file_name
            print(cmd)
            os.system(cmd)

            cmd = "mv " + video_file + " " + date_dir + "/" + video_file_name
            print(cmd)
            os.system(cmd)


# Script entry point: load the default (camera 1) config, then purge old
# HD/SD captures and move processed SD files into dated proc directories.
conf = read_config("conf/config-1.txt")
purge_hd_files()
purge_sd_files()
move_processed_SD_files()
purge_SD_proc_dir()
Esempio n. 11
0
def get_calibration_frames():
    """Capture a short calibration clip from the camera and save it as AVI.

    Temporarily brightens the sensor via the camera's HTTP settings
    endpoint, records frames 101-300 from the RTSP stream, writes them to
    /var/www/html/out/cal/<timestamp>.avi, then restores the night
    settings.  Returns the output file path.

    Side effects: touches /home/pi/fireball_camera/calnow (removed at the
    end) and /home/pi/fireball_camera/calibrate.txt as flag files.
    """
    config = read_config()
    config = custom_settings("Calibration", config)

    # Flag file: tells other processes a calibration capture is running.
    # The original kept this handle open for the life of the process.
    open("/home/pi/fireball_camera/calnow", "w").close()

    set_setting(config, "Brightness", config['Brightness'])

    # Raise the sensor brightness for the calibration pass (paramctrl=25).
    r = requests.get(
        "http://" + config['cam_ip'] +
        "/webs/btnSettingEx?flag=1000&paramchannel=0&paramcmd=1058&paramctrl=25&paramstep=0&paramreserved=0&"
    )

    cap = cv2.VideoCapture("rtsp://" + config['cam_ip'] + "/av0_1")

    cv2.setUseOptimized(True)
    # Second flag file; closed immediately instead of leaked.
    open("/home/pi/fireball_camera/calibrate.txt", "w").close()
    time.sleep(3)

    frames = deque(maxlen=200)
    frame_times = deque(maxlen=200)
    count = 0

    while count < 301:
        ok, frame = cap.read()
        if ok:
            # Skip the first 100 frames while exposure settles.
            if count > 100:
                frame_time = time.time()
                frames.appendleft(frame)
                frame_times.appendleft(frame_time)

        if count == 300:
            # Estimate FPS from the spread of the buffered frame times.
            dql = len(frame_times) - 1
            time_diff = frame_times[1] - frame_times[dql]
            fps = 100 / time_diff
            format_time = datetime.datetime.fromtimestamp(
                int(frame_time)).strftime("%Y%m%d%H%M%S")
            outfile = "{}/{}.avi".format("/var/www/html/out/cal", format_time)
            if int(config['hd']) == 0:
                frame_sz = cv2.resize(frames[0], (0, 0), fx=1, fy=.75)
            else:
                frame_sz = frames[0]

            writer = cv2.VideoWriter(outfile, cv2.VideoWriter_fourcc(*'MJPG'),
                                     fps,
                                     (frame_sz.shape[1], frame_sz.shape[0]),
                                     True)
            # Write frames oldest-first.  NOTE(review): the flc > 30 guard
            # only delays the first pop by 31 empty loop iterations — no
            # frames are actually skipped; preserved as-is.
            flc = 0
            while frames:
                if (flc > 30):
                    img = frames.pop()
                    if int(config['hd']) == 0:
                        img = cv2.resize(img, (0, 0), fx=1, fy=.75)
                    frame_time = frame_times.pop()
                    writer.write(img)
                flc = flc + 1
            writer.release()
        count = count + 1

    # Restore normal night-time sensor brightness (paramctrl=50).
    r = requests.get(
        "http://" + config['cam_ip'] +
        "/webs/btnSettingEx?flag=1000&paramchannel=0&paramcmd=1058&paramctrl=50&paramstep=0&paramreserved=0&"
    )
    config = custom_settings("Night", config)
    set_setting(config, "Brightness", config['Brightness'])
    cap.release()
    time.sleep(3)
    os.system("rm /home/pi/fireball_camera/calnow")
    return (outfile)
Esempio n. 12
0
def diff_stills(sdate, cam_num):
    """Scan one night's stacked still images for moving-object detections.

    Loads every "*cam<cam_num>-stacked.jpg" under /mnt/ams2/SD/proc/<sdate>/,
    masks known-bad pixel regions, compares each frame against a global
    median, a rolling median and a weighted background via inspect_image(),
    and prints totals for files scanned, diffs and hits.
    """
    med_last_objects = []
    last_objects = deque(maxlen=5)
    diffed_files = []
    # NOTE(review): always reads conf/config-1.txt rather than the config for
    # `cam_num` — looks unintentional; confirm before changing.
    config = read_config("conf/config-1.txt")
    video_dir = "/mnt/ams2/SD/"
    images = []
    count = 0
    last_image = None
    last_thresh_sum = 0
    hits = 0
    avg_cnt = 0
    avg_tot = 0
    avg_pts = 0
    count = 0

    glob_dir = video_dir + "proc/" + sdate + "/" + "*cam" + str(
        cam_num) + "-stacked.jpg"
    report_file = video_dir + "proc/" + sdate + "/" + sdate + "-cam" + str(
        cam_num) + "-report.txt"

    cv2.namedWindow('pepe')
    # Optional per-camera mask file: each line is executed as Python and is
    # expected to assign `still_mask` (trusted local file only — exec()).
    mask_file = "conf/mask-" + str(cam_num) + ".txt"
    file_exists = Path(mask_file)
    mask_exists = 0
    if (file_exists.is_file()):
        print("File found.")
        ms = open(mask_file)
        for lines in ms:
            line, jk = lines.split("\n")
            exec(line)
        ms.close()
        mask_exists = 1
        (sm_min_x, sm_max_x, sm_min_y, sm_max_y) = still_mask

    diffs = 0
    image_list = []
    file_list = []
    sorted_list = []
    print("Loading still images from ", glob_dir)
    # NOTE(review): report_file is opened for writing but nothing is ever
    # written to it in this function — only fp.close() below.
    fp = open(report_file, "w")
    for filename in (glob.glob(glob_dir)):
        capture_date = parse_file_date(filename)
        sun_status = day_or_night(config, capture_date)
        if sun_status != 'day':
            #print("NIGHTTIME", capture_date, filename, sun_status)
            file_list.append(filename)

    sorted_list = sorted(file_list)
    # Load every night-time frame as grayscale and black out masked regions.
    for filename in sorted_list:
        open_cv_image = cv2.imread(filename, 0)
        open_cv_image[440:480, 0:640] = [0]
        if mask_exists == 1:
            open_cv_image[sm_min_y:sm_max_y, sm_min_x:sm_max_x] = [0]
        images.append(open_cv_image)

    #exit()
    #time.sleep(5)
    height, width = open_cv_image.shape

    # Define the codec and create VideoWriter object
    #fourcc = cv2.VideoWriter_fourcc(*'H264')
    #out = cv2.VideoWriter(outfile,fourcc, 5, (width,height),1)
    #med_stack_all = np.median(np.array(images[50:150]), axis=0)
    # Global median over the whole night — the static-sky reference frame.
    med_stack_all = np.median(np.array(images), axis=0)
    #cv2.imshow('pepe', cv2.convertScaleAbs(med_stack_all))
    #cv2.waitKey(1000)
    objects = None
    last_line_groups = []
    last_points = []
    for filename in sorted_list:
        hit = 0
        detect = 0
        el = filename.split("/")
        fn = el[-1]
        #this_image = cv2.imread(filename,1)
        this_image = images[count]

        # Neighbouring frames for temporal comparison; fall back to nearby
        # frames at the sequence edges.
        if count >= 1:
            before_image = images[count - 1]
        else:
            before_image = images[count + 2]

        if count >= len(file_list) - 1:
            after_image = images[count - 2]
        else:
            after_image = images[count + 1]

        # Rolling 25-frame median around the current frame.
        # NOTE(review): the elif and else branches are identical, so the
        # elif adds nothing — possibly meant to use images[count:count+25].
        if count < 25:
            median = np.median(np.array(images[0:count + 25]), axis=0)

        elif len(images) - count < 25:
            median = np.median(np.array(images[count - 25:count]), axis=0)
        else:
            median = np.median(np.array(images[count - 25:count]), axis=0)

        # Exponentially weighted background over 10 neighbouring frames.
        if count < 10:
            background = images[count + 1]
            for i in range(0, 10):
                background = cv2.addWeighted(background, .8, images[count + i],
                                             .2, 0)
        else:
            background = images[count - 1]
            for i in range(0, 10):
                background = cv2.addWeighted(background, .8, images[count - i],
                                             .2, 0)

        result, line_groups, points = inspect_image(med_stack_all, background,
                                                    median, before_image,
                                                    this_image, after_image,
                                                    avg_cnt, avg_tot, avg_pts,
                                                    filename)
        # block out the detections in the master image to remove it from the running mask
        last_line_group = line_groups
        last_points = points
        for x, y, w, h in last_points:
            images[count][y:y + h, x:x + w] = 5

        count = count + 1
        hits = hits + result
    print("TOTAL: ", len(file_list))
    print("DIFFS: ", diffs)
    print("HITS: ", hits)
    fp.close()

    # Derive companion report/video names for each diffed file.
    # NOTE(review): diffed_files is never appended to above, so this loop
    # body never runs — confirm whether population was lost in an edit.
    for file in diffed_files:
        el = file.split("/")
        st = el[-1]
        report_str = st.replace("-stack.jpg", "-report.txt")
        video_str = st.replace("-stack.jpg", ".mp4")
Esempio n. 13
0
def diff_stills(sdate, cam_num, show_video):
   """Diff one night's stacked stills and build a master stack image.

   For each "*cam<cam_num>-stacked.jpg" under /mnt/ams2/SD/proc/<sdate>/,
   runs diff_all() against a rolling median and weighted background, writes
   a per-frame "-blend" image (skipping frames whose blend already exists),
   and accumulates frames taken in full darkness (sun altitude < -17) into
   a master stack saved as <sdate>-cam<cam_num>-master_stack.jpg.
   """
   med_last_objects = []
   last_objects = deque(maxlen=5)
   diffed_files = []
   # NOTE(review): always reads conf/config-1.txt rather than the config for
   # `cam_num` — confirm before changing.
   config = read_config("conf/config-1.txt")
   video_dir = "/mnt/ams2/SD/"
   images = []
   images_orig = []
   images_blend = []
   images_info = []
   count = 0
   last_image = None
   last_thresh_sum = 0
   hits = 0
   avg_cnt = 0
   avg_tot = 0
   avg_pts = 0
   count = 0

   glob_dir = video_dir + "proc/" + sdate + "/" + "*cam" + str(cam_num) + "-stacked.jpg"
   report_file = video_dir + "proc/" + sdate + "/" + sdate + "-cam" + str(cam_num) + "-report.txt"
   master_stack_file = video_dir + "proc/" + sdate + "/" + sdate + "-cam" + str(cam_num) + "-master_stack.jpg"

   if int(show_video) == 1:
      cv2.namedWindow('pepe')
   # Optional per-camera mask file: each line is executed as Python and is
   # expected to assign `still_mask` (trusted local file only — exec()).
   mask_file = "conf/mask-" + str(cam_num) + ".txt"
   file_exists = Path(mask_file)
   mask_exists = 0
   still_mask = [0,0,0,0]
   if (file_exists.is_file()):
      print("File found.")
      ms = open(mask_file)
      for lines in ms:
         line, jk = lines.split("\n")
         exec(line)
      ms.close()
      mask_exists = 1
      (sm_min_x, sm_max_x, sm_min_y, sm_max_y) = still_mask
   diffs = 0
   image_list = []
   file_list = []
   sorted_list = []
   print ("Loading still images from ", glob_dir)

   # Unlike the older variant, every file is kept regardless of day/night
   # (the filtering conditions are commented out below).
   for filename in (glob.glob(glob_dir)):
       capture_date = parse_file_date(filename)
       sun_status, sun_alt = day_or_night(config, capture_date)
       #if sun_status != 'day' and int(sun_alt) < -4:
       #if sun_status != 'day' :
          #print("NIGHTTIME", capture_date, filename, sun_status)
       file_list.append(filename)
       #else: 
       #   print ("This is a daytime or dusk file")

   sorted_list = sorted(file_list)
   print ("Loading Images...")
   print ("TOTAL IMAGES TO START:", len(sorted_list))
   #time.sleep(5)
   # Load frames (grayscale); unreadable files are deleted from disk.
   for filename in sorted_list:
      open_cv_image = cv2.imread(filename,0)
      orig_image = open_cv_image
      images_orig.append(orig_image)
      print(filename)
      #open_cv_image[440:480, 0:640] = [0]
      #if mask_exists == 1:
      #   open_cv_image[sm_min_y:sm_max_y, sm_min_x:sm_max_x] = [0]
      if open_cv_image is not None:
         if len(open_cv_image.shape) > 2:
            open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2GRAY)
         images.append(open_cv_image)
      else:
         print ("IMAGE IS BAD!")
         cmd = "rm " + filename
         os.system(cmd)
        

   print ("Finished Loading Images.")

   height , width =  open_cv_image.shape
   master_stack = None 
   print ("Make master median...")
   #med_stack_all = np.median(np.array(images), axis=0)
   print ("Done master median...")
   objects = None
   last_line_groups = []
   last_points = []
   for filename in sorted_list:
      hit = 0
      detect = 0
      el = filename.split("/")
      fn = el[-1]
      this_image = images[count]
      blend_file = filename.replace('stacked', 'blend')
      file_exists = Path(blend_file)
      if (file_exists.is_file()):
         # Blend already computed on an earlier run — reuse it.
         # NOTE(review): `count` is only incremented in the else branch, so
         # once a pre-existing blend is skipped, images[count] no longer
         # lines up with `filename` for the rest of the loop — confirm.
         blend = cv2.imread(blend_file)
         print ("We already did this. Skip", filename)
         print (blend.shape)

         capture_date = parse_file_date(fn)
         sun_status, sun_alt = day_or_night(config, capture_date)
         if int(sun_alt) < -17:
            print("SUN: ", sun_alt)
            master_stack = stack_stack(blend, master_stack)
            if int(show_video) == 1:
               cv2.imshow('pepe', blend)
               cv2.waitKey(10)
      else:

         # Neighbouring frames for temporal comparison; fall back to nearby
         # frames at the sequence edges.
         if count >= 1:
            before_image = images[count-1]
         else:
            before_image = images[count+2]
    
         if count >= len(file_list)-1:
            after_image = images[count-2]
         else:
            after_image = images[count+1]
    
         # Rolling 50-frame median around the current frame.
         # NOTE(review): elif and else branches are identical — possibly
         # meant to use a forward-looking slice in the elif.
         if count < 50:
            median = np.median(np.array(images[0:count+50]), axis=0)
         
         elif len(images) - count < 50:
            median = np.median(np.array(images[count-50:count]), axis=0)
         else:
            median = np.median(np.array(images[count-50:count]), axis=0)


         #image_diff = cv2.absdiff(image_acc.astype(frame.dtype), frame,)
         # Exponentially weighted background over 15 neighbouring frames.
         if count < 15:
            background = images[count+1] 
            for i in range (0,15):
               background = cv2.addWeighted(background, .94, images[count+i], .06,0)
         else:
            background = images[count-1] 
            for i in range (0,15):
               background = cv2.addWeighted(background, .94, images[count-i], .06,0)
 
         img_rpt_file = filename.replace("-stacked.jpg", "-stack-report.txt")
         #img_report = open(img_rpt_file, "w")

         current_image = this_image
         image_diff = cv2.absdiff(current_image.astype(current_image.dtype), background,)
         orig_image = current_image
         #current_image = image_diff
         print ("working on: ", filename)
         blend, current_image, filename = diff_all(None, background, median, before_image, current_image, after_image,filename)
         blend_file = filename.replace('stacked', 'blend')
         cv2.imwrite(blend_file, blend)
         #cv2.imshow('pepe', blend)
         #cv2.waitKey(1)
         count = count + 1

      # Stack frames captured in astronomical darkness into the master image.
      capture_date = parse_file_date(fn)
      sun_status, sun_alt = day_or_night(config, capture_date)
      if int(sun_alt) < -17:
         master_stack = stack_stack(blend, master_stack)

   if master_stack is not None:
      print("saving", master_stack_file)
      # master_stack is presumably a PIL Image (it has .save) — returned by
      # stack_stack(); TODO confirm.
      master_stack.save(master_stack_file, "JPEG")
   else:
      print("Failed to save master stack.")
Esempio n. 14
0
            file_list.append(filename)
            count = count + 1
    report = report + "   " + str(count) + " distorton captures\n"
    report = report + "--------------------------\n"
    return (report)


# --- 24-hour report header (script top level) ---
# Reporting window: the 24 hours ending now.
cur_time = int(time.time())
yest_time = int(time.time() - 86400)
stop_time = datetime.datetime.fromtimestamp(
    int(cur_time)).strftime("%Y-%m-%d %H:%M:%S")
start_time = datetime.datetime.fromtimestamp(
    int(yest_time)).strftime("%Y-%m-%d %H:%M:%S")

# Multi-camera installs keep conf/config-1.txt; fall back to the
# single-camera config.txt layout when that read fails.
try:
    config = read_config("conf/config-1.txt")
    single = 0
except:
    config = read_config("config.txt")
    cam_num = config['cam_num']
    single = 1

# Assemble the plain-text report header from station metadata.
report = "AllSky6 24 Hour Report for " + config['obs_name'] + "\n"
report = report + "Period:" + str(start_time) + " to " + str(stop_time) + "\n"
report = report + "Camera Operator: " + config['first_name'] + " " + config[
    'last_name'] + "\n"
report = report + "Latitude : " + config['device_lat'] + "\n"
report = report + "Longitude: " + config['device_lng'] + "\n"
report = report + "Altitude: " + config['device_alt'] + "\n"

if single == 0:
from view import log_motion_capture
from amscommon import read_config, caldate

# Fixed sample capture attributes used to exercise log_motion_capture()
# against a known test image.
values = {
    'datetime': "2017-06-22 13:27:58",
    'motion_frames': 2,
    'cons_motion': 2,
    'color': 1,
    'straight_line': 1,
    'meteor_yn': 0,
    'bp_frames': 3,
}
log_motion_capture(read_config(), "./test_data/20170622132758.jpg", values)
Esempio n. 16
0
def calibrate_file(image_path):
    """Plate-solve a sky image using the module-level ``cal_obj`` calibrator.

    The image is first padded with a black border (so stars near the edge
    are not clipped by the solver) and written back to ``image_path``; star
    detection and astrometric solving are then attempted with progressively
    looser odds thresholds until one succeeds.

    Args:
        image_path: path to the image; the file is rewritten in place.

    Returns:
        (solve_success, odds): the calibrator's success flag and the last
        odds threshold that was tried.
    """
    padding = 100
    image_np = cv2.imread(image_path)
    image = Image.fromarray(image_np)
    new_image = image
    ow = image_np.shape[1]  # original width
    oh = image_np.shape[0]  # original height

    image_width = ow + padding
    image_height = oh + padding
    # With padding > 0 this condition is always true; kept so a future
    # padding of 0 becomes a no-op.
    if (ow != image_width and oh != image_height):
        # Paste the frame onto a larger black canvas, offset by `padding`
        # from the top-left corner. np.zeros is already all-black, so no
        # extra fill() is needed.
        canvas = np.zeros((oh + padding, ow + padding, 3), dtype=np.uint8)
        canvas[padding:oh + padding, padding:ow + padding] = image_np
        image_np = canvas

        image = Image.fromarray(image_np)
        new_image = image
        cv2.imwrite(image_path, image_np)

        # Re-read so downstream consumers see exactly what was written.
        image_np = cv2.imread(image_path)
        image = Image.fromarray(image_np)
        new_image = image

    cal_obj.image = cv2.imread(image_path)
    cal_obj.image = Image.fromarray(cal_obj.image)
    cal_obj.new_image = cal_obj.image
    cal_obj.star_thresh = 5
    cal_obj.find_stars()
    cal_obj.image_width = image_np.shape[1]
    cal_obj.image_height = image_np.shape[0]

    # Load observing-site coordinates and the capture time for the solver.
    config = read_config("conf/config-1.txt")
    cal_obj.loc_lat = config['device_lat']
    cal_obj.loc_lon = config['device_lng']
    cal_obj.loc_alt = config['device_alt']
    cal_obj.cal_time = cal_obj.parse_file_date(image_path)

    cal_obj.code_tolerance = .03
    cal_obj.update_path(image_path)

    # Retry the solve with progressively looser odds until one succeeds
    # (replaces four copy-pasted solve attempts in the original).
    odds = 100000
    for odds in (100000, 10000, 1000, 100):
        cal_obj.odds_to_solve = odds
        cal_obj.solve_field()
        if cal_obj.solve_success != 0:
            break

    return (cal_obj.solve_success, odds)
Esempio n. 17
0
def view(file, show=0):
    """Process one captured video, detect motion, and file the clip.

    Reads `file` frame-by-frame (processing every 5th frame), accumulates a
    running background with cv2.accumulateWeighted, records contour hits
    (positions, colors, frame numbers), counts starless "cloudy" frames,
    then classifies the clip and moves it with move_file() into one of:
    day / maybe / dist / calvid / false / no_motion_night.

    Returns 1 when a file is fully processed, 0 for a bad or missing file,
    and () for daytime files that are skipped up front.
    """
    nostars = 0
    skip = 0
    avg_color = 0
    max_cons_motion = 0
    straight_line = -1
    final_image = None
    start_time = int(time.time())

    # Split path into directory and bare file name.
    el = file.split("/")
    act_file = el[-1]
    dir = file.replace(act_file, "")

    # Output images go into a "stars/" subdirectory next to the video.
    acc_file = act_file.replace(".mp4", "-acc.jpg")
    star_file = act_file.replace(".mp4", "-stars.jpg")

    acc_file = dir + "stars/" + acc_file
    star_file = dir + "stars/" + star_file

    # before processing dump frames from this file for 24 hour time lapse
    #os.system("./dump-frames-tl.py " + file )

    # Derive the camera number from a "<date>-<cam>.avi" style name; fall
    # back to an empty config path when the name doesn't match.
    config_file = ""
    try:
        (file_date, cam_num) = file.split("-")
        cam_num = cam_num.replace(".avi", "")
    except:
        cam_num = ""

    if cam_num == "":
        config = read_config(config_file)
    else:
        #cam_num = sys.argv[1]
        config_file = "conf/config-" + cam_num + ".txt"
        config = read_config(config_file)

    #config = read_config(config_file)
    frame_time_data = []
    values = {}
    # Companion output file names, all derived from the .mp4 name.
    dir_name = os.path.dirname(file) + "/"
    file_name = file.replace(dir_name, "")
    summary_file_name = file_name.replace(".mp4", "-summary.txt")
    data_file_name = file_name.replace(".mp4", ".txt")
    screen_cap_file_name = file_name.replace(".mp4", ".jpg")
    object_file_name = file_name.replace(".mp4", "-objects.jpg")
    time_file_name = file_name.replace(".mp4", "-time.txt")
    capture_date = parse_file_date(file_name)
    #last_cal_date = # Get last / closest calibration date
    file_base_name = file_name.replace(".mp4", "")
    print(capture_date)
    status = day_or_night(config, capture_date)

    # Daytime clips are moved aside without processing.
    if status == "day":
        print("Skip this daytime file.")
        move_file(dir_name + file_name, "day")
        return ()

    # read in time file if it exists

    #if os.path.isfile(dir_name + "/" + time_file_name):
    #   frame_time_data = read_time_file(dir_name + "/" + time_file_name)
    #   print ("FRAME TIME DATA LENGTH:", len(frame_time_data))
    #   time.sleep(1)
    #else:
    #   print ("no frame time data! " + dir_name + "/" + time_file_name )
    #   for x in range(0, 225):
    #      frame_time_data.append("|||")

    #fps_t = 0
    #for ftd in frame_time_data:
    #   print ("FRAMEdata", ftd)
    #   fps, tc, tt, tx = ftd.split("|")
    #   if fps == "":
    #      fps = 0
    #   fps_t = int(float(fps_t)) + int(float(fps))
    #if len(frame_time_data) > 0:
    #   avg_fps = fps_t / len(frame_time_data)
    #else :
    #   avg_fps = 0

    # Per-frame FPS computation above is disabled; assume 25 fps.
    avg_fps = 25

    print("Viewing file: " + file)
    print("Directory: " + dir_name)
    print("File Name: " + file_name)
    print("Summary File Name: " + summary_file_name)
    print("Data File Name: " + data_file_name)
    print("Screen Cap File Name: " + screen_cap_file_name)
    print("Object File Name: " + object_file_name)
    print("Capture Date: " + capture_date)
    #print ("FPS: " + str(avg_fps))

    # make sure the file exists
    if os.path.isfile(file) is False:
        print("This file does not exist. Exiting.")
        return (0)
    else:
        print("The file is ok.")

    #process video

    tstamp_prev = None
    image_acc = None
    last_frame = None
    last_gray_frame = None
    nt_acc = None
    nice_image_acc = None
    final_image = None
    cur_image = None
    frame_count = 0

    # open data log file
    #fp = open(dir_name + "/" + data_file_name, "w")
    #fp2 = open(dir_name + "/" + summary_file_name, "w")
    #fp.write("frame|contours|x|y|w|h|color|fps|adjusted_unixtime|unixtime|time_offset\n")
    #fp2.write("frame|contours|x|y|w|h|color|fps|adjusted_unixtime|unixtime|time_offset\n")

    # NOTE(review): hard-codes show=1, overriding the `show` parameter.
    show = 1
    if show == 1:
        cv2.namedWindow('pepe')

    #cap = skvideo.io.VideoCapture(file)
    cap = cv2.VideoCapture(file)
    time.sleep(2)
    xs = []
    ys = []
    motion_frames = []
    frames = []
    colors = []

    show_sw = 0

    noise = 0
    prev_motion = 0
    cons_motion = 0
    motion_events = []

    while True:
        _, frame = cap.read()
        # Black out the on-screen timestamp area before any analysis.
        if frame is not None:
            frame[680:720, 0:620] = [0, 0, 0]
        frame_count = frame_count + 1
        #frames.extend([frame])

        # End of video, or too many consecutive starless (cloudy) frames:
        # summarize, classify and move the file.
        if frame is None or nostars > 5:
            if frame_count <= 1:
                print("Bad file.")
                return (0)
            else:
                end_time = int(time.time())
                elapsed = end_time - start_time
                print("Processed ", frame_count, "frames. in ", elapsed,
                      "seconds")
                # finish processing file and write output files

                total_motion = len(motion_frames)

                if total_motion > 3:
                    half_motion = int(round(total_motion / 2, 0))
                    print("key frame #1 : ", 1)
                    print("key frame #2 : ", half_motion)
                    print("key frame #3 : ", total_motion - 1)
                    print("Xs", xs)
                    print("Ys", ys)
                    print("MF", motion_frames)
                    avg_color = sum(colors) / float(len(colors))

                    print("CL", colors)
                    print("Avg Color: ", avg_color)

                    #object_file_image = (frames[motion_frames[1]] * .33) + (frames[motion_frames[half_motion]] * .33) + (frames[motion_frames[total_motion-2]] * .33)

                    # Three key points along the track, used for the
                    # straight-line test.
                    # NOTE(review): y1/y2/y3 read from xs, not ys — looks
                    # like a copy/paste slip; confirm intended behavior.
                    x1 = xs[1]
                    y1 = xs[1]
                    x2 = xs[half_motion]
                    y2 = xs[half_motion]
                    x3 = xs[total_motion - 2]
                    y3 = xs[total_motion - 2]

                    xmax = max(xs)
                    ymax = max(ys)
                    xmin = min(xs)
                    ymin = min(ys)
                    skip = 0

                    # No movement at all across the clip → not a meteor.
                    if xmax - xmin == 0 and ymax - ymin == 0:
                        skip = 1

                    straight_line = compute_straight_line(
                        x1, y1, x2, y2, x3, y3)
                    if len(motion_events) > 3:
                        max_cons_motion = max(motion_events)
                    if (straight_line < 1
                            and straight_line >= 0) or avg_color > 190:
                        meteor_yn = "Y"
                    else:
                        meteor_yn = "N"

                # NOTE(review): this overwrites the straight-line result
                # computed just above — every night clip starts as "Y" here.
                if status == 'night':
                    meteor_yn = "Y"
                else:
                    meteor_yn = "N"

                if total_motion <= 3:
                    meteor_yn = "N"

                #meteor_yn = "Y"
                if skip == 1:
                    meteor_yn = "N"
                    print("Skipping not enough x,y movement!", xmax, xmin,
                          ymax, ymin)
                if noise >= 5:
                    meteor_yn = "N"
                    print("Skipping to much noise!", noise)
                if avg_fps < 20:
                    meteor_yn = "N"
                    print("Skipping calibration file!", avg_fps)

                # write out the stacked image_acc and the stacked star file
                # nt_acc

                print("Writing:", acc_file)
                print("Writing:", star_file)

                #image_acc_cl = cv2.cvtColor(image_acc, cv2.COLOR_GRAY2BGR)
                #en_image_acc = Image.fromarray(cv2.convertScaleAbs(image_acc))

                enhancer = ImageEnhance.Brightness(final_image)
                enhanced_image = enhancer.enhance(1)
                np_enhanced_image = np.asarray(enhanced_image)
                #cv2.imwrite(acc_file, np_enhanced_image)

                # NOTE(review): total_stars/star_image are only assigned
                # inside the loop once frame_count > 5 — a very short video
                # would reach here with them undefined; confirm.
                print("Total Stars: ", total_stars)
                if total_stars == 0:
                    star_file = star_file.replace("/stars/", "/clouds/")
                    acc_file = acc_file.replace("/stars/", "/clouds/")
                cv2.imwrite(acc_file, np_enhanced_image)

                if total_stars > 0:
                    cv2.imwrite(star_file, star_image)
                #cv2.imshow('pepe', star_image)
                #cv2.waitKey(100)

                print("Status:", status)
                print("Total Motion:", total_motion)
                print("Cons Motion:", cons_motion)
                print("Straight Line:", straight_line)
                print("Likely Meteor:", meteor_yn)

                #obj_outfile = dir_name + "/" + object_file_name
                #sc_outfile = dir_name + "/" + screen_cap_file_name
                #cv2.imwrite(obj_outfile, object_file_image)
                #cv2.imwrite(sc_outfile, object_file_image)

                #write summary & data files

                #fp.close()
                #fp2.close()
                # prep event or capture for upload to AMS
                values['datetime'] = capture_date
                values['motion_frames'] = total_motion
                values['cons_motion'] = max_cons_motion
                values['color'] = avg_color
                values['straight_line'] = straight_line
                values['meteor_yn'] = meteor_yn
                values['bp_frames'] = total_motion
                print("ending here...")
                # NOTE(review): unconditional exit() — every statement below
                # in this branch (upload + file moves) is dead code.
                exit()

                if meteor_yn == 'Y':
                    try:
                        values['best_caldate'] = config['best_caldate']
                    except:
                        config['best_caldate'] = '0000-00-00 00:00:00'
                        values['best_caldate'] = config['best_caldate']
                    #try:
                    #log_fireball_event(config, file, dir_name + "/" + summary_file_name, dir_name + "/" + object_file_name, values)
                    #except:
                    #   print ("failed to upload event file.")
                    #   return(0)
                    #move files to maybe dir
                    print("Move to maybe dir!")
                    #os.system("mv " + dir_name + "/" + file_base_name + "* " + "/var/www/html/out/maybe/")

                    move_file(dir_name + file_name, "maybe")

                else:
                    #log_motion_capture(config, dir_name + "/" + object_file_name, values)
                    #try:
                    #log_motion_capture(config, dir_name + "/" + object_file_name, values)
                    #except:
                    #   print ("failed to upload capture file.")
                    #   return(0)
                    #print ("Move to false dir!")
                    if (skip == 1 or noise >= 5) and status == 'night':
                        #os.system("mv " + dir_name + "/" + file_base_name + "* " + "/var/www/html/out/dist/")
                        move_file(dir_name + file_name, "dist")

                    elif avg_fps < 20:
                        move_file(dir_name + file_name, "calvid")
                    else:
                        if total_motion >= 3:
                            move_file(dir_name + file_name, "false")
                        else:
                            move_file(dir_name + file_name, "no_motion_night")

                return (1)

        # main video processing loop here
        # Both branches use the same stride; kept as written.
        if status == "day":
            mod_skip = 5
        else:
            mod_skip = 5

        # Only every mod_skip-th frame is analyzed.
        if frame_count % mod_skip == 0:
            nice_frame = frame

            alpha, tstamp_prev = iproc.getAlpha(tstamp_prev)
            frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
            #print ("AFTER:", np.shape(frame))

            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray_frame = frame
            # if last_gray_frame is None:
            #    last_gray_frame = gray_frame
            # else:
            #    gray_frame_cmb = gray_frame + last_gray_frame
            #    last_gray_frame = gray_frame
            #    gray_frame = gray_frame_cmb

            # Blur, then diff against the running accumulated background to
            # isolate moving bright objects.
            frame = cv2.GaussianBlur(frame, (21, 21), 0)
            if last_frame is None:
                last_frame = nice_frame
            if image_acc is None:
                image_acc = np.empty(np.shape(frame))

            image_diff = cv2.absdiff(
                image_acc.astype(frame.dtype),
                frame,
            )
            # NOTE(review): alpha from iproc.getAlpha() is discarded and
            # replaced by this constant.
            alpha = .076
            hello = cv2.accumulateWeighted(frame, image_acc, alpha)
            _, threshold = cv2.threshold(image_diff, 30, 255,
                                         cv2.THRESH_BINARY)
            thresh = cv2.dilate(threshold, None, iterations=2)
            (_, cnts, xx) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                             cv2.CHAIN_APPROX_SIMPLE)

            # Keep a lighten-blend composite of all processed frames.
            frame_img = Image.fromarray(frame)
            if final_image is None:
                final_image = frame_img
            final_image = ImageChops.lighter(final_image, frame_img)

            # Star count doubles as a cloud detector: consecutive starless
            # frames bump `nostars` and eventually abort the loop.
            if frame_count > 5:
                np_final_image = np.asarray(final_image)
                gray_nice_frame = cv2.cvtColor(nice_frame, cv2.COLOR_BGR2GRAY)
                total_stars, star_image = find_stars(gray_nice_frame)
                if total_stars == 0:
                    print("Cloudy...???")
                    nostars = nostars + 1
                else:
                    nostars = 0
            # if you want to stack the accumulated frames..
            #image_acc_pil = Image.fromarray(cv2.convertScaleAbs(image_acc))
            #final_image=ImageChops.lighter(final_image,image_acc_pil)

            data = str(frame_count) + "|"

            color = 0
            contours = len(cnts)
            x, y, w, h = 0, 0, 0, 0

            # Many simultaneous contours usually means sensor noise/rain.
            if contours > 3:
                noise = noise + 1

            # Record the first contour's box, center brightness, and frame.
            if contours > 0 and frame_count > 5:
                x, y, w, h = cv2.boundingRect(cnts[0])
                mx = x + w
                my = y + h
                cx = int(x + (w / 2))
                cy = int(y + (h / 2))
                color = gray_frame[cy, cx]
                xs.extend([x])
                ys.extend([y])
                colors.extend([color])

                motion_frames.extend([frame_count])
                prev_motion = 1
                cons_motion = cons_motion + 1
            else:
                if cons_motion > 0:
                    motion_events.append(cons_motion)
                prev_motion = 0

            line_data = ""
            #line_data = str(frame_count) + "|" + str(contours) + "|" + str(x) + "|" + str(y) + "|" + str(w) + "|" + str(h) + "|" + str(color) + "|" + frame_time_data[frame_count-1] + "\n"

            #fp.write(line_data)
            #fp2.write(line_data)
            print(frame_count, contours, x, y, w, h, color)
            # Refresh the preview window every 10th frame.
            if frame_count % 10 == 0:
                np_final_image = np.asarray(final_image)
                np_star_image = np.asarray(star_image)
                small_star_image = cv2.resize(star_image, (0, 0),
                                              fx=0.5,
                                              fy=0.5)
                #cv2.imshow('pepe', small_star_image)
                cv2.imshow('pepe', np_final_image)
                #cv2.imshow('pepe', cv2.convertScaleAbs(image_acc))
                cv2.waitKey(1)
Esempio n. 18
0
def az_grid_borders(file):
    """Print image (x, y) points that fall just below 10-degree alt/az grid
    lines along the borders and three vertical mid-lines of a solved frame.

    Args:
        file: an astrometry "-sd.new" WCS solution file; the matching image
            is derived by replacing that suffix with ".jpg".

    Side effects: prints grid dimensions plus one "<LABEL>-ALT"/"<LABEL>-AZ"
    line per matching point. Returns None.
    """
    cam_num = 1
    config_file = "conf/config-" + str(cam_num) + ".txt"
    config = read_config(config_file)
    loc_lat = config['device_lat']
    loc_lon = config['device_lng']
    loc_alt = config['device_alt']

    W = WCS(file)
    image_file = file.replace("-sd.new", ".jpg")
    cal_time = parse_file_date(image_file)

    location = EarthLocation.from_geodetic(
        float(loc_lon) * u.deg, float(loc_lat) * u.deg, float(loc_alt) * u.m)

    az_grid = cv2.imread(image_file)
    az_grid_np = cv2.cvtColor(az_grid, cv2.COLOR_BGR2GRAY)

    print("Grid y is ", az_grid_np.shape[0])
    print("Grid x is ", az_grid_np.shape[1])

    def _scan_line(label, points):
        # Print each point whose altitude or azimuth sits within 0.7 deg
        # below a multiple of 10 (i.e. 10 - value % 10 > 9.3).
        for x, y in points:
            (oalt, oaz) = convert_xy_to_altaz(x, y, W, location, cal_time)
            if 10 - (oalt % 10) > 9.3:
                print(label + "-ALT", x, y, oaz, oalt)
            if 10 - (oaz % 10) > 9.3:
                print(label + "-AZ", x, y, oaz, oalt)

    h = az_grid_np.shape[0]
    w = az_grid_np.shape[1]

    # Four image borders. The x=w / y=h values are one past the last pixel;
    # they are only fed to the WCS conversion, never used as array indices
    # (preserved from the original).
    _scan_line("LEFT", ((0, y) for y in range(h)))
    _scan_line("RIGHT", ((w, y) for y in range(h)))
    _scan_line("TOP", ((x, 0) for x in range(w)))
    _scan_line("BOTTOM", ((x, az_grid.shape[0]) for x in range(w)))

    # NOTE(review): these mid-line scans take x from shape[0] (height) and
    # iterate y over shape[1] (width) — preserved exactly from the original;
    # it looks like a width/height mix-up, confirm before "fixing".
    _scan_line("MID",
               ((int(az_grid.shape[0] * .5), y)
                for y in range(az_grid_np.shape[1])))
    _scan_line("MIDL",
               ((int(az_grid.shape[0] * .33333), y)
                for y in range(az_grid_np.shape[1])))
    _scan_line("MIDR",
               ((int(az_grid.shape[0] * .66666667), y)
                for y in range(az_grid_np.shape[1])))
Esempio n. 19
0
#!/usr/bin/python3
# Send a one-shot settings command (paramcmd=1058, value 25) to the camera
# selected by the command-line camera number.
import requests
import sys
from amscommon import read_config

cam_num = sys.argv[1]

config = read_config("conf/config-" + cam_num + ".txt")
url = ("http://" + config['cam_ip'] +
       "/webs/btnSettingEx?flag=1000&paramchannel=0&paramcmd=1058&paramctrl=25&paramstep=0&paramreserved=0&")
r = requests.get(url)
Esempio n. 20
0
def draw_az_grid(file):
   """Overlay a 10-degree altitude/azimuth grid onto a plate-solved image.

   *file* is the path of a WCS solution file (named "...-sd.new"); the
   matching JPEG (same name with ".jpg") is loaded, scanned for pixels whose
   sky coordinates fall within tolerance of a 10-degree alt or az boundary,
   and those pixels are painted white.  The result is written to
   "azgrid.png" in the working directory.  Returns None.

   NOTE(review): the parameter name shadows the historic builtin ``file``;
   kept as-is for interface compatibility.
   """
   first_list = []  # NOTE(review): never used below

   # Camera 1 is hard-coded; its config supplies the observer's geodetic location.
   cam_num = 1
   config_file = "conf/config-" + str(cam_num) + ".txt"
   config = read_config(config_file)
   loc_lat = config['device_lat']
   loc_lon = config['device_lng']
   loc_alt = config['device_alt']

   W = WCS(file)
   image_file = file.replace("-sd.new", ".jpg")
   # cal_time is parsed from the image filename (capture timestamp).
   cal_time = parse_file_date(image_file)


   location = EarthLocation.from_geodetic(float(loc_lon)*u.deg,float(loc_lat)*u.deg,float(loc_alt)*u.m)

   az_grid = cv2.imread(image_file)
   # Grayscale copy is only used for its shape (image dimensions) below.
   az_grid_np = cv2.cvtColor(az_grid, cv2.COLOR_BGR2GRAY)

   #azpp1, altpp1 = characterize_altaz(0,0,320,180,W, location, cal_time)
   #azpp2, altpp2 = characterize_altaz(0,0,0,360,W, location, cal_time)
   #azpp3, altpp3 = characterize_altaz(0,0,640,0,W, location, cal_time)
   # mypass is a skip counter: while > 0, rows/cols are skipped without
   # computing the (expensive) xy->altaz conversion.
   mypass = 0
   print ("Grid y is ", az_grid_np.shape[0])
   print ("Grid x is ", az_grid_np.shape[1])

   # Running estimates of degrees-per-pixel (az_pp, at_pp) and their
   # smoothed averages; used to guess how many pixels can be skipped before
   # the next 10-degree boundary is reached.
   last_az_pp = 0
   last_at_pp = 0
   avg_az_pp = 0
   avg_at_pp = 0
   az_pp = 0
   at_pp = 0
   last_alt= 0
   last_az  = 0
   last_x  = None 
   last_y  = None 

   # Vertical sweep: every 50th column, walk down the rows and whiten pixels
   # sitting on a 10-degree ALTITUDE boundary.
   for x in range(0,az_grid_np.shape[1]):
      if (x % 50 == 0) :
         for y in range(az_grid_np.shape[0]):
            if mypass >= 1:
               mypass = mypass - 1
               #print ("pass is ", mypass, "skipping")
               continue
            else:
               if (y % 1 == 0) :
                  #print("TRACE: ", x,y,cal_time)
                  (oalt,oaz) = convert_xy_to_altaz(x,y,W,location,cal_time)
                  # remat/remaz: degrees remaining until the next 10-degree
                  # alt/az boundary; values near 0 or near 10 mean "on a line".
                  remat = 10 - (oalt % 10)
                  remaz = 10 - (oaz % 10)
                  #print ("REMAT/REMAZ: ", remat, remaz)
                  if remat < .7 or remat > 9.3:
                     print ("**** x,y,az,alt", remat, x,y,oaz,oalt)
                     az_grid[y,x] = [255,255,255]
                     # just drew a line: skip the next 20 rows outright
                     mypass = 20
                  else:
                  #   print ("remat, x,y,az,alt", remat,x,y,oaz,oalt)
                     if avg_at_pp != 0:
                        # estimate pixels until the next boundary and skip
                        # half that distance
                        skip = remat / avg_at_pp 
                        #print ("skip = ", skip)
                        mypass = skip * .5
                     if remat > 7:
                        mypass = 0 

                  if last_x != None:
                     # update the per-pixel degree rates from the change since
                     # the last sampled point (guarding zero pixel deltas)
                     if abs(x - last_x) != 0:
                        az_pp = abs(oaz - last_az) / abs(x - last_x)
                     else:
                        az_pp = 0
                     if abs(y - last_y) != 0:
                        at_pp = abs(oalt - last_alt) / abs(y - last_y)
                     else:
                        at_pp = 0
                     #print ("AZ/EL Per Pixel = ", last_az_pp, last_at_pp)
                     if avg_az_pp == 0:               
                        avg_az_pp = az_pp
                     avg_az_pp = (avg_az_pp + last_az_pp + az_pp) / 3
                     if avg_at_pp == 0:               
                        avg_at_pp = at_pp
                     avg_at_pp = (avg_at_pp + last_at_pp + at_pp) / 3
              
                  last_az_pp = az_pp  
                  last_at_pp = at_pp  
                  last_az = oaz
                  last_alt = oalt
                  last_x = x
                  last_y = y 



   # Horizontal sweep: every 50th row, walk across the columns and whiten
   # pixels sitting on a 10-degree AZIMUTH boundary.  mypass carries over
   # from the loop above -- presumably intentional; verify.
   for y in range(0,az_grid_np.shape[0]):
      if (y % 50 == 0) :
         for x in range(az_grid_np.shape[1]):
            if mypass >= 1:
               mypass = mypass - 1
               #print ("pass is ", mypass, "skipping")
               continue
            else:
               if (x % 1 == 0) :
                  (oalt,oaz) = convert_xy_to_altaz(x,y,W,location,cal_time)
                  remat = 10 - (oalt % 10)
                  remaz = 10 - (oaz % 10)
                  #print ("REMAT/REMAZ: ", remat, remaz)
                  if remaz < .7 or remaz > 9.3:
                     print ("**** x,y,az,alt", x,y,oaz,oalt)
                     az_grid[y,x] = [255,255,255]
                     mypass = 20
                  else:
                     #print ("remaz, x,y,az,alt", remaz,x,y,oaz,oalt)
                     # fixed skip sizes based on distance to the next boundary
                     if remaz > 8:
                        mypass = 20
                     if remaz < 8 and remaz > 5:
                        mypass = 8 
                     if remaz < 5 and remaz > 3:
                        mypass = 4 
                     if remaz < 3:
                        mypass = 0 





   cv2.imwrite("azgrid.png", az_grid)      
Esempio n. 21
0
# Manual smoke test: push one known capture through view.log_fireball_event.
from view import log_fireball_event
from amscommon import read_config, caldate

# Detection metadata for the 2017-06-22 13:27:58 test clip.
values = {
    'datetime': "2017-06-22 13:27:58",
    'motion_frames': 2,
    'cons_motion': 2,
    'color': 1,
    'straight_line': 1,
    'meteor_yn': 0,
    'bp_frames': 3,
}

log_fireball_event(read_config(), "./test_data/20170622132758.avi",
                   "./test_data/20170622132758-summary.txt",
                   "./test_data/20170622132758-objects.jpg", values)
def read_noise(config_file, cam_num):
    """Auto-tune camera brightness by measuring frame-to-frame noise.

    Reads up to 1000 frames from the camera's secondary RTSP stream,
    thresholds the absolute difference between consecutive frames, and over
    each 150-frame window computes the ratio of frames that showed any
    noise.  Brightness is nudged down when the ratio is >= .05 (sharply
    when > .4), up when it is < .02, and the function returns once the
    ratio lands in [.02, .05).  Calls exit() if brightness falls below 60.

    Parameters:
        config_file -- path of the camera config file read by read_config()
        cam_num     -- camera number (unused in the body; kept for interface)
    """
    last_frame = None
    config = read_config(config_file)
    cur_settings = get_settings(config)
    new_brightness = int(cur_settings['Brightness'])
    cap = cv2.VideoCapture("rtsp://" + config['cam_ip'] + "/av0_1")
    print(config['cam_ip'])

    nr = 0
    nrfc = 0   # frames examined in the current window
    nrc = 0    # frames with any noise in the current window
    cc = 0     # window position counter (resets every 150 diffed frames)
    for fc in range(0, 1000):
        _, frame = cap.read()
        # BUGFIX: check for a dropped frame BEFORE converting it; the
        # original called cv2.cvtColor on the frame first and only tested
        # for None afterwards, so any failed read crashed the loop.
        if frame is None:
            continue
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Blank the on-image timestamp area so it never registers as noise.
        frame[460:480, 0:310] = [255]

        if last_frame is not None and fc > 10:
            image_diff = cv2.absdiff(
                last_frame,
                frame,
            )
            _, threshold = cv2.threshold(image_diff, 30, 255,
                                         cv2.THRESH_BINARY)
            noise = threshold.sum()
            if (noise > 0):
                nrc = nrc + 1
            if cc == 150:
                nr = nrc / nrfc
                print("Noise ratio is: ", nrc, nrfc, nr)
                nrc = 0
                nrfc = 0
                cc = 0
                if .02 <= nr < .05:
                    print("brightness is good.", new_brightness)
                    cap.release()  # BUGFIX: release the stream before returning
                    return ()
                if nr >= .05:
                    print("Too much noise, lower brightness")
                    if nr > .4:
                        new_brightness = new_brightness - 10
                    else:
                        new_brightness = new_brightness - 2
                    set_setting(config, "Brightness", new_brightness)
                    print("New Brightness", new_brightness)
                    if new_brightness < 60:
                        exit()
                else:
                    print("not enough noise, increase brightness")
                    new_brightness = new_brightness + 5
                    set_setting(config, "Brightness", new_brightness)
                    print("New Brightness", new_brightness)
                time.sleep(1)

            cc = cc + 1

        last_frame = frame
        nrfc = nrfc + 1
    cap.release()  # BUGFIX: also release when the 1000-frame budget runs out
Esempio n. 23
0
#!/usr/bin/python3

import os
import time
from amscommon import read_config
import netifaces
# system boot script runs each time system is booted.
# Handles first time registration and setup
# once setup simply does sanity check and logs to ams
# Give the network and services 30s to come up before touching config/NICs.
time.sleep(30)
config = read_config()
try:
    # a populated device_lat means registration has already happened
    if (config['device_lat'] != ''):
        print("setup.")
except:
    # missing key -> first boot; device has not been registered yet
    print("device not setup yet.")

# Leave a marker file so other scripts can tell the system has booted.
fp = open("/home/pi/fireball_camera/booted.txt", "w")
fp.write("booted")
fp.close()

# Run device registration/setup; capture its output in a boot log.
os.system(
    "cd /home/pi/fireball_camera; ./mkdevice.py > /home/pi/fireball_camera/boot-log.txt"
)

# Record interface IPs, falling back to 0.0.0.0 when an interface is absent.
try:
    eth0_ip = netifaces.ifaddresses('eth0')[netifaces.AF_INET][0]['addr']
except:
    eth0_ip = "0.0.0.0"
try:
    wlan0_ip = netifaces.ifaddresses('wlan0')[netifaces.AF_INET][0]['addr']
def get_calibration_frames(config_file, cam_num):
    """Capture a calibration stack from a camera and save it as a JPEG.

    Pushes the "Calibration" settings profile (Brightness, BLC, Gamma,
    Contrast) to the camera, senses it up, reads 100 frames from the main
    RTSP stream, builds a lighten-blended stack of rolling median images,
    writes it to /mnt/ams2/cal/<timestamp>-<cam_num>.jpg, then senses the
    camera back down and restores settings via sro-settings.py.

    A marker file /home/pi/fireball_camera/calnow<cam_num> exists for the
    duration of the run so other processes know calibration is in progress.

    Parameters:
        config_file -- path of the camera config file read by read_config()
        cam_num     -- camera number used in marker/output file names
    """
    config = read_config(config_file)
    # Previous settings are fetched but restoration is delegated to
    # sro-settings.py at the end of the run.
    prev_settings = get_settings(config)
    settings = custom_settings("Calibration", config)
    # Create the "calibration in progress" marker file.
    # BUGFIX: close the handle instead of leaking it for the whole run.
    fp = open("/home/pi/fireball_camera/calnow" + str(cam_num), "w")
    fp.close()

    # Push calibration settings to the camera.
    set_setting(config, "Brightness", settings['Brightness'])
    set_special(config, "1017", settings['BLC'])  # 1017 = BLC register
    set_setting(config, "Gamma", settings['Gamma'])
    set_setting(config, "Contrast", settings['Contrast'])
    time.sleep(2)

    # Sense up (paramctrl=12).  BUGFIX: timeout added so an unreachable
    # camera cannot hang the calibration run forever.
    r = requests.get(
        "http://" + config['cam_ip'] +
        "/webs/btnSettingEx?flag=1000&paramchannel=0&paramcmd=1058&paramctrl=12&paramstep=0&paramreserved=0&",
        timeout=10)
    time.sleep(3)

    os.system("./allsky6-calibrate.py read_noise " + str(cam_num))
    cap = cv2.VideoCapture("rtsp://" + config['cam_ip'] + "/av0_0")

    cv2.setUseOptimized(True)
    time_start = time.time()
    time.sleep(15)

    med_arr = []
    final_image = None
    for i in range(0, 100, 1):
        _, frame = cap.read()
        if i % 15 != 0 or i == 0:
            if frame is not None:
                med_arr.append(frame)
                print("Getting frame.")
        elif med_arr:
            # Every 15th frame: fold the median of the frames collected so
            # far into the lighten-blended stack.
            # BUGFIX: guarded with "elif med_arr" -- the original called
            # np.median on an empty list (crash) when no frame had been read.
            print("LEN MED", len(med_arr))
            med_arr_np = np.array(med_arr)
            median_image = np.median(med_arr_np, axis=0)
            median = np.uint8(median_image)

            frame_img = Image.fromarray(median)
            if final_image is None:
                final_image = frame_img
            final_image = ImageChops.lighter(final_image, frame_img)

    frame_time = time.time()
    format_time = datetime.datetime.fromtimestamp(
        int(frame_time)).strftime("%Y%m%d%H%M%S")
    out_file = "{}/{}-{}.jpg".format("/mnt/ams2/cal", format_time, cam_num)
    # BUGFIX: only save when at least one stack image was produced; the
    # original crashed on final_image.save when every read failed.
    if final_image is not None:
        final_image.save(out_file, "JPEG")

    time.sleep(10)
    # Sense the camera back down (paramctrl=50).
    r = requests.get(
        "http://" + config['cam_ip'] +
        "/webs/btnSettingEx?flag=1000&paramchannel=0&paramcmd=1058&paramctrl=50&paramstep=0&paramreserved=0&",
        timeout=10)
    cap.release()

    time.sleep(3)
    # Remove the in-progress marker and restore normal operating settings.
    os.system("rm /home/pi/fireball_camera/calnow" + str(cam_num))

    os.system("/home/pi/fireball_camera/sro-settings.py " + str(cam_num))
        config['cam_ip']
    ) + "/webs/btnSettingEx?flag=1000&paramchannel=0&paramcmd=" + str(
        field) + "&paramctrl=" + str(value) + "&paramstep=0&paramreserved=0"
    print(url)
    r = requests.get(url)
    print(r.text)


# Resolve which camera to operate on from the command line:
#   <script> ip <cam_ip> <cam_num>  -- use an explicit IP (config assumed
#                                      to be defined earlier in this file)
#   <script> <cam_num>              -- look the IP up in conf/config-<n>.txt
if sys.argv[1] == 'ip':
    config['cam_ip'] = sys.argv[2]
    cam_num = sys.argv[3]
else:
    try:
        cam_num = sys.argv[1]
        config_file = "conf/config-" + cam_num + ".txt"
        config = read_config(config_file)
    except Exception:
        # BUGFIX: the original handler re-read config_file, which is
        # undefined when sys.argv[1] is missing -- a guaranteed NameError
        # inside the except.  Fall back to the default config instead.
        config = read_config()

cam_ip = config['cam_ip']
print(config['cam_ip'])
# SECURITY/REVIEW: hard-coded camera password overrides whatever the config
# file supplied -- should come from config, not source code.
config['cam_pwd'] = "xrp23q"
Esempio n. 26
0
def diff_stills(sdate, cam_num, show_video):
   if show_video == 1:
      cv2.namedWindow('pepe')
   image_thresh = []
   med_last_objects = []
   last_objects = deque(maxlen=5) 
   diffed_files = []
   config = read_config("conf/config-1.txt")
   video_dir = "/mnt/ams2/SD/"
   images = []
   images_orig = []
   images_blend = []
   images_info = []
   count = 0
   last_image = None
   last_thresh_sum = 0
   hits = 0
   avg_cnt = 0
   avg_tot = 0
   avg_pts = 0
   count = 0

   glob_dir = video_dir + "proc/" + sdate + "/" + "*cam" + str(cam_num) + "-blend.jpg"
   report_file = video_dir + "proc/" + sdate + "/" + sdate + "-cam" + str(cam_num) + "-report.txt"
   master_stack_file = video_dir + "proc/" + sdate + "/" + sdate + "-cam" + str(cam_num) + "-master_stack.jpg"

   #cv2.namedWindow('pepe')
   mask_file = "conf/mask-" + str(cam_num) + ".txt"
   file_exists = Path(mask_file)
   mask_exists = 0
   still_mask = [0,0,0,0]
   if (file_exists.is_file()):
      print("File found.")
      ms = open(mask_file)
      for lines in ms:
         line, jk = lines.split("\n")
         exec(line)
      ms.close()
      mask_exists = 1
      (sm_min_x, sm_max_x, sm_min_y, sm_max_y) = still_mask
   diffs = 0
   image_list = []
   file_list = []
   sorted_list = []
   print ("Loading still images from ", glob_dir)

   for filename in (glob.glob(glob_dir)):
       capture_date = parse_file_date(filename)
       sun_status, sun_alt = day_or_night(config, capture_date)
       #if sun_status != 'day' and int(sun_alt) < -4:
          #print("NIGHTTIME", capture_date, filename, sun_status)
       file_list.append(filename)
       #else: 
       #   print ("This is a daytime or dusk file", filename)
   
   sorted_list = sorted(file_list)
   print ("Loading Images...")
   for filename in sorted_list:
      open_cv_image = cv2.imread(filename,0)
      orig_image = open_cv_image
      open_cv_image[440:480, 0:640] = [0]


      images_orig.append(orig_image)
      images.append(open_cv_image)



   print ("Finished Loading Images.", len(sorted_list))
   if len(sorted_list) == 0:
      print ("Pre-processing of stack blends has not happened yet. Aborting...")
      exit()
   diff_sums = []
   height , width =  open_cv_image.shape
   master_stack = None 
   objects = None
   last_line_groups = []
   last_points = []
   count = 0
   cnts_counts = []
   for filename in sorted_list:
      thresh_file = filename.replace("blend", "diff")
      file_exists = Path(thresh_file)
      if (file_exists.is_file()):
         open_cv_image = cv2.imread(thresh_file,0)
         image_thresh.append(open_cv_image)
         sum = np.sum(open_cv_image)
         diff_sums.append(sum)
         # file already exists, just load it and move forward

      else:
         # file doesn't exist yet so make it and save it. 


         print ("THESH: ", thresh_file)
         hit = 0
         detect = 0
         el = filename.split("/")
         fn = el[-1]
         current_image = images[count]
         current_filename = sorted_list[count]
   
         if count >= 2:
            before_image2 = images[count-2]
            before_image = images[count-1]
            before_filename = sorted_list[count-1]
         else:
            before_image2 = images[count+2]
            before_image = images[count+2]
            before_filename = sorted_list[count+2]
   
         if count >= len(sorted_list)-2:
            after_image2 = images[count-3]
            after_image = images[count-2]
            after_filename = sorted_list[count-2]
         else:
            after_image2 = images[count+2]
            after_image = images[count+1]
            after_filename = sorted_list[count+1]

         if count < 25:
            median = np.median(np.array(images[0:count+25]), axis=0)
         elif len(images) - count < 25:
            median = np.median(np.array(images[count-25:count]), axis=0)
         else:
            median = np.median(np.array(images[count-25:count]), axis=0)

         median = np.uint8(median)

         blur_med = cv2.GaussianBlur(median, (5, 5), 0)

         # find bright areas in median and mask them out of the current image
         tm = find_best_thresh(blur_med, 30, 1)

         md = np.median(blur_med)
         av = np.average(blur_med)
         #tm = md + (av /1)

         #median_thresh = cv2.adaptiveThreshold(blur_med,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,11,1)
         _, median_thresh = cv2.threshold(blur_med, tm, 255, cv2.THRESH_BINARY)


         (_, median_cnts, xx) = cv2.findContours(median_thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
         med_diff = cv2.absdiff(current_image.astype(current_image.dtype), median,)
         image_diff = cv2.absdiff(current_image.astype(current_image.dtype), before_image,)
         image_diff2 = cv2.absdiff(current_image.astype(current_image.dtype), before_image2,)
         aft_image_diff = cv2.absdiff(current_image.astype(current_image.dtype), after_image,)
         aft_image_diff2 = cv2.absdiff(current_image.astype(current_image.dtype), after_image2,)
         bef_aft_image_diff = cv2.absdiff(before_image.astype(current_image.dtype), after_image,)
         median_three = np.median((image_diff, image_diff2, aft_image_diff, aft_image_diff2, med_diff, current_image), axis=0)
         median_three = np.uint8(median_three)

         # block out bright parts of the median
         for (i,c) in enumerate(median_cnts):
            x,y,w,h = cv2.boundingRect(median_cnts[i])
            my = y - 30
            mx = x - 30
            mmy = y + 30
            mmx = x + 30
            if mmy >= current_image.shape[0]:
               mmy =current_image.shape[0] 
            if mmx >= current_image.shape[1]:
               mmx =current_image.shape[1] 
            image_diff[my:mmy, mx:mmx] = [0]
            #before_image[my:mmy, mx:mmx] = [0]



   
         md = np.median(current_image)
         av = np.average(current_image)
         print ("working on (md,av): ", filename, md, av)
         #thresh_limit = md + (av /1)
         thresh_limit = 10


         #_, thresh = cv2.threshold(median_three, thresh_limit, 255, cv2.THRESH_BINARY)
         thresh = cv2.adaptiveThreshold(median_three,255,cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,11,-10)
         this_thresh = thresh.copy()
         cnts = []
         real_cnt = 0
         real_cnt_space = 0
         if count >= 1:
            # zero out the bright areas in the last image diff so they don't show up in this one. 
            (_, cnts, xx) = cv2.findContours(last_thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            for (i,c) in enumerate(cnts):
               x,y,w,h = cv2.boundingRect(cnts[i])
               this_thresh[y-20:y+h+20, x-20:x+w+20] = [0]
               # before_image[y-20:y+h+20, x-20:x+w+20] = [0]
               if w > 1 and h > 1:
                  real_cnt = real_cnt + 1
                  real_cnt_space = real_cnt_space + (w*h)

         last_thresh = thresh

         sum = np.sum(this_thresh)
         diff_sums.append(sum)
         #noise, this_thresh = find_noisy_cnts(this_thresh)
         #if count > 0:


            #cv2.putText(this_thresh, str(before_filename),  (5,460), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
            #cv2.putText(this_thresh, str(current_filename),  (5,470), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)

         if show_video == 1:
            cv2.imshow('pepe', cv2.convertScaleAbs(median_three))
            cv2.waitKey(100)
            cv2.imshow('pepe', this_thresh)
            cv2.waitKey(100)
         cv2.imwrite(thresh_file, this_thresh)

         image_thresh.append(this_thresh)
         #if (len(image_thresh) > 1000): 
         #   break
         cnts_counts.append((real_cnt,real_cnt_space))
         count = count + 1

   exit()
   md = np.median(diff_sums)
   mav = np.average(diff_sums)
   count = 0
   dfs_count = 0
   noise = 0
   for img in image_thresh:
      current_image = images[count]
      file = sorted_list[count]
      sum = diff_sums[count]
      #cnt_count,cnt_space = cnts_counts[count]
      print(file)
      #noise, img = find_noisy_cnts(img)

      good, good_angles, img = find_cnts(img)
      print ("CNTs,S,Noise,M,A,G,GA:", good, noise, sum, md, mav, good, good_angles)
      #edges = cv2.Canny(img, 30,255)
      blend = cv2.addWeighted(img, .2, current_image, .8,0)
      #cv2.imshow('pepe', img)
      #if good >= 0 and good_angles >= 0:
      #   dfs_count = dfs_count + 1
      #   while(1):
      #      k = cv2.waitKey(33)
      #      if k == 32:
      #         break
      #      if k == 27:
      #         exit()
      #else:
      #   cv2.waitKey(1)
      count = count + 1
   print ("Total Images:", count)
   print ("Total Diffs:", dfs_count)