Code example #1
def init(is_debug, pixformat, delay_time):
    # Close the UARTs to prevent overflow during initialization
    uart.deinit()
    uart2.deinit()

    sensor.reset()

    sensor.set_pixformat(sensor.RGB565)  #RGB565

    sensor.set_framesize(sensor.QVGA)  #320*240

    sensor.set_gainceiling(128)  # Gain ceiling: 2, 4, 8, 16, 32, 64, 128
    sensor.set_contrast(3)  # Contrast: -3 to +3
    sensor.set_brightness(0)  # Brightness: -3 to +3
    sensor.set_saturation(3)  # Saturation: -3 to +3
    sensor.set_auto_exposure(True)  # Auto exposure

    sensor.skip_frames(time=delay_time)
    sensor.set_auto_gain(False)  # Must be disabled for color tracking
    sensor.set_auto_whitebal(False)  # Must be disabled for color tracking

    # Re-open the UARTs
    uart.init(115200, timeout_char=1000)
    uart2.init(115200, timeout_char=1000)
    # Check whether we are in debug mode
    global CompetitionScene
    if is_debug:
        CompetitionScene = 0
    else:
        CompetitionScene = 1
Code example #2
File: SigDetect.py Project: jackykwok123/IARRC
def run(argv):
    mode = argv
    if (mode == 0):
        #check Arrow
        sensor.reset()
        '''sensor.set_auto_gain(False)
        sensor.set_contrast(1)
        sensor.set_gainceiling(16)
        #sensor.set_windowing((200, 200)) # 240x240 center pixels of VGA
        sensor.set_framesize(sensor.QQVGA)
        sensor.set_pixformat(sensor.GRAYSCALE)
        sensor.set_auto_whitebal(False)
        '''
        sensor.set_pixformat(sensor.GRAYSCALE)
        sensor.set_framesize(sensor.QQVGA)
        sensor.set_vflip(True)
        sensor.set_hmirror(True)
        sensor.skip_frames(time=2000)

        findArrow()
    else:
        #check signal mode
        sensor.reset()
        sensor.set_auto_gain(False)
        sensor.set_auto_whitebal(True)
        sensor.set_contrast(-3)
        sensor.set_brightness(-3)
        sensor.set_gainceiling(8)
        sensor.set_pixformat(sensor.RGB565)
        sensor.set_vflip(True)
        sensor.set_framesize(sensor.VGA)
        sensor.set_windowing((240, 240))  # 240x240 center pixels of VGA
        #sensor.set_windowing((200, 200)) # 200x200 center pixels of VGA
        sensor.skip_frames(time=800)
        checkSignal()
Code example #3
def test_color_bars():
    sensor.reset()
    # Set sensor settings
    sensor.set_brightness(0)
    sensor.set_saturation(3)
    sensor.set_gainceiling(8)
    sensor.set_contrast(2)

    # Set sensor pixel format
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.RGB565)

    # Enable colorbar test mode
    sensor.set_colorbar(True)

    # Skip a few frames to allow the sensor to settle down
    for i in range(0, 100):
        image = sensor.snapshot()

    #color bars thresholds
    t = [
        lambda r, g, b: r < 70 and g < 70 and b < 70,  # Black
        lambda r, g, b: r < 70 and g < 70 and b > 200,  # Blue
        lambda r, g, b: r > 200 and g < 70 and b < 70,  # Red
        lambda r, g, b: r > 200 and g < 70 and b > 200,  # Purple
        lambda r, g, b: r < 70 and g > 200 and b < 70,  # Green
        lambda r, g, b: r < 70 and g > 200 and b > 200,  # Aqua
        lambda r, g, b: r > 200 and g > 200 and b < 70,  # Yellow
        lambda r, g, b: r > 200 and g > 200 and b > 200
    ]  # White

    # color bars are inverted for OV7725
    if (sensor.get_id() == sensor.OV7725):
        t = t[::-1]

    #320x240 image with 8 color bars each one is approx 40 pixels.
    #we start from the center of the frame buffer, and average the
    #values of 10 sample pixels from the center of each color bar.
    for i in range(0, 8):
        avg = (0, 0, 0)
        idx = 40 * i + 20  #center of colorbars
        for off in range(0, 10):  #avg 10 pixels
            rgb = image.get_pixel(idx + off, 120)
            avg = tuple(map(sum, zip(avg, rgb)))

        if not t[i](avg[0] / 10, avg[1] / 10, avg[2] / 10):
            raise Exception('COLOR BARS TEST FAILED. '
                            'BAR#(%d): RGB(%d,%d,%d)' %
                            (i + 1, avg[0] / 10, avg[1] / 10, avg[2] / 10))

    print('COLOR BARS TEST PASSED...')
Code example #4
File: main.py Project: tfhulbert/FG-B-2018
    def init(self, robot_):
        self.robot = robot_
        if self.robot == self.ROBOT_O:  #O_bot
            self.thresholds = [
                (35, 100, 26, 78, 22, 72),  #Ball
                (68, 100, -19, 27, 41, 81),  #Yellow Goal
                (20, 41, -6, 15, -55, -15)
            ]  # Blue Goal
            self.window = (59, 18, 184, 181)
        elif self.robot == self.ROBOT_P2:  #P2_bot
            self.thresholds = [
                (39, 100, 26, 78, 22, 72),  #Ball
                (68, 100, -19, 27, 41, 81),  #Yellow Goal
                (20, 41, -6, 25, -55, -15)
            ]  # Blue Goal
            self.window = (71, 4, 193, 191)

        # sensor setup
        sensor.reset()
        sensor.set_pixformat(sensor.RGB565)
        sensor.set_framesize(sensor.QVGA)  #Resolution
        sensor.set_windowing(self.window)

        sensor.skip_frames(time=1000)

        sensor.set_auto_exposure(False)
        sensor.set_auto_whitebal(False)
        # Need to let the above settings get in...
        sensor.skip_frames(time=250)

        # === GAIN ===
        curr_gain = sensor.get_gain_db()
        sensor.set_auto_gain(False, gain_db=curr_gain)

        # === EXPOSURE ===
        curr_exposure = sensor.get_exposure_us()
        sensor.set_auto_exposure(False, exposure_us=int(curr_exposure * 0.5))

        # === WHITE BAL ===
        sensor.set_auto_whitebal(
            False, rgb_gain_db=(-6.02073, -3.762909,
                                3.33901))  #Must remain false for blob tracking

        sensor.set_brightness(2)
        sensor.set_contrast(2)
        sensor.set_saturation(2)

        sensor.skip_frames(time=500)
Code example #5
def init(is_debug, delay_time):
    uart.deinit()
    sensor.reset()
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_contrast(3)
    sensor.set_brightness(-3)
    sensor.set_auto_exposure(True)
    sensor.skip_frames(time=delay_time)
    sensor.set_auto_whitebal(False)
    uart.init(115200, timeout_char=1000)
    lcd.init()
    global CompetitionScene
    if is_debug:
        CompetitionScene = 0
    else:
        CompetitionScene = 1
Code example #6
def test_color_bars():

    sensor.reset()
    # Set sensor settings
    sensor.set_brightness(0)
    sensor.set_saturation(0)
    sensor.set_gainceiling(8)
    sensor.set_contrast(2)

    # Set sensor pixel format
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.RGB565)

    # Enable colorbar test mode
    sensor.set_colorbar(True)

    # Skip a few frames to allow the sensor settle down
    # Note: This takes more time when exec from the IDE.
    for i in range(0, 100):
        image = sensor.snapshot()

    # Color bars thresholds
    t = [lambda r, g, b: r < 50  and g < 50  and b < 50,   # Black
         lambda r, g, b: r < 50  and g < 50  and b > 200,  # Blue
         lambda r, g, b: r > 200 and g < 50  and b < 50,   # Red
         lambda r, g, b: r > 200 and g < 50  and b > 200,  # Purple
         lambda r, g, b: r < 50  and g > 200 and b < 50,   # Green
         lambda r, g, b: r < 50  and g > 200 and b > 200,  # Aqua
         lambda r, g, b: r > 200 and g > 200 and b < 50,   # Yellow
         lambda r, g, b: r > 200 and g > 200 and b > 200]  # White

    # 320x240 image with 8 color bars each one is approx 40 pixels.
    # we start from the center of the frame buffer, and average the
    # values of 10 sample pixels from the center of each color bar.
    for i in range(0, 8):
        avg = (0, 0, 0)
        idx = 40*i+20 # center of colorbars
        for off in range(0, 10): # avg 10 pixels
            rgb = image.get_pixel(idx+off, 120)
            avg = tuple(map(sum, zip(avg, rgb)))

        if not t[i](avg[0]/10, avg[1]/10, avg[2]/10):
            raise Exception("COLOR BARS TEST FAILED. "
            "BAR#(%d): RGB(%d,%d,%d)"%(i+1, avg[0]/10, avg[1]/10, avg[2]/10))

    print("COLOR BARS TEST PASSED...")
Code example #7
def init(is_debug, pixformat, delay_time):
    uart.deinit()
    sensor.reset()
    if pixformat == "GRAY":
        sensor.set_pixformat(sensor.GRAYSCALE)
    elif pixformat == "RGB":
        sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QQVGA)
    sensor.set_gainceiling(128)
    sensor.set_contrast(3)
    sensor.set_brightness(0)
    sensor.set_saturation(3)
    sensor.set_auto_exposure(True)
    sensor.skip_frames(time=delay_time)
    sensor.set_auto_gain(False)
    sensor.set_auto_whitebal(False)
    uart.init(115200, timeout_char=1000)
    global CompetitionScene
    if is_debug == True:
        CompetitionScene = 0
    else:
        CompetitionScene = 1
Code example #8
# This example shows off how to do MJPEG streaming in AccessPoint mode.
# Chrome, Firefox and MJpegViewer App on Android have been tested.
# Connect to OPENMV_AP and use this URL: http://192.168.1.1:8080 to view the stream.

import sensor, image, time, network, usocket, sys

SSID = 'OPENMV_AP'  # Network SSID
KEY = '1234567890'  # Network key (must be 10 chars)
HOST = ''           # Use first available interface
PORT = 8080         # Arbitrary non-privileged port

# Reset sensor
sensor.reset()
# Set sensor settings
sensor.set_contrast(1)
sensor.set_brightness(1)
sensor.set_saturation(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.QQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

# Init wlan module in AP mode.
wlan = network.WINC(mode=network.WINC.MODE_AP)
wlan.start_ap(SSID, key=KEY, security=wlan.WEP, channel=2)

# You can block waiting for client to connect
#print(wlan.wait_for_sta(10000))

def start_streaming(s):
    print ('Waiting for connections..')
    client, addr = s.accept()
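    # --- The listing cuts off here. What follows is a minimal sketch of how the
    # --- MJPEG stream is typically served (a multipart/x-mixed-replace response,
    # --- then a stream of JPEG frames). The boundary name, JPEG quality and
    # --- timeout values are illustrative assumptions, not from the original file.
    client.settimeout(2.0)
    client.recv(1024)  # read (and ignore) the HTTP request
    client.send("HTTP/1.1 200 OK\r\n"
                "Server: OpenMV\r\n"
                "Content-Type: multipart/x-mixed-replace;boundary=openmv\r\n"
                "Cache-Control: no-cache\r\n"
                "Pragma: no-cache\r\n\r\n")
    while (True):
        frame = sensor.snapshot()
        cframe = frame.compressed(quality=35)  # JPEG-compress the frame
        client.send("\r\n--openmv\r\n"
                    "Content-Type: image/jpeg\r\n"
                    "Content-Length:" + str(cframe.size()) + "\r\n\r\n")
        client.send(cframe)

# Server loop: create the listening socket and hand it to start_streaming().
while (True):
    s = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)
    try:
        s.bind([HOST, PORT])
        s.listen(5)
        s.settimeout(3)
        start_streaming(s)
    except OSError as e:
        s.close()
        print("socket error: ", e)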
Code example #9
import sensor, image, lcd, time
import KPU as kpu

lcd.init(freq=15000000)
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
#sensor.set_hmirror(1)
#sensor.set_vflip(1)
sensor.set_windowing((224, 224))
sensor.set_brightness(2)
#sensor.set_contrast(-1)
#sensor.set_auto_gain(1,2)

sensor.run(1)
clock = time.clock()
classes = ['class_1']
task = kpu.load(0x300000)
anchor = (1, 1.2, 2, 3, 4, 3, 6, 4, 5, 6.5)
a = kpu.init_yolo2(task, 0.17, 0.3, 5, anchor)
while (True):
    clock.tick()
    img = sensor.snapshot()
    code = kpu.run_yolo2(task, img)
    print(clock.fps())
    if code:
        for i in code:
            a = img.draw_rectangle(i.rect())
            a = lcd.display(img)
            print(i.classid(), i.value())
Code example #10
import sensor, time, pyb
from pyb import UART


def Square_Sum(data, datalen):
    i = 0
    z = 0
    for i in range(datalen):
        z = z + data[i]*data[i]
    return z

# Configure the camera
sensor.reset()
sensor.set_pixformat(sensor.RGB565)     # set the image format (color / grayscale)
sensor.set_framesize(sensor.QVGA)       # set the image size
sensor.skip_frames(10)                  # wait for the settings to take effect
sensor.set_auto_whitebal(False)         # disable auto white balance
sensor.set_auto_gain(False)             # disable auto gain
sensor.set_contrast(0)                  # contrast, -3 to +3 (7 steps)
sensor.set_brightness(0)                # brightness
sensor.set_saturation(0)                # saturation

uart = UART(3, 115200)                  # configure the UART
led = pyb.LED(3)                        # indicator LED
clock = time.clock()                    # initialize the FPS clock

# Center coordinates of the color-detection region
x_distance = y_distance = 0

# Path-tracking parameters (see the usage sketch after this example)
ROIS = [ # [ROI, weight]
        #(0, 110, 160, 10),
        #(0, 100, 160, 10),
        (0, 90, 160, 10),
        (0, 80, 160, 10),
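# The listing above cuts off inside the ROIS table. The sketch below is an
# assumption about how such (ROI, weight) tables are commonly consumed in
# OpenMV line-following code: find the largest blob in each ROI and compute a
# weighted average of the blob centers. LINE_THRESHOLD, EXAMPLE_ROIS and
# weighted_line_center() are hypothetical names, not from the original file.
LINE_THRESHOLD = [(0, 40, -30, 30, -30, 30)]  # placeholder L*a*b* range for a dark line

EXAMPLE_ROIS = [  # (x, y, w, h, weight)
    (0, 100, 160, 20, 0.7),
    (0, 50, 160, 20, 0.3),
    (0, 0, 160, 20, 0.1),
]

def weighted_line_center(img):
    centroid_sum = 0.0
    weight_sum = 0.0
    for r in EXAMPLE_ROIS:
        blobs = img.find_blobs(LINE_THRESHOLD, roi=r[0:4], merge=True)
        if blobs:
            largest = max(blobs, key=lambda b: b.pixels())
            centroid_sum += largest.cx() * r[4]
            weight_sum += r[4]
    return centroid_sum / weight_sum if weight_sum else None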
Code example #11
File: filters.py Project: 12019/openmv
# Filters are image functions that process a single line at a time.
# Since filters process lines on the fly, they run at sensor speed.
# Note: Only one filter can be enabled at a time.
import time, sensor

# Reset sensor
sensor.reset()

# Set sensor settings
sensor.set_contrast(1)
sensor.set_brightness(3)
sensor.set_saturation(3)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

# Enable BW filter
sensor.set_image_filter(sensor.FILTER_BW, lower=200, upper=255)

# Enable SKIN filter (Note doesn't work very well on RGB)
#sensor.set_image_filter(sensor.FILTER_SKIN)

# FPS clock
clock = time.clock()
while (True):
    clock.tick()
    img = sensor.snapshot()
    # Draw FPS
    # Note: Actual FPS is higher, the IDE slows down streaming.
    img.draw_string(0, 0, "FPS:%.2f" % (clock.fps()))
Code example #12
File: video.py Project: PierreBizouard/openmv
import sensor, avi, time

REC_LENGTH = 15 # recording length in seconds

# Set sensor parameters
sensor.reset()
sensor.set_brightness(-2)
sensor.set_contrast(1)

sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.JPEG)

vid = avi.AVI("1:/test.avi", 320, 240, 15)

start = time.ticks()
clock = time.clock()

while ((time.ticks()-start) < (REC_LENGTH*1000)):
    clock.tick()
    # capture and store frame
    image = sensor.snapshot()
    vid.add_frame(image)
    print(clock.fps())

vid.flush()
Code example #13
# ||| GAIN |||
curr_gain = sensor.get_gain_db()
sensor.set_auto_gain(False, gain_db=curr_gain)

# ||| EXPOSURE |||
curr_exposure = sensor.get_exposure_us()
sensor.set_auto_exposure(False, exposure_us = int(curr_exposure))

# ||| WHITE BAL |||
sensor.set_auto_whitebal(False, rgb_gain_db=curr_wbal)

# ||| SET VALUES & WINDOWING |||
sensor.set_windowing(vwin_val)
sensor.set_saturation(3)
sensor.set_brightness(3)  #Change to -3
sensor.set_contrast(3)

# ||| INDICATOR LED |||
LED(1).on()
time.sleep(100)
LED(1).off()

# ||| SCANS FOR BIGGEST BLOB |||
def BiggestBlob(bBlob):
    if not bBlob:
        return None
    maxBlob = bBlob[0]
    for blob in bBlob:
        if blob.area() > maxBlob.area():
            maxBlob = blob
    return maxBlob
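# Hypothetical usage sketch for BiggestBlob() (not from the original file):
# pass it the list returned by img.find_blobs() and track the largest match.
# EXAMPLE_THRESHOLD is an illustrative placeholder, not a tuned value.
EXAMPLE_THRESHOLD = [(30, 100, 15, 127, -10, 70)]  # rough "orange ball" L*a*b* range

img = sensor.snapshot()
blobs = img.find_blobs(EXAMPLE_THRESHOLD, pixels_threshold=100, area_threshold=100, merge=True)
target = BiggestBlob(blobs)
if target:
    img.draw_rectangle(target.rect())
    img.draw_cross(target.cx(), target.cy())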
Code example #14
File: main.py Project: 1219484671/SCM
import sensor, image, time
from pyb import *

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(10)
sensor.set_auto_exposure(False)
sensor.set_auto_whitebal(False, (-6.317097, -6.02073, -6.774588))
sensor.set_brightness(-3)

uart = UART(3, 115200, timeout_char=1000)

red_threshold = (51, 9, 15, 88, -16, 66)  #(82, 18, 40, 90, 3, 51)
yellow_threshold = (82, 13, -29, 55, 26, 68)  #(82, 36, 2, 53, 29, 71)
blue_threshold = (68, 24, -34, 9, -59, 3)  #(64, 19, -7, 28, -60, -26)
area = 3500
time_out = 120 * 1000

servo1 = Servo(1)
servo2 = Servo(2)


def send_finish_flag():
    uart.writechar(0xFF)
    delay(1)
    uart.writechar(0x46)
    delay(1)
    uart.writechar(0x23)
    delay(1)
Code example #15
#get the gains and exposure
gain_db = sensor.get_gain_db()
exposure_us = sensor.get_exposure_us()
rgb_gain_db = sensor.get_rgb_gain_db()
print("gain_db is " + str(gain_db))
print("exposure is " + str(exposure_us))
print("rgb_gain_db is " + str(rgb_gain_db))

# Set the gain and exposure as fixed (not concerned about the values)
sensor.set_auto_gain(False, gain_db)
sensor.set_auto_exposure(False, exposure_us)
sensor.set_auto_whitebal(False, rgb_gain_db)

# Setup contrast, brightness and saturation
sensor.set_contrast(0)  # range -3 to +3
sensor.set_brightness(0)  # range -3 to +3
sensor.set_saturation(0)  # range -3 to +3

# Disable night mode (auto frame rate) and black level calibration (BLC)
sensor.__write_reg(0x0E, 0b00000000)  # Disable night mode
sensor.__write_reg(0x3E, 0b00000000)  # Disable BLC

sensor.__write_reg(0x13, 0b00000000)  # disable automated gain

gain_db = sensor.get_gain_db()
exposure_us = sensor.get_exposure_us()
rgb_gain_db = sensor.get_rgb_gain_db()
print("gain_db is " + str(gain_db))
print("exposure is " + str(exposure_us))
print("rgb_gain_db is " + str(rgb_gain_db))
Code example #16
# Library imports
import sensor, image, lcd, time, utime
import KPU as kpu

# Initial setup of the LCD screen and the OV2640 camera
lcd.init()  # Initialize the screen
sensor.reset()  # Initialize the camera
sensor.set_pixformat(sensor.RGB565)  # Set the color format of the image
sensor.set_framesize(sensor.QVGA)  # Capture images as QVGA (320x240)
sensor.set_windowing((224, 224))  # Set the image size the network was trained with
sensor.set_vflip(1)  # Vertical flip of the image
sensor.set_saturation(-3)  # Saturation
sensor.set_brightness(-3)  # Brightness
sensor.set_contrast(-3)  # Contrast
lcd.clear()  # Clear the screen to black

# Model description and loading
labels = ['Acaro', 'Bueno', 'Manchado']  # Labels of the network's final layer
task = kpu.load('/sd/3clases.kmodel')  # Location of the .kmodel file goes here (LOAD)
kpu.set_outputs(task, 0, 1, 1, 3)  # Dimensions of the network's final layer

while (True):

    tick1 = utime.ticks_ms()
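    # The listing cuts off here. A minimal sketch of how a classification model
    # loaded with kpu.load()/kpu.set_outputs() is typically run each frame on
    # MaixPy; the drawing and timing details below are assumptions, not taken
    # from the original file.
    img = sensor.snapshot()                 # grab a 224x224 frame (windowed above)
    fmap = kpu.forward(task, img)           # run the network
    plist = fmap[:]                         # probabilities of the 3 output classes
    pmax = max(plist)
    max_index = plist.index(pmax)
    img.draw_string(0, 0, "%s: %.2f" % (labels[max_index], pmax))
    lcd.display(img)                        # show the annotated frame
    tick2 = utime.ticks_ms()
    print("inference time: %d ms" % utime.ticks_diff(tick2, tick1))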
Code example #17
File: find_lines.py Project: Jaron0211/24th_TDK
# find_lines() finds infinite length lines. Use find_line_segments() to find non-infinite lines.

enable_lens_corr = False  # turn on for straighter lines...

import sensor, image, time
from pyb import UART

uart = UART(3, 19200)

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)  # grayscale is faster
sensor.set_framesize(sensor.QQVGA)

sensor.set_contrast(+2)
sensor.set_brightness(+3)
sensor.set_auto_gain(True)

sensor.skip_frames(time=2000)
clock = time.clock()

min_degree = 0
max_degree = 179

while (True):
    clock.tick()

    img = sensor.snapshot()

    line_img = sensor.get_fb()
    cir_img = sensor.get_fb()
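    # The listing cuts off here. A minimal sketch of how find_lines() is
    # typically called with the min_degree/max_degree filter defined above;
    # the threshold value and the drawing/UART output are assumptions, not
    # from the original file.
    for l in img.find_lines(threshold=1000, theta_margin=25, rho_margin=25):
        if (min_degree <= l.theta()) and (l.theta() <= max_degree):
            img.draw_line(l.line(), color=127)
            uart.write("theta:%d\n" % l.theta())  # e.g. report the line angle
    print("FPS %f" % clock.fps())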
Code example #18
File: OpenMVFirmware.py Project: odiezapha/DroneAI
    ir_led.off()
elif (cmd == b'ledi1'):
    ir_led.on()
elif (cmd == b'wlede'):
    watchdog_led = True
elif (cmd == b'wledd'):
    watchdog_led = False
elif (cmd == b'ctrt+'):
    contrast = min(contrast + 1, 3)   # clamp to the +3 ceiling
    sensor.set_contrast(contrast)
elif (cmd == b'ctrt-'):
    contrast = max(contrast - 1, -3)  # clamp to the -3 floor
    sensor.set_contrast(contrast)
elif (cmd == b'brgt+'):
    brightness = min(brightness + 1, 3)
    sensor.set_brightness(brightness)
elif (cmd == b'brgt-'):
    brightness = max(brightness - 1, -3)
    sensor.set_brightness(brightness)
elif (cmd == b'satr+'):
    saturation = min(saturation + 1, 3)
    sensor.set_saturation(saturation)
elif (cmd == b'satr-'):
    saturation = max(saturation - 1, -3)
    sensor.set_saturation(saturation)
elif (cmd == b'width'):
    usb.send(sensor.width())
elif (cmd == b'heigh'):
    usb.send(sensor.height())
elif (cmd == b'q4vga'):
    sensor.set_framesize(sensor.QQQQVGA)  # 40x30
Code example #19
File: sensor.py Project: yushelf/openmv-project
import sensor

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.skip_frames(time=2000)  # Wait for settings to take effect.
sensor.set_windowing(30, 30)  # Sets the resolution of the camera to a sub resolution inside of the current resolution.
sensor.set_contrast(0)  # Set the camera image contrast. -3 to +3.
sensor.set_brightness(0)  # Set the camera image brightness. -3 to +3.
sensor.set_saturation(0)  # Set the camera image saturation. -3 to +3.
sensor.set_auto_gain(False)  # You need to turn off white balance too if you want to track colors.
sensor.set_auto_whitebal(False)  # You need to turn off white balance too if you want to track colors.
sensor.set_lens_correction(True)  # radi: integer radius of pixels to correct (int). coef: power of correction (int).
while (True):
    img = sensor.snapshot()  # Take a picture and return the image.
Code example #20
import sensor, time

sensor.reset()
# Set sensor settings 
sensor.set_brightness(0)
sensor.set_saturation(0)
sensor.set_gainceiling(8)
sensor.set_contrast(2)

# Set sensor pixel format
sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.RGB565)

# Enable colorbar test mode
sensor.set_colorbar(True)

# Skip a few frames to allow the sensor settle down 
for i in range(0, 30):
    image = sensor.snapshot()

#color bars thresholds
t = [lambda r, g, b: r < 50  and g < 50  and b < 50,   # Black
     lambda r, g, b: r < 50  and g < 50  and b > 200,  # Blue
     lambda r, g, b: r > 200 and g < 50  and b < 50,   # Red
     lambda r, g, b: r > 200 and g < 50  and b > 200,  # Purple
     lambda r, g, b: r < 50  and g > 200 and b < 50,   # Green
     lambda r, g, b: r < 50  and g > 200 and b > 200,  # Aqua
     lambda r, g, b: r > 200 and g > 200 and b < 50,   # Yellow
     lambda r, g, b: r > 200 and g > 200 and b > 200]  # White

#320x240 image with 8 color bars each one is approx 40 pixels.
Code example #21
File: detektor_polozaja_boce.py Project: opinau/cv
# The bottleneck should be without any sticker...

user_exposure_time = 500  # camera exposure time, feel free to change; 900 is the default value for the tested setup

sensor.reset()  # Initialize the camera sensor
sensor.set_pixformat(sensor.GRAYSCALE)  # set camera format to grayscale (color is not important in this example)
sensor.set_framesize(sensor.QVGA)  # set camera resolution to QVGA 320 x 240
sensor.set_auto_whitebal(False)  # Turn off white balance
sensor.set_auto_exposure(False, exposure_us=user_exposure_time)  # set exposure time, user-changeable (user_exposure_time variable)

# These settings are set manually to prevent the camera from changing them during use (no automatic settings in machine vision!)
sensor.set_brightness(0)  # set camera brightness
sensor.set_contrast(2)  # set camera contrast
sensor.set_saturation(0)  # set camera saturation

# Print out the initial gain for comparison.
print("Initial gain == %f db" % sensor.get_gain_db())
sensor.skip_frames(time=2000)  # small 2-second pause so the camera can update its settings

# Calculating the sensor gain.
# The user can change the GAIN_SCALE factor to obtain a brighter image.
current_gain_in_decibels = sensor.get_gain_db()  # current sensor gain
print("Current Gain == %f db" % current_gain_in_decibels)  # report the current gain
sensor.set_auto_gain(False, gain_db=current_gain_in_decibels * GAIN_SCALE)  # new gain