Example #1
import logging
import os

import cv2
import numpy as np

# PipeLine and the *Node classes used below come from this example's own project.
log = logging.getLogger(__name__)


def process_images(in_dir, out_dir):
    """
    Processes all images in in_dir and writes the results to out_dir.

    :param in_dir: input image directory
    :param out_dir: output image directory
    """
    pipeline = PipeLine('Lane perception', [
        CalibrationNode(input='img', output='img'),
        ThresholdingNode(input='img', output='binary'),
        CuttingNode(input='binary', output='binary'),
        PerspectiveNode(
            input='binary', dtype=np.float32, output='binary_warped'),
        GrayThresholdingNode(
            gray_input='binary_warped', thresh=0.8, output='binary_warped'),
        LaneDetectionNode(binary_input='binary_warped',
                          prior_lane_input=None,
                          output='lane'),
        DrawLaneNode(lane_input='lane', mode='area', output='lane_img'),
        PerspectiveNode(input='lane_img', inv=True, output='lane_img'),
        OverlayImagesNode(
            bckgrnd_input='img', overlay_input='lane_img', output='lane_ar'),
        AddTextNode(lane_input='lane', img_input='lane_ar', output='lane_ar')
    ])

    filenames = os.listdir(in_dir)
    for filename in filenames:
        log.info('Processing image {}'.format(filename))
        img = cv2.imread(os.path.join(in_dir, filename))
        context = {'img': img}
        pipeline.passthrough(context)
        cv2.imwrite(os.path.join(out_dir, filename), context['lane_ar'])
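A minimal invocation sketch; the directory names are hypothetical:

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    process_images('test_images', 'output_images')  # hypothetical directories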
Example #2
import cv2
import numpy as np


def sampleAndRunLoop(vidSource):

    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
    if int(major_ver) < 3:
        fps = vidSource.get(cv2.cv.CV_CAP_PROP_FPS)
    else:
        fps = vidSource.get(cv2.CAP_PROP_FPS)

    if fps == 0:
        fps = 24

    sampleLen = getParam("SampleLength")  # sampling window length in frames (getParam is a project helper)

    ret, frame = vidSource.read()  # read one frame to learn the frame size
    sample = np.zeros((sampleLen, frame.shape[0], frame.shape[1], 3),
                      dtype=np.uint8)

    idx = 0

    pipeline = PipeLine(fps)

    while True:
        ret, frame = vidSource.read()
        if not ret:  # end of stream
            break

        if idx < sampleLen:
            # Still filling the initial sampling window
            sample[idx] = frame
        else:
            # Slide the sampling window: drop the oldest frame, append the newest
            sample = np.concatenate((sample[1:], frame[np.newaxis]), axis=0)

        # Perform computation of frequency
        respiratoryRate = pipeline.run(sample)

        idx += 1

        # Display result on the output image
        cv2.putText(frame, "Frame: %d, %d bps" % (idx, respiratoryRate),
                    (50, 50), cv2.FONT_HERSHEY_TRIPLEX, 0.7, (0, 20, 255))
        cv2.imshow('output', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
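A hedged usage sketch; the video path is hypothetical, the OpenCV calls are standard:

if __name__ == '__main__':
    source = cv2.VideoCapture('breathing.mp4')  # hypothetical input video
    try:
        sampleAndRunLoop(source)
    finally:
        source.release()
        cv2.destroyAllWindows()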
Example #3
def location2d(self, camera, pipeline, frame=None):
    """
    Calculates the 2d location [x z] between the object and the camera.

    :param camera: the camera, can be either Camera or CameraList
    :param pipeline: a pipeline that returns the contour of the object
    :param frame: optional, a frame to be used instead of the next image from the camera
    :return: a 2d vector of the relative [x z] location between the object and the camera (in meters)
    """
    frame = camera.read() if frame is None else frame
    cnt = pipeline(frame)
    # Estimate the distance to the object from the square root of the contour area
    d_norm = self.distance(
        camera,
        pipeline + PipeLine(lambda f: np.sqrt(cv2.contourArea(cnt))))
    m = cv2.moments(cnt)
    frame_center = np.array(frame.shape[:2][::-1]) / 2
    # Contour centroid; the small epsilon guards against division by zero
    vp = m['m10'] / (m['m00'] + 0.000001), m['m01'] / (m['m00'] + 0.000001)
    x, y = np.array(vp) - frame_center
    # Horizontal angle between the object and the camera's optical axis
    alpha = x * camera.view_range / frame_center[0]
    return np.array([np.sin(alpha), np.cos(alpha)]) * d_norm
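For intuition, the angle-to-position math in isolation, with made-up numbers (a standalone sketch, not this project's API):

import numpy as np

# Hypothetical values: object 80 px right of center, half frame width 320 px,
# camera half view range 0.5 rad, estimated distance 2.0 m.
x, half_width, view_range, d_norm = 80.0, 320.0, 0.5, 2.0
alpha = x * view_range / half_width                    # 0.125 rad off-axis
xz = np.array([np.sin(alpha), np.cos(alpha)]) * d_norm
print(xz)  # ~[0.25, 1.98]: mostly forward (z), slightly to the right (x)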
Example #4
def threshold(frame, params):
    """
    Thresholds the frame in the HLS color space.

    :param params: the hls values, 3x2 matrix of [hmin hmax]
                                                 [lmin lmax]
                                                 [smin smax]
    :return: binary threshold image
    """
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HLS)
    h, l, s = params
    return cv2.inRange(frame, (h[0], l[0], s[0]), (h[1], l[1], s[1]))
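A hedged call sketch; the parameter matrix is hypothetical and only illustrates the 3x2 [min max] layout the docstring describes:

frame = cv2.imread('test.png')           # any BGR image; the path is hypothetical
hls_params = np.array([[20, 35],         # hue min/max
                       [100, 255],       # lightness min/max
                       [80, 255]])       # saturation min/max
binary = threshold(frame, hls_params)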


"""
pipeline from image to binary image of the fuel (annoying little yellow balls)
"""
pipeline = PipeLine(
    lambda frame: threshold(frame, red_detection_params),
    # Erode to remove speckle noise, then dilate to grow the blobs back
    lambda frame: cv2.erode(frame, np.ones((3, 3))),
    lambda frame: cv2.dilate(frame, np.ones((3, 3)), iterations=4))
"""
pipeline from image to contour of largest (closest) fuel (ball)
"""
pipeline1 = pipeline + PipeLine(
    # [1] selects the contour list from OpenCV 3's (image, contours, hierarchy) return
    lambda frame: cv2.findContours(frame, cv2.RETR_TREE,
                                   cv2.CHAIN_APPROX_SIMPLE)[1],
    lambda cnts: sorted(cnts, key=lambda x: cv2.contourArea(x), reverse=True))
"""
pipeline from image to contours of fuel (balls)
"""

pipeline_cnts = pipeline + PipeLine(
    lambda frame: cv2.findContours(frame, cv2.RETR_TREE,
                                   cv2.CHAIN_APPROX_SIMPLE)[1])
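A hedged usage sketch; the test image is hypothetical, and calling a PipeLine on a frame follows the pattern from example #3:

frame = cv2.imread('fuel_frame.png')  # hypothetical test image
cnts = pipeline_cnts(frame)           # all fuel contours
largest = pipeline1(frame)[0]         # closest (largest) fuel, assuming at least one is visible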
Example #5
import sys
sys.path.append("/Users/TOSUKUi/Documents/workspace/gta-self-driving/")

from pipeline import PipeLine
from capturing import CapturingWindowMSS
from preprocessing import Preprocessing
import cv2
import numpy as np

cap = CapturingWindowMSS(os="linux")
preprocess = Preprocessing()
pl = PipeLine()
procedures = [cap, preprocess]

while True:
    sct_img = pl.execute(procedures)
    print(sct_img)  # debug: inspect the captured frame
    cv2.imshow('screen', np.array(sct_img))

    if cv2.waitKey(1) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
Example #6
from flask import Flask

from pipeline import PipeLine
from mail import SendEmail
from pic import PictureMaker
from deleter import TrashCan

app = Flask(__name__)

# set it to a random string
app.secret_key = 'any random string that is long enough'

# set this to path/to/your/database/backend/program
database_exec_path = './train_modified'
rand_exec_path = './gen_rand'
graph_exec_path = './static/'

app.pipe = PipeLine(database_exec_path)
app.rand = PipeLine(rand_exec_path)
app.email = SendEmail()
app.pic_make = PictureMaker(graph_exec_path)
app.trash_can = TrashCan()

# Sample `add_train` input (train C1001 from Changchun to Yanji West; the last
# two columns are hard-sleeper and soft-sleeper fares):
# add_train 1 C1001(长春-延吉西) C 4 2 硬卧 软卧
# 长春 xx:xx 05:47 xx:xx ¥0.0 ¥0.0
# 吉林 06:27 06:29 00:02 ¥478.97 ¥980.89
# 敦化 07:23 07:25 00:02 ¥911.62 ¥2748.9
# 延吉西 08:04 08:04 xx:xx ¥1454.54 ¥2489.5


class StationInfo(object):
    def __init__(self, seat_num):
        self.name = None
Example #7
from pipeline import PipeLine
from pathlib import Path

if __name__ == "__main__":
    GrB = PipeLine.GenerateTarget(
        [Path('In/neo-N-terminal.csv'),
         Path('In/neo-C-terminal.csv')], "Granzyme B")
    ICs = PipeLine.Run(Path("In/IC selection NO ISO.fasta.gz"),
                       Path('Out/IC Selection HS NO ISO.csv'), GrB)
Example #8
from pipeline import PipeLine
from steps import *

if __name__ == '__main__':
    pipeline = PipeLine()
    pipeline.add_bg_step(ElasticInitBGStep({'kill_at_end': False}))
    pipeline.add_bg_step(VisualizerInitBGStep({'kill_at_end': False}))
    pipeline.add_pipe(TweetBinStep(
        {
            'tweet-frequency': 'daily',
            'tweet-format': 'csv',
            'ignore-before': '2017-01-01'
        }))
    pipeline.add_pipe(FeatureExtractStep(
        {
            'jar-path': '../jars/TextFeatureExtractor.jar',
            'tweet-frequency': 'daily',
            'query-params': [
                'lka',
                'sri lanka', 'srilanka',
                'flag', 'celebration',
            ]
        }))
    pipeline.add_pipe(IKASLStep(
        {
            'jar-path': '../jars/IKASL.jar',
            'tweet-frequency': 'daily',
            'additional-args': {'htf': 0.02, '-max-nodes': 8}
        }))
    pipeline.add_pipe(LayerProcessStep({'tweet-frequency': 'daily'}))
Example #9
import collections
import logging

import numpy as np
from moviepy.editor import VideoFileClip

# PipeLine, the *Node classes and _smooth_lane_fit come from this example's own project.
log = logging.getLogger(__name__)


def process_video(in_file, out_file):
    """
    Processes video.

    :param in_file: path to the input video
    :param out_file: path to the output video
    """
    preproc_pipeline = PipeLine('Lane perception', [
        ColorCvtNode(input='img', mode=ColorCvtNode.RGB2BGR, output='img'),
        CalibrationNode(input='img', output='img'),
        ThresholdingNode(input='img', output='binary'),
        CuttingNode(input='binary', output='binary'),
        PerspectiveNode(
            input='binary', dtype=np.float32, output='binary_warped'),
        GrayThresholdingNode(
            gray_input='binary_warped', thresh=0.8, output='binary_warped'),
        LaneDetectionNode(binary_input='binary_warped',
                          prior_lane_input='prior_lane',
                          output='lane'),
    ])
    postproc_pipeline = PipeLine('Lane Augmented Reality', [
        DrawLaneNode(lane_input='lane', mode='area', output='lane_img'),
        PerspectiveNode(input='lane_img', inv=True, output='lane_img'),
        OverlayImagesNode(
            bckgrnd_input='img', overlay_input='lane_img', output='lane_ar'),
        AddTextNode(lane_input='lane', img_input='lane_ar', output='lane_ar'),
        ColorCvtNode(
            input='lane_ar', mode=ColorCvtNode.BGR2RGB, output='lane_ar'),
    ])

    lane_stack = collections.deque(maxlen=2)
    last_good_lane = None

    def process_frame(frame):
        # last_good_lane is rebound below, so it needs a nonlocal declaration;
        # lane_stack is only mutated in place and needs no rebinding
        nonlocal last_good_lane

        if len(lane_stack) > 0:
            prior_lane = lane_stack[0]
        else:
            log.warning('no more prior lanes left, starting over...')
            prior_lane = None

        context = {'img': frame, 'prior_lane': prior_lane}
        preproc_pipeline.passthrough(context)

        lane = context['lane']
        display_lane = last_good_lane if last_good_lane is not None else lane
        if not lane.validate(prior_lane=prior_lane):
            log.warning('new lane failed validation, keeping last good lane')
            if len(lane_stack) > 0:
                lane_stack.pop()  # remove one lane from the bottom of the stack
        else:
            # smooth lane
            lanefits = [lane.lane_fit for lane in lane_stack]
            lanefits.append(lane.lane_fit)
            display_lane = lane.copy()
            display_lane.lane_fit = _smooth_lane_fit(lanefits)
            last_good_lane = display_lane

            lane_stack.appendleft(lane)

        context['lane'] = display_lane  # lane to display
        postproc_pipeline.passthrough(context)

        return context['lane_ar']

    video_input = VideoFileClip(in_file)  #.subclip(555/25, 560/25)
    video_processed = video_input.fl_image(process_frame)
    video_processed.write_videofile(out_file, audio=False)
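A minimal invocation sketch; the file names are hypothetical:

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    process_video('project_video.mp4', 'project_video_out.mp4')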