Code example #1
def __init__(self,
             name_pattern,
             directory,
             frame_time,
             nimages=1,
             ntrigger=1,
             start_angle=0.,
             angle_per_frame=0.,
             image_nr_start=1,
             trigger_mode='exts',
             compression='bslz4'):

    # detector() and beam_center() are defined elsewhere in the module.
    self.detector = detector()
    self.beam_center = beam_center()

    self.name_pattern = name_pattern
    self.directory = directory
    self.frame_time = frame_time
    self.nimages = nimages
    self.ntrigger = ntrigger
    self.start_angle = start_angle
    self.angle_per_frame = angle_per_frame
    self.image_nr_start = image_nr_start
    self.trigger_mode = trigger_mode
    self.compression = compression
Code example #2
def __init__(self,
             name_pattern,
             directory,
             start,  # dictionary of motor positions
             stop,  # dictionary of motor positions
             vertical_range,  # vertical single sweep range in millimeters
             frame_time=0.1,  # in seconds
             number_of_points=None,  # computed from the scan length if None
             oscillation_start_angle=0,  # angle in degrees
             total_oscillation_range=180,  # angle in degrees
             degrees_per_frame=0.1,  # angle per image in degrees
             degrees_of_overlap_between_neighboring_sweeps=1,  # angle in degrees
             beam_horizontal_size=0.01):  # beam width in mm

    self.name_pattern = name_pattern
    self.directory = directory
    self.start = start
    self.stop = stop
    self.vertical_range = vertical_range
    self.beam_horizontal_size = beam_horizontal_size
    if number_of_points is None:
        # self.motors (the list of motor names) is defined elsewhere in the class.
        start_vector = np.array([start[motor] for motor in self.motors])
        stop_vector = np.array([stop[motor] for motor in self.motors])
        scan_length = np.linalg.norm(stop_vector - start_vector)
        number_of_points = int(np.floor(scan_length / self.beam_horizontal_size))
        print('total length of principal helical line is %s' % scan_length)
    self.number_of_points = number_of_points
    print('experiment will consist of %s vertical helical sweeps' % self.number_of_points)
    self.oscillation_start_angle = oscillation_start_angle
    self.total_oscillation_range = total_oscillation_range
    self.degrees_per_frame = degrees_per_frame
    self.degrees_of_overlap_between_neighboring_sweeps = degrees_of_overlap_between_neighboring_sweeps
    self.frame_time = frame_time

    self.detector = detector()
    self.goniometer = goniometer()
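As a worked check of the number_of_points arithmetic above, here is a minimal, self-contained sketch; the motor names and positions are invented for illustration, standing in for the self.motors list the class defines elsewhere.

import numpy as np

# Invented motor names and positions, for illustration only.
motors = ['AlignmentY', 'AlignmentZ']
start = {'AlignmentY': 0.0, 'AlignmentZ': 0.0}
stop = {'AlignmentY': 0.3, 'AlignmentZ': 0.4}
beam_horizontal_size = 0.01  # beam width in mm

start_vector = np.array([start[motor] for motor in motors])
stop_vector = np.array([stop[motor] for motor in motors])

# Euclidean distance between the two positions: about 0.5 mm here.
scan_length = np.linalg.norm(stop_vector - start_vector)

# One vertical sweep per beam width along the line: floor(0.5 / 0.01), i.e. 50 sweeps.
number_of_points = int(np.floor(scan_length / beam_horizontal_size))
print(scan_length, number_of_points)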
Code example #3
while True:
    # Grab the next frame; ret is False when no frame could be read.
    ret, frame = cap.read()
    if ret:
        start_time = time.time()
        # OpenCV delivers BGR; reverse the channel axis to get RGB for PIL.
        frames = frame[:, :, ::-1]

        image = Image.fromarray(frames, 'RGB')
        width, height = image.size
        # Scale factors to map boxes from the 416x416 network input
        # back to the original frame resolution.
        x_w = width / 416
        y_h = height / 416
        cropimg = image.resize((416, 416))
        # transforms is assumed to be a composed, callable torchvision
        # transform (as in example #6), not the torchvision.transforms module.
        imgdata = transforms(cropimg)
        imgdata = torch.FloatTensor(imgdata).view(-1, 3, 416, 416).cuda()

        # Run the detector at a 0.5 confidence threshold.
        y = detector(imgdata, 0.5, cfg.ANCHORS_GROUP)[0]

        for i in y:
            # Rescale each (x1, y1, x2, y2) box to frame coordinates.
            x1 = int(i[0] * x_w)
            y1 = int(i[1] * y_h)
            x2 = int(i[2] * x_w)
            y2 = int(i[3] * y_h)

            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255))

        end_time = time.time()
        print(end_time - start_time)  # per-frame detection time in seconds
    cv2.imshow('a', frame)
    cv2.waitKey(0)  # blocks until a keypress; use waitKey(1) for free-running video
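The x_w and y_h factors in this loop map detections from the 416x416 network input back to the original frame; a self-contained sketch of that rescaling, with an invented box and frame size:

def rescale_box(box, frame_width, frame_height, net_size=416):
    # Map an (x1, y1, x2, y2) box from net_size coordinates back to
    # the original frame resolution.
    x_w = frame_width / net_size
    y_h = frame_height / net_size
    x1, y1, x2, y2 = box
    return (int(x1 * x_w), int(y1 * y_h), int(x2 * x_w), int(y2 * y_h))

# An invented detection on a 1280x720 frame, given in 416x416 coordinates.
print(rescale_box((100, 100, 200, 200), 1280, 720))  # (307, 173, 615, 346)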
Code example #4
File: descriptor.py Project: YvetteLaw/TEG
                        desp = get_feature(patch_gray, patch_grad, patch_dp, patchMask, ngray, ngrad, ndp, ps)

                        keypoints.append([u, v])
                        features.append(desp)
                except Exception:
                    # Skip patches that fail feature extraction.
                    continue

    # Standardize each descriptor dimension to zero mean and unit variance.
    feats = scale(features, axis=0, with_mean=True, with_std=True)
    return keypoints, feats


if __name__ == '__main__':
    import time
    from detector import *

    pathdir = './data/ManipulatorsDataset/'
    Kmat = np.loadtxt(pathdir + 'camera-intrinsics.txt')
    rgbfile = pathdir + 'mixture/illum/rgb_1.png'
    depthfile = pathdir + 'mixture/illum/depth_1.png'
    rgbImage = cv2.imread(rgbfile)
    grayImage = cv2.imread(rgbfile, 0)  # load again as single-channel grayscale
    depthImage = cv2.imread(depthfile, cv2.IMREAD_UNCHANGED)  # keep raw depth values

    # Detect keypoints, then time the descriptor computation.
    kps = detector(grayImage, depthImage, Kmat)
    print(len(kps))

    time1 = time.time()
    kps, desp = descriptor(grayImage, depthImage, kps, Kmat)
    print(time.time() - time1)
    print(len(kps))
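The scale call in example #4 looks like sklearn.preprocessing.scale (the import is outside the excerpt); assuming that, it standardizes each descriptor dimension to zero mean and unit variance, as this toy check shows:

import numpy as np
from sklearn.preprocessing import scale

# Toy 3x2 feature matrix; each column is one descriptor dimension.
features = np.array([[1.0, 10.0],
                     [2.0, 20.0],
                     [3.0, 30.0]])

feats = scale(features, axis=0, with_mean=True, with_std=True)
print(feats.mean(axis=0))  # ~[0. 0.]
print(feats.std(axis=0))   # ~[1. 1.]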
Code example #5
def detector_runner():
    # Ping neighbours every two seconds until keep_running is cleared.
    global member_list, keep_running
    detective = detector(member_list)
    while keep_running:
        time.sleep(2)
        detective.ping_neighbours()
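A minimal sketch of how detector_runner might be driven from the main thread, stopping it via the shared keep_running flag; the detector class here is a stub standing in for the real one, which the snippet does not show:

import threading
import time

member_list = ['node-1', 'node-2']
keep_running = True

class detector:
    # Stub standing in for the real failure detector.
    def __init__(self, members):
        self.members = members

    def ping_neighbours(self):
        print('pinging', self.members)

def detector_runner():
    global member_list, keep_running
    detective = detector(member_list)
    while keep_running:
        time.sleep(2)
        detective.ping_neighbours()

runner = threading.Thread(target=detector_runner, daemon=True)
runner.start()
time.sleep(5)         # let the detector run for a couple of ping cycles
keep_running = False  # signal the loop to exit
runner.join()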
Code example #6
File: pic_test.py Project: wolfJava/actual_net
import cfg

if __name__ == '__main__':
    detector = Detector()
    data_for = transforms.Compose([transforms.ToTensor()])
    # VideoCapture also accepts a single image path; read() then yields
    # that one frame.
    cap = cv2.VideoCapture("D:/ai/ai/train2017/train2017/000000000394.jpg")
    while True:
        # ret: whether a frame was read; frame: the image data.
        ret, frame = cap.read()
        if ret:
            # OpenCV reads data in BGR format; convert to RGB here.
            frames = frame[:, :, ::-1]
            image = Image.fromarray(frames, 'RGB')
            width, height = image.size
            x_w = width / 416
            y_h = height / 416
            cropimg = image.resize((416, 416))
            imgdata = data_for(cropimg)
            imgdata = torch.FloatTensor(imgdata).view(-1, 3, 416, 416).cuda()
            y = detector(imgdata, 0.55, cfg.anchors_group)[0]
            print(y)
            for i in y:
                # Rescale each box from 416x416 network coordinates to the frame.
                x1 = int(i[0] * x_w)
                y1 = int(i[1] * y_h)
                x2 = int(i[2] * x_w)
                y2 = int(i[3] * y_h)
                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255))
        cv2.imshow('a', frame)
        cv2.waitKey(0)
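The frame[:, :, ::-1] slice in examples #3 and #6 reverses the channel axis to turn OpenCV's BGR layout into the RGB that PIL expects; cv2.cvtColor is the equivalent, more explicit call:

import cv2
import numpy as np

frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # stand-in BGR frame
rgb_a = frame[:, :, ::-1]                         # slice-based reversal (a view)
rgb_b = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)    # explicit conversion (a copy)
assert (rgb_a == rgb_b).all()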
Code example #7
if __name__ == '__main__':
    camera = cv2.VideoCapture(video_filename)

    # Creates graph from saved GraphDef.
    create_graph()

    while True:
        start = time.time()  # want to time each cycle; starting stopwatch
        num_throw_away_frames = 30  # simulate speed of classifier in a real-time situation
        for i in range(num_throw_away_frames):
            retval, temp = camera.read()

        retval, camera_capture = camera.read()
        filename = "test_image.png"
        cv2.imwrite(filename, camera_capture)
        vote, target_label = detector(filename)
        print(target_label + ' is present.')

        end = time.time()
        print('Elapsed Time: ' +
              str(end - start))  # print out how long this detection cycle took

        print('********')

        ### uncomment this section if you want to see the images that the
        ### detector classified. if you do invoke this section, the loop will
        ### pause on each image till a keystroke; ESC will quit the program
        #if vote:
        #    cv2.circle(camera_capture, (320, 240), 50, (0, 255, 0), 5)
        #cv2.imshow('image', camera_capture)
        #k = cv2.waitKey(0)
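The throw-away read() loop above discards stale buffered frames so the classifier sees a recent one; assuming OpenCV's VideoCapture.grab(), which advances the stream without decoding, a cheaper variant would be:

import cv2

def read_latest_frame(camera, num_throw_away_frames=30):
    # Skip stale buffered frames without paying the decode cost, then
    # decode and return only the newest frame.
    for _ in range(num_throw_away_frames):
        camera.grab()
    return camera.read()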