Example #1
    def test_generate_detectors(self):
        print('=== Testing generate_detectors() ===')
        det = detector.detector()
        self.assertAlmostEqual(det.generate_detectors(config_fname),
                               70.32817314646061)
        self.assertEqual(det.num_dfiles, 0)
        self.det_sim_tests(det)
        det.generate_detectors(config_fname)

        det = detector.detector()
        list_fname = recon_folder + b'/det_list.txt'
        with open(list_fname, 'w') as f:
            f.writelines(['data/det_sim.dat\n', 'data/det_sim.dat\n'])
        config = DragonflyConfig(config_fname)
        config.modify_entry('emc', 'in_detector_list', 'det_list.txt')
        self.assertRaises(AssertionError, det.generate_detectors, config_fname)
        config.remove_entry('emc', 'in_detector_file')
        det.generate_detectors(config_fname)
        self.det_sim_tests(det)

        shutil.copyfile(recon_folder + b'/data/det_sim.dat',
                        recon_folder + b'/data/det_sim_test.dat')
        with open(list_fname, 'w') as f:
            f.writelines(['data/det_sim.dat\n', 'data/det_sim_test.dat\n'])
        det.generate_detectors(config_fname)
        self.det_sim_tests(det, single=False)
        self.det_sim_tests(det.nth_det(1), single=False)

        os.remove(list_fname)
        os.remove(recon_folder + b'/data/det_sim_test.dat')
        config.remove_entry('emc', 'in_detector_list')
        config.modify_entry('emc', 'in_detector_file',
                            'make_detector:::out_detector_file')
Example #2
 def test_generate_detectors(self):
     print('=== Testing generate_detectors() ===')
     det = detector.detector()
     self.assertAlmostEqual(det.generate_detectors(config_fname), 70.32817314646061) 
     self.assertEqual(det.num_dfiles, 0)
     self.det_sim_tests(det)
     det.generate_detectors(config_fname)
     
     det = detector.detector()
     list_fname = recon_folder+b'/det_list.txt'
     with open(list_fname, 'w') as f:
         f.writelines(['data/det_sim.dat\n', 'data/det_sim.dat\n'])
     config = DragonflyConfig(config_fname)
     config.modify_entry('emc', 'in_detector_list', 'det_list.txt')
     self.assertRaises(AssertionError, det.generate_detectors, config_fname)
     config.remove_entry('emc', 'in_detector_file')
     det.generate_detectors(config_fname)
     self.det_sim_tests(det)
     
     shutil.copyfile(recon_folder+b'/data/det_sim.dat', recon_folder+b'/data/det_sim_test.dat')
     with open(list_fname, 'w') as f:
         f.writelines(['data/det_sim.dat\n', 'data/det_sim_test.dat\n'])
     det.generate_detectors(config_fname)
     self.det_sim_tests(det, single=False)
     self.det_sim_tests(det.nth_det(1), single=False)
     
     os.remove(list_fname)
     os.remove(recon_folder+b'/data/det_sim_test.dat')
     config.remove_entry('emc', 'in_detector_list')
     config.modify_entry('emc', 'in_detector_file', 'make_detector:::out_detector_file')
Example #3
    def test_parse_detector_list(self):
        print('=== Testing parse_detector_list() ===')
        shutil.copyfile(recon_folder + b'/data/det_sim.dat',
                        recon_folder + b'/data/det_sim_test.dat')
        list_fname = 'test_det_list.txt'

        det = detector.detector()
        with open(list_fname, 'w') as f:
            f.writelines([
                (recon_folder + b'/data/det_sim.dat\n').decode('utf-8'),
                (recon_folder + b'/data/det_sim.dat\n').decode('utf-8')
            ])
        self.assertAlmostEqual(
            det.parse_detector_list(bytes(list_fname, 'utf-8')),
            70.32817314646061)
        self.assertEqual(det.num_dfiles, 2)
        self.det_sim_tests(det)
        self.assertIs(det.nth_det(1), None)

        det = detector.detector()
        with open(list_fname, 'w') as f:
            f.writelines([
                (recon_folder + b'/data/det_sim.dat\n').decode('utf-8'),
                (recon_folder + b'/data/det_sim_test.dat\n').decode('utf-8')
            ])
        det.parse_detector_list(bytes(list_fname, 'utf-8'))
        self.assertEqual(det.num_dfiles, 2)
        self.assertEqual(det.num_det, 2)
        npt.assert_array_equal(det.mapping, [0, 1] + 1022 * [0])
        self.det_sim_tests(det, single=False)
        self.det_sim_tests(det.nth_det(1), single=False)
        self.assertIs(det.nth_det(2), None)

        os.remove(list_fname)
        os.remove(recon_folder + b'/data/det_sim_test.dat')
Example #4
    def test_parse_detector(self):
        print('=== Testing parse_detector() ===')
        det = detector.detector()
        self.assertAlmostEqual(
            det.parse_detector(recon_folder + b'/data/det_sim.dat'),
            70.32817314646061)
        self.assertEqual(det.num_dfiles, 0)
        self.det_sim_tests(det)
        det.parse_detector(recon_folder + b'/data/det_sim.dat')

        det_fname = recon_folder + b'/data/det_sim_test.dat'
        with open(recon_folder + b'/data/det_sim.dat', 'r') as f:
            lines = f.readlines()
        lines[0] = lines[0].split()[0] + '\n'
        with open(det_fname, 'w') as f:
            f.writelines(lines)
        det.parse_detector(det_fname)
        self.det_sim_tests(det, old_style=True)
        os.remove(det_fname)

        det = detector.detector()
        self.assertAlmostEqual(
            det.parse_detector(recon_folder + b'/data/det_sim.h5'),
            70.32817314646061)
        self.assertEqual(det.num_dfiles, 0)
        self.det_sim_tests(det)
Example #5
 def test_parse_detector_list(self):
     print('=== Testing parse_detector_list() ===')
     shutil.copyfile(recon_folder+b'/data/det_sim.dat', recon_folder+b'/data/det_sim_test.dat')
     list_fname = 'test_det_list.txt'
     
     det = detector.detector()
     with open(list_fname, 'w') as f:
         f.writelines([(recon_folder+b'/data/det_sim.dat\n').decode('utf-8'), (recon_folder+b'/data/det_sim.dat\n').decode('utf-8')])
     self.assertAlmostEqual(det.parse_detector_list(bytes(list_fname, 'utf-8')), 70.32817314646061) 
     self.assertEqual(det.num_dfiles, 2)
     self.det_sim_tests(det)
     self.assertIs(det.nth_det(1), None)
     
     det = detector.detector()
     with open(list_fname, 'w') as f:
         f.writelines([(recon_folder+b'/data/det_sim.dat\n').decode('utf-8'), (recon_folder+b'/data/det_sim_test.dat\n').decode('utf-8')])
     det.parse_detector_list(bytes(list_fname, 'utf-8'))
     self.assertEqual(det.num_dfiles, 2)
     self.assertEqual(det.num_det, 2)
     npt.assert_array_equal(det.mapping, [0,1]+1022*[0])
     self.det_sim_tests(det, single=False)
     self.det_sim_tests(det.nth_det(1), single=False)
     self.assertIs(det.nth_det(2), None)
     
     os.remove(list_fname)
     os.remove(recon_folder+b'/data/det_sim_test.dat')
Example #6
def main():
    # define parameters ###########################################################:
    expname = 'myexpe/'
    data = 'icdar2013word'  # data for training/testing
    eMode = True  # edge detection
    CodeBookName1 = '../codebooks/Patch/codeBook.npy'  # codebook name
    CodeBookName2 = '../codebooks/Verify/codeBook.npy'  # codebook name

    coarseclfname = 'coarse'
    fineclfname = 'fine'
    wordgraphclfname = 'wordgraph'
    pdirname = '../data/'  # dir contains all experiment data
    cdirname = os.path.join(pdirname, expname)
    clfdir = os.path.join(cdirname, 'clf/')  # dir to save classifier
    rawdir = os.path.join(cdirname, 'raw/')  # dir for original image
    npydir = os.path.join(cdirname, 'npy/')  # dir for feature and label npy
    roitestdir = os.path.join(
        cdirname, 'roitest/')  # dir for region of interest fine detector
    predir = os.path.join(cdirname, 'pre/')  # dir for preprocessing
    predtxtdir = os.path.join(cdirname,
                              'pretxt/')  # dir for txt file of bounding boxes.
    txtdir = os.path.join(cdirname, 'txt/')  # dir for bounding box txt files
    # applying coarse detector ###########################################################:
    mode = 'adaboost'  # classification mode for detector
    lMode = 'foreground'  # foreground/whitespace
    fMode = 'context'  # local or contextual
    psize = 32
    ssize = 16
    nob = 3
    ratio = 0.9
    rrange = 30
    para0 = (float(psize - ssize) / psize)**2
    para1 = 1 - ratio
    rpower = ratio**numpy.asarray(range(rrange))

    data = pickle_load('detect', cdirname)
    codebook = codebook_load(CodeBookName1)
    myDetector = detector(codebook, data, psize, ssize, nob, rpower, para0,
                          para1, lMode, fMode, eMode)
    myClassifier = classifier()
    myClassifier.clf_load(coarseclfname, clfdir)
    myDetector.image_test(rawdir, predir, myClassifier.classifier)
    # applying fine detector and region growing ###########################################################:
    mode = 'adaboost'  # classification mode for detector
    lMode = 'foreground'  # foreground/whitespace
    fMode = 'local'  # local or contextual
    rpower = ratio**numpy.asarray(range(rrange))
    codebook = codebook_load(CodeBookName2)
    data = pickle_load('region', cdirname)
    myDetector = detector(codebook, data, psize, ssize, nob, rpower, para0,
                          para1, lMode, fMode, eMode)
    myClassifier = classifier(mode)
    myClassifier.clf_load(fineclfname, clfdir)
    myDetector.roi_test(predir, rawdir, roitestdir, myClassifier.classifier)
    # applying word graph ###########################################################:
    myClassifier = classifier()
    myClassifier.clf_load(wordgraphclfname, clfdir)
    wordbb = wordGraph_test(roitestdir, myClassifier.classifier)
    wordbb2pred(wordbb, predtxtdir)
Example #7
def upload():
    if request.method == 'POST':
        f = request.files['file']
        f.save(secure_filename(f.filename))  #Save image at the server
        detector.detector(f.filename)
        words = classifier.classify()
        os.remove(f.filename)
        return render_template("show_entries.html", data=words)
Example #8
    def test_free_detector(self):
        print('=== Testing free_detector() ===')
        det = detector.detector()
        det.free_detector()
        det.free_detector()
        self.assertIsNone(det.num_pix)

        det = detector.detector()
        print(config_fname)
        det.generate_detectors(config_fname)
        det.free_detector()
        det.free_detector()
        self.assertIsNone(det.num_pix)
Example #9
 def test_free_detector(self):
     print('=== Testing free_detector() ===')
     det = detector.detector()
     det.free_detector()
     det.free_detector()
     self.assertIsNone(det.num_pix)
     
     det = detector.detector()
     print(config_fname)
     det.generate_detectors(config_fname)
     det.free_detector()
     det.free_detector()
     self.assertIsNone(det.num_pix)
Example #10
    def callback(self, rgb_img, depth_img, camera_info):
        try:
            rgb_image = self.bridge.imgmsg_to_cv2(rgb_img, "bgra8")
            depth_image = self.bridge.imgmsg_to_cv2(depth_img, "32FC1")
        except CvBridgeError as e:
            print(e)
            return

        rgb_image = rgb_image[:, :, :-1]
        k = camera_info.K
        fx = k[0]
        fy = k[4]
        cx = k[2]
        cy = k[5]

        detector(rgb_image, depth_image, fx, fy, cx, cy)
Example #11
 def __init__(self, pixel_size=0.075):
     try:
         self.wavelength_motor = PyTango.DeviceProxy('i11-ma-c03/op/mono1')
         self.detector = detector()
     except:
         pass
     self.pixel_size = pixel_size
Example #12
def solve(callback):
    global Status

    pos_wait = True  # enable camera check
    stickers = None

    stickers = detector(pos_wait)
    print('sticker:', stickers)

    for i in range(6):
        print(stickers[i * 9:i * 9 + 9])
    try:
        # R F2 R2 B2 L F2 R2 B2 R D2 L D' F U' B' R2 D2 F' U2 F' solved in 55.71 sec
        # L U F' U' F D' R' U' D2 R B2 R D2 F2 L' F2 U2 R D2 U' solved in 51.26 sec
        # L B R2 D2 B R2 D2 B' D2 F L2 F U R' D U2 L D' U2 48.25 sec
        # U B2 L2 U F2 R2 U R2 B2 D' F2 D2 R' D' U2 B' R B2 L2 F U2 38.05 sec
        solution = solver(stickers)
        print(solution)
        strt = time()
        controller(solution)
        print('done in', time() - strt, 'sec')
        callback()
    except:
        print('error')
        callback()
Example #13
def predict():

	user_id = request.form['user_id']
	print(user_id)
	if not path.exists(user_id+'.txt'):
		get_list(user_id)
	
	user_list = read_list(user_id)
	print("user_list: ", user_list)

	start_time = int(request.form['start_time'])
	print(start_time)

	# m = mobilenet.mnet()
	# res = m.predict('test_image.jpg')
	# print(res)
	
	image = request.form['input_image']
	image = base64.b64decode(image)
	d = detector.detector()
	res = d.detect_objects(image)
	res = list(res.keys())
	print("res", res)
	overlap = list(set(res).intersection(set(user_list)))
	print("overlap", overlap)
	end_time = time.time()
	print(end_time-start_time)
	if len(overlap) > 0:
		return overlap[0]
	else:
		return ""
Example #14
    def __init__(self,
                 scan_range,
                 scan_exposure_time,
                 scan_start_angles, #this is an iterable
                 angle_per_frame,
                 name_pattern,
                 directory='/nfs/ruchebis/spool/2016_Run3/orphaned_collects',
                 image_nr_start=1):
                     
        self.goniometer = goniometer()
        self.detector = detector()
        self.beam_center = beam_center()
        
        scan_range = float(scan_range)
        scan_exposure_time = float(scan_exposure_time)
        
        nimages = float(scan_range)/angle_per_frame

        frame_time = scan_exposure_time/nimages
        
        self.scan_range = scan_range
        self.scan_exposure_time = scan_exposure_time
        self.scan_start_angles = scan_start_angles
        self.angle_per_frame = angle_per_frame
        
        self.nimages = int(nimages)
        self.frame_time = float(frame_time)
        self.count_time = self.frame_time - self.detector.get_detector_readout_time()
        
        self.name_pattern = name_pattern
        self.directory = directory
        self.image_nr_start = image_nr_start
        self.status = None
Example #15
def translator(sentence):
    encText = urllib.parse.quote(sentence)
    langCode = detector.detector(sentence)
    data = "source=" + langCode + "&target=ko&text=" + encText
    url = "https://naveropenapi.apigw.ntruss.com/nmt/v1/translation"
    request = urllib.request.Request(url)
    request.add_header("X-NCP-APIGW-API-KEY-ID", PAPAGO_API_ID)
    request.add_header("X-NCP-APIGW-API-KEY", PAPAGO_API_SECRET)
    response = urllib.request.urlopen(request, data=data.encode("utf-8"))
    rescode = response.getcode()
    if (rescode == 200):
        response_body = response.read()
        json_data = response_body.decode('utf-8')
        json_dict = json.loads(json_data)
        trans_result = json_dict['message']['result']['translatedText']
        # logging.info(json_data)
        # logging.info(json_dict['message'])
        # logging.info(json_dict['message']['result'])
        # logging.info(json_dict['message']['result']['translatedText'])
        logging.info(trans_result)

    else:
        logging.info("Error Code:" + str(rescode))
        trans_result = ""

    return trans_result
Example #16
    def test_slice_gen3d(self):
        print('=== Testing slice_gen3d() ===')
        det = detector.detector()
        det.parse_detector(recon_folder + b'/data/det_sim.dat')
        intens = 1.e-9 * np.fromfile(recon_folder +
                                     b'/data/intensities.bin').reshape(3 *
                                                                       (145, ))
        view = np.zeros(det.num_pix)

        quat = np.array([1., 0, 0, 0])
        interp.slice_gen3d(quat, view, intens, det)
        self.assertAlmostEqual(view.mean(), 223.922218946)
        npt.assert_array_almost_equal(
            view[:5],
            [0.03021473, 0.02554173, 0.01861631, 0.01085438, 0.00459315])
        interp.slice_gen3d(quat, view, intens, det, rescale=1.)
        self.assertAlmostEqual(view.mean(), 1.86882056344)
        npt.assert_array_almost_equal(
            view[:5],
            [-3.49942586, -3.66744152, -3.98371703, -4.5231864, -5.38318919])

        quat = np.array([np.sqrt(0.86), 0.1, 0.2, 0.3])
        interp.slice_gen3d(quat, view, intens, det)
        self.assertAlmostEqual(view.mean(), 184.449773553)
        npt.assert_array_almost_equal(
            view[:5],
            [0.00039123, 0.00014522, 0.00057308, 0.00185642, 0.00371838])
        interp.slice_gen3d(quat, view, intens, det, rescale=1.)
        self.assertAlmostEqual(view.mean(), 0.567310517859)
        npt.assert_array_almost_equal(
            view[:5],
            [-7.84620536, -8.83729446, -7.46449246, -6.28910363, -5.59446611])
Example #17
 def __init__(
     self,
     scan_range,
     scan_exposure_time,
     scan_start_angle,
     angle_per_frame,
     name_pattern,
     directory,
     image_nr_start,
     position=None,
     photon_energy=None,
     flux=None,
     transmission=None,
 ):
     self.goniometer = goniometer()
     self.detector = detector()
     self.beam_center = beam_center()
     self.energy_motor = energy_motor()
     self.resolution_motor = resolution_motor()
     self.protective_cover = protective_cover()
     self.transmission_motor = transmission_motor()
     self.scan_range = scan_range
     self.scan_exposure_time = scan_exposure_time
     self.scan_start_angle = scan_start_angle
     self.angle_per_frame = angle_per_frame
     self.image_nr_start = image_nr_start
     self.position = position
     self.photon_energy = photon_energy
     self.flux = flux
     self.transmission = transmission
     self.name_pattern = name_pattern
     self.directory = directory
     self._ntrigger = 1
     experiment.__init__(self)
Example #18
def main(argv):

    if len(argv) == 3:
        img_dir = os.path.abspath(argv[1])
        dataset_dir = os.path.abspath(argv[2])
    else:
        img_dir = os.path.abspath("./Samples/")
        dataset_dir = os.path.abspath("./Dataset")

    img_files = os.listdir(img_dir)
    print(img_files)

    img_path = []
    img_name = []
    for f in img_files:
        if ".jpg" in f:
            img_path.append(img_dir + "/" + f)
            img_name.append(f)

    print("example filepath: ", img_path[1])

    detector = d.detector(load_keras_model=False)

    detected = 0
    for i, f in enumerate(img_path):
        detector.newImage(f)
        I = detector.getROI()

        if I is not None:
            detector.saveDigits(dataset_dir, name=img_name[i])
            detected += 1

    print("Image directory:", img_dir)
    print("Dataset directory:", dataset_dir)
    print("detected ", detected, "of ", len(img_path))
Example #19
    def test_slice_gen2d(self):
        print('=== Testing slice_gen2d() ===')
        det = detector.detector()
        det.parse_detector(recon_folder + b'/data/det_sim.dat', norm_flag=-3)
        intens = np.arange(145 * 145 * 3).astype('f8').reshape(3, 145, 145)
        view = np.zeros(det.num_pix)

        angle = np.array([1.37 * np.pi])
        interp.slice_gen2d(angle, view, intens, det)
        self.assertAlmostEqual(view.mean(), 10512.)
        npt.assert_array_almost_equal(
            view[:5],
            [6674.995665, 6808.058374, 6941.174136, 7074.339593, 7207.551378])
        interp.slice_gen2d(angle, view, intens, det, rescale=1.)
        self.assertAlmostEqual(view.mean(), 9.1579227696454524)
        npt.assert_array_almost_equal(
            view[:5], [8.806124, 8.825862, 8.845226, 8.864229, 8.882885])

        angle = np.array([3.14 * np.pi])
        interp.slice_gen2d(angle, view, intens[1:], det)
        self.assertAlmostEqual(view.mean(), 31537.)
        npt.assert_array_almost_equal(view[:5], [
            34414.902109, 34489.21956, 34563.301629, 34637.146226, 34710.751266
        ])
        interp.slice_gen2d(angle, view, intens[1:], det, rescale=1.)
        self.assertAlmostEqual(view.mean(), 10.349731872416205)
        npt.assert_array_almost_equal(
            view[:5], [10.446245, 10.448402, 10.450548, 10.452682, 10.454805])
Example #20
 def __init__(self):
     self.distance_motor = PyTango.DeviceProxy('i11-ma-cx1/dt/dtc_ccd.1-mt_ts')
     self.wavelength_motor = PyTango.DeviceProxy('i11-ma-c03/op/mono1')
     self.det_mt_tx = PyTango.DeviceProxy('i11-ma-cx1/dt/dtc_ccd.1-mt_tx') #.read_attribute('position').value - 30.0
     self.det_mt_tz = PyTango.DeviceProxy('i11-ma-cx1/dt/dtc_ccd.1-mt_tz') #.read_attribute('position').value + 14.3
     self.detector = detector()
     self.pixel_size = 75e-6
Example #21
    def test_slice_merge2d(self):
        print('=== Testing slice_merge2d() ===')
        det = detector.detector()
        qmax = det.parse_detector(recon_folder + b'/data/det_sim.dat',
                                  norm_flag=-3)
        view = np.ascontiguousarray(det.pixels[:, 2])
        angle = np.array([1.37 * np.pi])
        model = np.zeros((3, 145, 145))
        weight = np.zeros_like(model)
        interp.slice_merge2d(angle, view, model, weight, det)
        npt.assert_array_almost_equal(model, weight)
        npt.assert_array_almost_equal(
            model[0, 88:91, 82:85],
            [[1.013543, 1.013526, 0.95921], [0.948738, 0.988016, 0.986792],
             [1.013526, 0.985034, 0.98392]])

        view = np.ascontiguousarray(det.pixels[:, 2]) * np.arange(det.num_pix)
        model = np.zeros((3, 145, 145))
        weight2 = np.zeros_like(model)
        interp.slice_merge2d(angle, view, model, weight2, det)
        npt.assert_array_almost_equal(weight, weight2)
        npt.assert_array_almost_equal(model[0, 88:91, 82:85],
                                      [[3590.950843, 3495.003523, 3221.593928],
                                       [3314.725198, 3367.85047, 3269.304046],
                                       [3513.177067, 3323.48564, 3226.194119]])
Example #22
def vrd_demo():
    from detector import detector
    im_path = 'img/3845770407_1a8cd41230_b.jpg'
    det = detector()
    vrdet = vrd_module()
    det_res = det.det_im(im_path)
    vrd_res = vrdet.relation_im(im_path, det_res)
    print(vrd_res)
Example #23
def setData():
    while True:
        print("hello world")
        now = datetime.datetime.now().strftime('%H:%M:%S')
        height = detector.detector()
        data = {'time': now, 'data': height}
        #data = {'time': now, 'data': random.randint(1, 10)}
        return jsonify(data)  # return the data to the client as a dictionary
Example #24
def detect(args):

    if (args.video):
        # Open the video file
        if not os.path.isfile(args.video):
            print("Input video file ", args.video, " doesn't exist")
            sys.exit(1)
        cap = cv.VideoCapture(args.video)
    else:
        # Webcam input
        cap = cv.VideoCapture(0)

    if (args.logpath):
        fieldsname = [
            'framecout', 'id', 'type', 'left', 'top', 'right', 'bottom',
            'confidence'
        ]
        logfile = open(args.logpath, 'w')
        logwriter = csv.DictWriter(logfile, fieldnames=fieldsname)

    detect = detector.detector("yolov3", "weights/yolov3.cfg",
                               "weights/yolov3.weights", "weights/coco.names")

    framecout = 0

    while cv.waitKey(1) < 0:

        # get frame from the video
        hasFrame, frame = cap.read()

        # Stop the program if reached end of video
        if not hasFrame:
            print("Done processing !!!")
            cv.waitKey(3000)
            break

        objlist = detect.detect(frame)

        for obj in objlist:
            drawBox(frame, obj)
            if args.logpath:
                logwriter.writerow({
                    'framecout': framecout,
                    'id': obj.id,
                    'type': obj.type,
                    'left': obj.left,
                    'top': obj.top,
                    'right': obj.right,
                    'bottom': obj.bottom,
                    'confidence': obj.confidence
                })

        framecout += 1

        cv.imshow('detect', frame)

    #cv.destroyAllWindows()
    return objlist
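A minimal sketch of how detect() above might be invoked from the command line; the flag names mirror the attributes the function reads (args.video, args.logpath) and are illustrative assumptions, not part of the original snippet.

import argparse

if __name__ == '__main__':
    # Hypothetical CLI wrapper around detect(); --video and --logpath are assumed flag spellings.
    parser = argparse.ArgumentParser(description='Run YOLOv3 detection on a video file or webcam stream')
    parser.add_argument('--video', help='path to an input video; omit to read from the webcam')
    parser.add_argument('--logpath', help='optional CSV file for per-frame detection logging')
    detect(parser.parse_args())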
Example #25
    def __init__(self, name, zpos, rmin, rmax, l, ang, ell, eln):
        #vmagnet.__init__(self)

        #upper part
        xup = rmin + (rmax-rmin)/2
        self.upper = detector(name+"_upper", zpos, xup, l, rmax-rmin, ang, ell, eln)
        self.upper.label = name+" #rightarrow"

        #lower part
        xdown = -rmin - (rmax-rmin)/2
        self.lower = detector(name+"_lower", zpos, xdown, l, rmax-rmin, ang, ell, eln)
        self.lower.label = "#leftarrow"
        self.lower.label_down = True

        ell[self.upper.name] = self.upper
        eln.append(self.upper.name)

        ell[self.lower.name] = self.lower
        eln.append(self.lower.name)
Example #26
def inspection_p():
    global solution
    solution = []
    solutionvar.set('')
    for i in range(6):
        for j in range(8):
            if 1 < i < 4 or 1 < j < 4:
                entry[i][j]['bg'] = 'gray'
    colors0 = detector()
    for i in range(6):
        for j in range(8):
            if 1 < i < 4 or 1 < j < 4:
                if colors0[i][j] != '':
                    entry[i][j]['bg'] = dic[colors0[i][j]]
                else:
                    entry[i][j]['bg'] = 'gray'
    with open('log.txt', mode='w') as f:
        f.write(str(colors0) + '\n')
    solution0, cost0 = solver(colors0)
    if solution0 == -1:
        print('cannot solve!')
        solutionvar.set('cannot solve!')
        return
    colors1 = rotate_colors(colors0)
    solution1, cost1 = solver(colors1)
    if cost0 <= cost1:
        solution = solution0
        cost = cost0
        with open('log.txt', mode='a') as f:
            f.write('0\n')
    else:
        solution = solution1
        cost = cost1
        with open('log.txt', mode='a') as f:
            f.write('1\n')
        move_actuator(0, 0, -90, 200)
        move_actuator(1, 0, 90, 200)
        sleep(0.3)
    if solution == -1:
        solution = []
        print('cannot solve!')
        solutionvar.set('cannot solve!')
        return
    with open('log.txt', mode='a') as f:
        f.write(str(cost) + '\n')
        f.write(str(solution) + '\n')
    solutionvar.set('cost: ' + str(cost) + ' ex: ' +
                    str(round(cost * 0.083, 2)) + 's')
    grab = solution[0][0][0] % 2
    for j in range(2):
        move_actuator(j, grab, 1000)
    sleep(0.2)
    for j in range(2):
        move_actuator(j, (grab + 1) % 2, 2000)
    print(solution)
Example #27
 def __init__(self, path=".", tracker=None):
     self.detector = detector.detector()
     self.w = 1920
     self.h = 1080
     self.clipDets = []
     if tracker is None:
         self.tracker = {"Vs": [2000], "Vxy": [100]}
     else:
         self.tracker = tracker
     self.abnormal = np.array([0, 0, 0, 0, 0])
     self.totalTracker = 0
Example #28
def main():
    img_file = sys.argv[1]
    img = cv2.imread(img_file)
    list_of_boxes = detector(img_file)  ##using opencv to detect barcode
    result = open("result.txt", 'w')  ##writing barcode to file

    for box in list_of_boxes:
        target_img = img[box[0]:box[1], box[2]:box[3]]  # crop the barcode region (rows, columns)
        barcode = decode(target_img)  #decode barcode to utf-8
        data = getInfo(barcode)  #using UPC database for pincode
        result.write(str(data) + '\n')  # writing pincode to file
Example #29
 def allocate_iterate(self):
     itr = iterate.iterate()
     det = detector.detector()
     dset = dataset.dataset(det)
     param = params.params()
     qmax = det.generate_detectors(config_fname)
     dset.generate_data(config_fname)
     param.generate_params(config_fname)
     dset.generate_blacklist(config_fname)
     itr.generate_iterate(config_fname, qmax, param, det, dset)
     return itr, det, dset, param, qmax
Example #30
def main():
    img_file = sys.argv[1]
    img = cv2.imread(img_file)
    list_of_boxes = detector(img_file)
    result = open("result.txt", 'w')

    for box in list_of_boxes:
        target_img = img[box[0]:box[1], box[2]:box[3]]
        barcode = decode(target_img)
        data = getInfo(barcode)
        result.write(str(data) + '\n')
Example #31
 def allocate_iterate(self):
     itr = iterate.iterate()
     det = detector.detector()
     dset = dataset.dataset(det)
     param = params.params()
     qmax = det.generate_detectors(config_fname)
     dset.generate_data(config_fname)
     param.generate_params(config_fname)
     dset.generate_blacklist(config_fname)
     itr.generate_iterate(config_fname, qmax, param, det, dset)
     return itr, det, dset, param, qmax
Example #32
def post():
    sentence = request.form['input']
    langCode = detector.detector(sentence)
    if (langCode != "ko"):
        trans_result = translator.translator(sentence)
        max_out, result, sorted_result = get_prediction(trans_result)
    else:
        max_out, result, sorted_result = get_prediction(sentence)

    obj['prediction'] = {'emotion': max_out, 'data': result}

    return obj
Example #33
 def __init__(self,
              vertical_range,
              horizontal_range,
              number_of_rows,
              number_of_columns,
              scan_exposure_time,
              scan_start_angle=None,
              scan_range=0.01,
              image_nr_start=1,
              scan_axis='horizontal', # 'horizontal' or 'vertical'
              direction_inversion=True,
              method='md2', # possible methods: "md2", "helical"
              zoom=None, # by default use the current zoom
              name_pattern='grid_$id',
              directory='/nfs/ruchebis/spool/2016_Run3/orphaned_collects'): 
     
     self.goniometer = goniometer()
     self.detector = detector()
     self.camera = camera()
     self.guillotine = protective_cover()
     self.beam_center = beam_center()
     
     self.scan_axis = scan_axis
     self.method = method
     self.vertical_range = vertical_range
     self.horizontal_range = horizontal_range
     self.shape = numpy.array((number_of_rows, number_of_columns))
     self.number_of_rows = number_of_rows
     self.number_of_columns = number_of_columns
     
     self.frame_time = scan_exposure_time
     self.count_time = self.frame_time - self.detector.get_detector_readout_time()
     
     self.scan_start_angle = scan_start_angle
     self.scan_range = scan_range
     
     if self.scan_axis == 'horizontal':
         self.line_scan_time = self.frame_time * self.number_of_columns
         self.angle_per_frame = self.scan_range / self.number_of_columns
     else:
         self.line_scan_time = self.frame_time * self.number_of_rows
         self.angle_per_frame = self.scan_range / self.number_of_rows
     
     self.image_nr_start = image_nr_start
     
     self.direction_inversion = direction_inversion
     
     self.name_pattern = name_pattern
     self.directory = directory
     
     self.method = method
     self.zoom = zoom
     experiment.__init__(self)
Example #34
    def __init__(self, model_path=None, reuse_variables=None):
        self.input_images = tf.placeholder(tf.float32,
                                           shape=[None, None, None, 3],
                                           name='input_images')
        self.input_rois = tf.placeholder(tf.float32,
                                         shape=[
                                             None, FLAGS.RoiHeight,
                                             FLAGS.MaxRoiWidth,
                                             FLAGS.sharedFeatureChannel
                                         ],
                                         name='input_rois')
        self.input_ws = tf.placeholder(tf.float32,
                                       shape=[
                                           None,
                                       ],
                                       name='input_ws')

        self.NUM_CLASSES = NUM_CLASSES
        self.decode_maps = decode_maps

        self.reuse_variables = reuse_variables
        with tf.variable_scope(tf.get_variable_scope(),
                               reuse=self.reuse_variables):
            self.sharedFeatures = sharedConv.model(self.input_images,
                                                   is_training=False)
            self.det = detector.detector(self.sharedFeatures)
            self.f_score = self.det.F_score
            self.f_geometry = self.det.F_geometry
            self.rg = recognizer.recognizer(self.input_rois,
                                            self.input_ws,
                                            self.NUM_CLASSES,
                                            1.,
                                            is_training=False)
            self.conf = self.rg.conf
            self.ans = self.rg.ans
            self.sess = None

        if model_path:
            self.checkpoint_path = model_path
            self.global_step = tf.get_variable(
                'global_step', [],
                initializer=tf.constant_initializer(0),
                trainable=False)
            self.variable_averages = tf.train.ExponentialMovingAverage(
                0.997, self.global_step)
            self.saver = tf.train.Saver(
                self.variable_averages.variables_to_restore())
            self.sess = tf.Session(config=tf.ConfigProto(
                gpu_options=tf.GPUOptions(
                    per_process_gpu_memory_fraction=0.05)))
            print('Restore from {}'.format(self.checkpoint_path))
            self.saver.restore(self.sess, self.checkpoint_path)
Example #35
 def check_cargo(self, scr):
     template = Image.open('img_templates/cargo.jpg')
     coords = detector(scr, template)
     pyautogui.moveTo(coords[0], coords[1])
     time.sleep(1)
     sc = ImageGrab.grab()
     text = read_screen_neg(sc, (coords[0] - 90, coords[1] - 130, coords[0] + 75, coords[1] - 15), 100)
     text = text.replace('%', '')
     idx = text.find('Ore Hold')
     try:
         self.cargo = float(text[idx + 8:idx + 8 + 6])
     except:
         self.cargo = 0
Example #36
    def __init__(self, *args):
        """
        Description:
        """
        GenericDiffractometer.__init__(self, *args)

        # Hardware objects ----------------------------------------------------
        self.zoom_motor_hwobj = None
        self.omega_reference_motor = None
        self.centring_hwobj = None
        self.minikappa_correction_hwobj = None
        self.nclicks = None
        self.step = None
        self.centring_method = None
        self.collecting = False

        # Channels and commands -----------------------------------------------
        self.chan_calib_x = None
        self.chan_calib_y = None
        self.chan_current_phase = None
        self.chan_head_type = None
        self.chan_fast_shutter_is_open = None
        self.chan_state = None
        self.chan_status = None
        self.chan_sync_move_motors = None
        self.chan_scintillator_position = None
        self.chan_capillary_position = None
        self.cmd_start_set_phase = None
        self.cmd_start_auto_focus = None
        self.cmd_get_omega_scan_limits = None
        self.cmd_save_centring_positions = None
        self.centring_time = None
        # Internal values -----------------------------------------------------
        self.use_sc = False
        self.omega_reference_pos = [0, 0]
        self.reference_pos = [680, 512]

        self.goniometer = goniometer()
        self.camera = camera()
        self.detector = detector()

        self.md2_to_mxcube = dict(
            [(key, value) for key, value in self.motor_name_mapping]
        )
        self.mxcube_to_md2 = dict(
            [(value, key) for key, value in self.motor_name_mapping]
        )

        self.log = logging.getLogger("HWR")
Example #37
    def __init__(self, images, brboxes, reuse_variables=None):
        self.reuse_variables = reuse_variables
        with tf.variable_scope(tf.get_variable_scope(), reuse=self.reuse_variables):
            self.sharedFeatures = sharedConv.model(images, is_training=True)
            self.det = detector.detector(self.sharedFeatures)
            self.rois, self.ws = RoiRotate.RoiRotate(self.sharedFeatures, FLAGS.features_stride)(brboxes)
            self.rg = recognizer.recognizer(self.rois, self.ws, NUM_CLASSES, FLAGS.keepProb, is_training=True)

        # add summary
        if self.reuse_variables is None:
            org_rois, org_ws = RoiRotate.RoiRotate(images, 1)(brboxes, expand_w=60)
            # org_rois shape [b, 8, 64, 3]
            tf.summary.image('input', images)
            tf.summary.image('score_map_pred', self.det.F_score * 255)
            tf.summary.image('geo_map_0_pred', self.det.F_geometry[:, :, :, 0:1])
            tf.summary.image('org_rois', org_rois, max_outputs=12)
Example #38
 def test_parse_detector(self):
     print('=== Testing parse_detector() ===')
     det = detector.detector()
     self.assertAlmostEqual(det.parse_detector(recon_folder+b'/data/det_sim.dat'), 70.32817314646061) 
     self.assertEqual(det.num_dfiles, 0)
     self.det_sim_tests(det)
     det.parse_detector(recon_folder+b'/data/det_sim.dat')
     
     det_fname = recon_folder+b'/data/det_sim_test.dat'
     with open(recon_folder+b'/data/det_sim.dat', 'r') as f:
         lines = f.readlines()
     lines[0] = lines[0].split()[0]+'\n'
     with open(det_fname, 'w') as f:
         f.writelines(lines)
     det.parse_detector(det_fname)
     self.det_sim_tests(det, old_style=True)
     os.remove(det_fname)
Example #39
    def __init__(self,
                 directory,
                 name_pattern='pe_%.3feV_ts_%.3fmm_tx_%.3fmm_tz_%.3fmm_$id',
                 photon_energies=None,
                 tss=None,
                 txs=None,
                 tzs=None,
                 scan_range=0.1,
                 scan_exposure_time=0.025,
                 angle_per_frame=0.1,
                 direct_beam=True,
                 analysis=None,
                 handle_detector_beamstop=False):

        experiment.__init__(self,
                            name_pattern=name_pattern,
                            directory=directory,
                            analysis=analysis)

        self.directory = directory
        self.name_pattern = name_pattern
        self.photon_energies = photon_energies
        self.tss = tss
        self.txs = txs
        self.tzs = tzs
        self.scan_range = scan_range
        self.scan_exposure_time = scan_exposure_time
        self.angle_per_frame = angle_per_frame
        self.direct_beam = direct_beam
        self.nimages = int(self.scan_range / self.angle_per_frame)

        #actuators
        self.detector = detector()
        self.goniometer = goniometer()
        self.energy_motor = energy_motor()
        self.transmission_motor = transmission_motor()

        self.capillary_park_position = 80
        self.aperture_park_position = 80
        self.detector_beamstop_park_position = 18.5
        self.handle_detector_beamstop = handle_detector_beamstop

        self.parameter_fields = self.parameter_fields.union(
            beamcenter_calibration.specific_parameter_fields)

        print('self.parameter_fields', self.parameter_fields)
Example #40
 def test_slice_merge3d(self):
     print('=== Testing slice_merge3d() ===')
     det = detector.detector()
     det.parse_detector(recon_folder+b'/data/det_sim.dat')
     view = np.ascontiguousarray(det.pixels[:,3])
     quat = np.array([np.sqrt(0.86),0.1,0.2,0.3])
     model = np.zeros(3*(145,))
     weight = np.zeros_like(model)
     interp.slice_merge3d(quat, view, model, weight, det)
     npt.assert_array_almost_equal(model, weight)
     npt.assert_array_almost_equal(model[103:106,68:71,82:85], [[[0.05970267, 0.86777407, 0.08261854], [0.29557584, 0.6624112, 0.00868513], [0.69197243, 0.40168333, 0.]], [[0., 0.69936496, 0.34398347], [0.07919628, 1.25519294, 0.0490487], [0.38575382, 0.65815609, 0.]], [[0., 0.45319003, 0.65207203], [0.01374454, 0.68324196, 0.25491581], [0.12366799, 0.84800088, 0.05462553]]])
     
     view = np.ascontiguousarray(det.pixels[:,3])*np.arange(det.num_pix)
     model = np.zeros(3*(145,))
     weight2 = np.zeros_like(model)
     interp.slice_merge3d(quat, view, model, weight2, det)
     npt.assert_array_almost_equal(weight, weight2)
     npt.assert_array_almost_equal(model[103:106,68:71,82:85], [[[480.23952805, 7053.79230354, 672.87605846], [2377.76518912, 5341.74335349, 70.74035788], [5519.30333337, 3220.70729727, 0.]], [[0., 5738.38868753, 2835.8821622], [640.53422865, 10226.00092727, 403.93064498], [3106.35276845, 5325.18474032, 0.]], [[0., 3752.37021246, 5424.77431879], [111.97675292, 5624.55664759, 2102.41717762], [1007.59124681, 6925.69283809, 450.55910447]]])
Example #41
 def test_slice_merge2d(self):
     print('=== Testing slice_merge2d() ===')
     det = detector.detector()
     qmax = det.parse_detector(recon_folder+b'/data/det_sim.dat', norm_flag=-3)
     view = np.ascontiguousarray(det.pixels[:,2])
     angle = np.array([1.37*np.pi])
     model = np.zeros((3, 145, 145))
     weight = np.zeros_like(model)
     interp.slice_merge2d(angle, view, model, weight, det)
     npt.assert_array_almost_equal(model, weight)
     npt.assert_array_almost_equal(model[0,88:91,82:85], [[1.013543, 1.013526, 0.95921],[0.948738, 0.988016, 0.986792], [1.013526, 0.985034, 0.98392]])
     
     view = np.ascontiguousarray(det.pixels[:,2])*np.arange(det.num_pix)
     model = np.zeros((3, 145, 145))
     weight2 = np.zeros_like(model)
     interp.slice_merge2d(angle, view, model, weight2, det)
     npt.assert_array_almost_equal(weight, weight2)
     npt.assert_array_almost_equal(model[0,88:91,82:85], [[3590.950843, 3495.003523, 3221.593928], [3314.725198, 3367.85047, 3269.304046], [3513.177067, 3323.48564, 3226.194119]])
Example #42
 def __init__(self,
              scan_range,
              scan_exposure_time,
              scan_start_angle,
              angle_per_frame,
              name_pattern,
              directory='/nfs/ruchebis/spool/2016_Run3/orphaned_collects',
              image_nr_start=1,
              helical=False):
     
     self.goniometer = goniometer()
     self.detector = detector()
     self.beam_center = beam_center()
     
     self.detector.set_trigger_mode('exts')
     self.detector.set_nimages_per_file(100)
     self.detector.set_ntrigger(1)
     scan_range = float(scan_range)
     scan_exposure_time = float(scan_exposure_time)
     
     nimages, rest = divmod(scan_range, angle_per_frame)
     
     if rest > 0:
         nimages += 1
         scan_range += rest*angle_per_frame
         scan_exposure_time += rest*angle_per_frame/scan_range
         
     frame_time = scan_exposure_time/nimages
     
     self.scan_range = scan_range
     self.scan_exposure_time = scan_exposure_time
     self.scan_start_angle = scan_start_angle
     self.angle_per_frame = angle_per_frame
     
     self.nimages = int(nimages)
     self.frame_time = float(frame_time)
     self.count_time = self.frame_time - self.detector.get_detector_readout_time()
     
     self.name_pattern = name_pattern
     self.directory = directory
     self.image_nr_start = image_nr_start
     self.helical = helical
     self.status = None
Example #43
 def test_slice_gen2d(self):
     print('=== Testing slice_gen2d() ===')
     det = detector.detector()
     det.parse_detector(recon_folder+b'/data/det_sim.dat', norm_flag=-3)
     intens = np.arange(145*145*3).astype('f8').reshape(3,145,145)
     view = np.zeros(det.num_pix)
     
     angle = np.array([1.37*np.pi])
     interp.slice_gen2d(angle, view, intens, det)
     self.assertAlmostEqual(view.mean(), 10512.)
     npt.assert_array_almost_equal(view[:5], [6674.995665, 6808.058374, 6941.174136, 7074.339593, 7207.551378])
     interp.slice_gen2d(angle, view, intens, det, rescale=1.)
     self.assertAlmostEqual(view.mean(), 9.1579227696454524)
     npt.assert_array_almost_equal(view[:5], [8.806124, 8.825862, 8.845226, 8.864229, 8.882885])
     
     angle = np.array([3.14*np.pi])
     interp.slice_gen2d(angle, view, intens[1:], det)
     self.assertAlmostEqual(view.mean(), 31537.)
     npt.assert_array_almost_equal(view[:5], [34414.902109, 34489.21956, 34563.301629, 34637.146226, 34710.751266])
     interp.slice_gen2d(angle, view, intens[1:], det, rescale=1.)
     self.assertAlmostEqual(view.mean(), 10.349731872416205)
     npt.assert_array_almost_equal(view[:5], [10.446245, 10.448402, 10.450548, 10.452682, 10.454805])
Example #44
 def test_slice_gen3d(self):
     print('=== Testing slice_gen3d() ===')
     det = detector.detector()
     det.parse_detector(recon_folder+b'/data/det_sim.dat')
     intens = 1.e-9*np.fromfile(recon_folder+b'/data/intensities.bin').reshape(3*(145,))
     view = np.zeros(det.num_pix)
     
     quat = np.array([1.,0,0,0])
     interp.slice_gen3d(quat, view, intens, det)
     self.assertAlmostEqual(view.mean(), 223.922218946)
     npt.assert_array_almost_equal(view[:5], [0.03021473, 0.02554173, 0.01861631, 0.01085438, 0.00459315])
     interp.slice_gen3d(quat, view, intens, det, rescale=1.)
     self.assertAlmostEqual(view.mean(), 1.86882056344)
     npt.assert_array_almost_equal(view[:5], [-3.49942586, -3.66744152, -3.98371703, -4.5231864, -5.38318919])
     
     quat = np.array([np.sqrt(0.86),0.1,0.2,0.3])
     interp.slice_gen3d(quat, view, intens, det)
     self.assertAlmostEqual(view.mean(), 184.449773553)
     npt.assert_array_almost_equal(view[:5], [0.00039123, 0.00014522, 0.00057308, 0.00185642, 0.00371838])
     interp.slice_gen3d(quat, view, intens, det, rescale=1.)
     self.assertAlmostEqual(view.mean(), 0.567310517859)
     npt.assert_array_almost_equal(view[:5], [-7.84620536, -8.83729446, -7.46449246, -6.28910363, -5.59446611])
Example #45
def main():
        expname = 'myexpe/'
        log = 'generate ground truth bounding box for fine detector test \r\n'  
        data = 'icdar2013word' # data for training/testing 
        eMode = True # edge detection
        coarseCodeBookName = '../codebooks/coarseDet/codeBook.npy' # codebook name
        fineCodeBookName =   '../codebooks/fineDet/codeBook.npy' # codebook name

        mode = 'adaboost' # classification mode for detector
        lMode = 'foreground' # foreground/whitespace
        fMode = 'context' # local or contextual
        psize = 32 
        ssize = 16 
        nob = 3
        ratio = 0.9
        rrange = 30
        para0 = (float(psize - ssize)/psize)**2
        para1 = 1 - ratio
        rpower = ratio ** numpy.asarray(range(rrange))

        # define parameters ###########################################################:

        coarseclfname = 'coarseDet'
        fineclfname = 'fineDet3'
        pdirname = '../data/' # dir contains all experiment data
        datalist = 'datalist'
        cdirname = os.path.join(pdirname, expname)
        clfdir = os.path.join(cdirname, 'clf/') # dir to save classifier
        rawdir = os.path.join(cdirname, 'raw/') # dir for original image
        npydir = os.path.join(cdirname, 'npy/') # dir for feature and label npy
        roidir = os.path.join(cdirname, 'roi/') # dir for region of interest of coarse detector
        roitestdir = os.path.join(cdirname, 'roitest/') # dir for region of interest fine detector
        predir = os.path.join(cdirname, 'pre/') # dir for preprocessing
        preMapdir = os.path.join(cdirname, 'preMap/') # dir for preprocessing hotmaps
        predtxtdir = os.path.join(cdirname, 'pretxt/') # dir for txt file of bounding boxes.
        resdir = os.path.join(cdirname, 'res/') # dir for results
        mapdir = os.path.join(cdirname, 'map/') # dir for hotmaps
        pmapdir = os.path.join(cdirname, 'pmap/') # dir for pixel maps
        txtdir = os.path.join(cdirname, 'txt/') # dir for bounding box txt files
        # write log file, a simple description of the experiment
        with open(os.path.join(cdirname, 'log.txt'), 'a') as f:
                f.write(log)

        # parse data ###################################################################:
        if data == 'icdar2003word':
                # define directories and filenames:
                imdir = '../icdar2003/icdar2003/SceneTrialTest' # containing original image
                xmlfilename = '../icdar2003/icdar2003/SceneTrialTest/locations.xml'
                myParser = parseWord2003()
                dataList = myParser.parseData(imdir, xmlfilename)

        elif data == 'icdar2013word':
                #imdir = '../icdar2013/task21_22/train/image' # containing original image
                #bbdir = '../icdar2013/task21_22/train/word_label' # containing bb text files.
                imdir = '../icdar2013/task21_22/test/image' # containing original image
                bbdir = '../icdar2013/task21_22/test/word_label' # containing bb text files.
                myParser = parseWord2013()
                dataList = myParser.parseData(imdir, bbdir)

        elif data == 'icdar2013char':
                imdir = '../icdar2013/task21_22/train/image' # containing original image
                bbdir = '../icdar2013/task21_22/train/char_label' # containing bb text files
                myParser = parseChar2013()
                dataList = myParser.parseData(imdir, bbdir)

        else:
                raise Exception('No data named:'+data+' found!')

        dataList = myParser.prepareImg(dataList, imdir, rawdir)
        pickle_save(dataList, datalist, cdirname)
        # extract features ############################################################:
        dataList = pickle_load(datalist, cdirname)
        codebook = codebook_load(coarseCodeBookName)
        myDetector = detector(codebook, dataList,
                psize, ssize, nob, rpower,
                para0, para1,
                lMode, fMode, eMode )

        myDetector.image_train(rawdir, npydir)
        # training classsifier ########################################################:
        myClassifier = classifier(mode)
        myClassifier.data_load(npydir) # load training data
        myClassifier.clf_train() # train classifier
        myClassifier.clf_save(coarseclfname, clfdir) # save classifier
        myClassifier.clf_load(coarseclfname, clfdir)
        myClassifier.clf_test() # test classifier
Example #46
def main():
    # define parameters ###########################################################:
    expname = 'myexpe/'
    data = 'icdar2013word' # data for training/testing
    eMode = True # edge detection
    CodeBookName1 = '../codebooks/Patch/codeBook.npy' # codebook name
    CodeBookName2 =   '../codebooks/Verify/codeBook.npy' # codebook name

    coarseclfname = 'coarse'
    fineclfname = 'fine'
    wordgraphclfname = 'wordgraph'
    pdirname = '../data/' # dir contains all experiment data
    cdirname = os.path.join(pdirname, expname)
    clfdir = os.path.join(cdirname, 'clf/') # dir to save classifier
    rawdir = os.path.join(cdirname, 'raw/') # dir for original image
    npydir = os.path.join(cdirname, 'npy/') # dir for feature and label npy
    roitestdir = os.path.join(cdirname, 'roitest/') # dir for region of interest fine detector
    predir = os.path.join(cdirname, 'pre/') # dir for preprocessing
    predtxtdir = os.path.join(cdirname, 'pretxt/') # dir for txt file of bounding boxes.
    txtdir = os.path.join(cdirname, 'txt/') # dir for bounding box txt files
    # applying coarse detector ###########################################################:
    mode = 'adaboost' # classification mode for detector
    lMode = 'foreground' # foreground/whitespace
    fMode = 'context' # local or contextual
    psize = 32
    ssize = 16
    nob = 3
    ratio = 0.9
    rrange = 30
    para0 = (float(psize - ssize)/psize)**2
    para1 = 1 - ratio
    rpower = ratio ** numpy.asarray(range(rrange))

    data = pickle_load('detect', cdirname)
    codebook = codebook_load(CodeBookName1)
    myDetector = detector(codebook, data,
            psize, ssize,
            nob, rpower,
            para0, para1,
            lMode, fMode, eMode)
    myClassifier = classifier()
    myClassifier.clf_load(coarseclfname, clfdir)
    myDetector.image_test(rawdir, predir, myClassifier.classifier)
    # applying fine detector and region growing ###########################################################:
    mode = 'adaboost' # classification mode for detector
    lMode = 'foreground' # foreground/whitespace
    fMode = 'local' # local or contextual
    rpower = ratio ** numpy.asarray(range(rrange))
    codebook = codebook_load(CodeBookName2)
    data = pickle_load('region', cdirname)
    myDetector = detector(codebook, data,
            psize, ssize, nob, rpower,
            para0, para1,
            lMode, fMode, eMode)
    myClassifier = classifier(mode)
    myClassifier.clf_load(fineclfname, clfdir)
    myDetector.roi_test(predir, rawdir, roitestdir, myClassifier.classifier)
    # applying word graph ###########################################################:
    myClassifier = classifier()
    myClassifier.clf_load(wordgraphclfname, clfdir)
    wordbb = wordGraph_test(roitestdir, myClassifier.classifier)
    wordbb2pred(wordbb, predtxtdir)
Example #47
def featLearn(data = 'icdar2013word'):

    # define parameters ###########################################################:
    psize = 32 # size of image patch
    ssize = 16 # step size
    ratio = 0.90 # ratio that each time image resized by
    rpower = ratio**numpy.asarray(range(30)) # number of power ratio is times by
    para0 = 0.8 # overlapping threshold from Coates paper
    para1 = 0.3 # width/height threshold from Coates paper
    nob = 3 # number of blocks
    expname = 'ft0/' # current experiment dir

    codeBookName = '../codebooks/codebook/codeBook.npy' # codebook name
    lMode = 'foreground' # {'foreground', 'whitespace', 'whitespace_strict'}
    fMode = 'local' #{'original' (no convolution), 'local', 'context'}
    eMode = False

    pdirname = '../data/' # dir contains all experiment data
    parsedGTName = 'detect'
    cdirname = os.path.join(pdirname, expname)
    labdir = os.path.join(cdirname, 'raw/') # dir for original image
    npydir = os.path.join(cdirname, 'npy/') # dir for feature and label npy

    # parse data ###################################################################:
    if data == 'icdar2003word':
        # define directories and filenames:
        imdir = '../icdar2003/icdar2003/SceneTrialTest' # containing original image
        xmlfilename = '../icdar2003/icdar2003/SceneTrialTest/locations.xml'
        myParser = parseWord2003()
        groundTruth = myParser.parseData(imdir, xmlfilename)

    elif data == 'icdar2013word':
        imdir = '../icdar2013/task21_22/train/image' # containing original image
        bbdir = '../icdar2013/task21_22/train/word_label' # containing bb text files.
        #imdir = '../icdar2013/task21_22/test/image' # containing original image
        #bbdir = '../icdar2013/task21_22/test/word_label' # containing bb text files.
        myParser = parseWord2013()
        groundTruth = myParser.parseData(imdir, bbdir)

    elif data == 'icdar2013char':
        imdir = '../icdar2013/task21_22/train/image' # containing original image
        bbdir = '../icdar2013/task21_22/train/char_label' # containing bb text files
        myParser = parseChar2013()
        groundTruth = myParser.parseData(imdir, bbdir)

    else:
        raise Exception('No data named:'+data+' found!')
    groundTruth = myParser.prepareImg(groundTruth, imdir, labdir)
    pickle_save(groundTruth, parsedGTName, cdirname)

    # extract features ############################################################:
    groundTruth = pickle_load(parsedGTName, cdirname)
    codebook = codebook_load(codeBookName)
    myDetector = detector(codebook, groundTruth,
            psize, ssize, nob, rpower,
            para0, para1,
            lMode, fMode, eMode)
    myDetector.image_train(labdir, npydir)

    # load image patches
    kk = 1000
    ii = 1000
    data = dataSample(data)
    w = numpy.ones(data.shape[0])
    D = ckmean.init(data, kk)
    D, idx, dm = ckmean.update(data, D, ii, w)
    cbdir = '../codebooks/codebook/'
    numpy.save(os.path.join(cbdir, 'codeBook'), D)
    numpy.save(os.path.join(cbdir, 'codeBookIdx'), idx)
    numpy.save(os.path.join(cbdir, 'codeBookErr'), dm)
Example #48
#!/usr/bin/env python
from detector import detector
from goniometer import goniometer
from sweep import sweep
from reference_images import reference_images
from beam_center import beam_center
from raster import raster
from protective_cover import protective_cover
from camera import camera
from resolution import resolution

if __name__ == '__main__':
    import optparse
    parser = optparse.OptionParser() 
    # testbed ip 62.12.151.50
    parser.add_option('-i', '--ip', default="172.19.10.26", type=str, help='IP address of the server')
    parser.add_option('-p', '--port', default=80, type=int, help='port on which to which it listens to')
    
    options, args = parser.parse_args()
     
    d = detector(host=options.ip, port=options.port)
    g = goniometer()
Example #49
 def create_det(self):
     det = detector.detector()
     det.parse_detector(recon_folder+b'/data/det_sim.dat')
     return det