Example #1
0
    def calc_data_from_files_triang_ransac(self, noload=False):
        """Build (or load cached) 3D point data via RANSAC triangulation.

        For every keypoint node in the match graph, estimates a 3D position
        from the node plus all of its matched neighbours using RANSAC.
        Results are cached on disk via DataCache.

        :param noload: when True, ignore any cached data and recompute.
        :return: tuple (imgs, kpts, points, pointData) where pointData is a
            list of (descriptor, 3D point, image index, keypoint index).
        """
        imgs = [cv2.imread(f) for f in self.filenames]
        matches, kpts = self.getMatches()

        import DataCache as DC
        datafile = DC.POINTS4D_HOMOGR_TRIANG_RANSAC
        # Cached result is reused unless the caller forces a recompute.
        data = None if noload else DC.getData(datafile)
        if data is None:
            graph = self.getGraph(matches, kpts)

            # NOTE(review): `points` is never filled on this branch; the
            # cache stores an empty list here -- confirm this is intentional.
            points = []
            pointData = []
            tmats = [MarkerDetect.loadMat(f) for f in self.filenames]

            num_done = 0
            for node in graph:
                # Progress output every 1000 nodes (Python 2 print statement).
                if num_done % 1000 == 0:
                    print num_done, len(graph.keys()), len(pointData)
                num_done += 1

                # RANSAC over the node together with all of its neighbours.
                nodes = [n for n in graph[node]]
                nodes.append(node)
                point, inliers = self.getCliquePosRANSAC(nodes, kpts, tmats, err_thresh=10)

                if point is not None:
                    im_idx = node[0]   # image index of this keypoint
                    kpt_idx = node[1]  # keypoint index within that image
                    pointData.append((kpts[im_idx][1][kpt_idx], point, im_idx, kpt_idx))

            DC.saveData(datafile, (points, pointData))
        else:
            points, pointData = data

        return imgs, kpts, points, pointData
Example #2
0
def getHackerNewsSimpleContent(fetcherURL,
                               fetcherBackupURL,
                               id,
                               page='',
                               format='json',
                               url='',
                               referer='',
                               remote_addr=''):
    """Fetch HN content for *id*, caching only non-paginated requests."""
    # Paginated content is never cached -- fetch and return directly.
    if page:
        return APIUtils.parsePageContent(fetcherURL, fetcherBackupURL, id,
                                         page, format)

    cachedData = getCache(id, format)
    if cachedData:
        return cachedData

    hnData = APIUtils.parsePageContent(fetcherURL, fetcherBackupURL, id,
                                       page, format)
    if not hnData:
        logging.warning(
            'getHackerNewsSimpleContent: unable to retrieve data for id %s'
            % id)
        return ''

    logging.debug(
        'getHackerNewsSimpleContent: storing cached value for id %s' % id)
    DataCache.putData(id, format, APIUtils.removeNonAscii(hnData), url,
                      referer, remote_addr)
    return hnData
    def calc_data_from_files_triang_ransac(self, noload=False):
        """Build (or load cached) 3D point data via RANSAC triangulation.

        For every keypoint node in the match graph, estimates a 3D position
        from the node plus all of its matched neighbours using RANSAC.
        Results are cached on disk via DataCache.

        :param noload: when True, ignore any cached data and recompute.
        :return: tuple (imgs, kpts, points, pointData) where pointData is a
            list of (descriptor, 3D point, image index, keypoint index).
        """
        imgs = [cv2.imread(f) for f in self.filenames]
        matches, kpts = self.getMatches()

        import DataCache as DC
        datafile = DC.POINTS4D_HOMOGR_TRIANG_RANSAC
        # Cached result is reused unless the caller forces a recompute.
        data = None if noload else DC.getData(datafile)
        if data is None:
            graph = self.getGraph(matches, kpts)

            # NOTE(review): `points` is never filled on this branch; the
            # cache stores an empty list here -- confirm this is intentional.
            points = []
            pointData = []
            tmats = [MarkerDetect.loadMat(f) for f in self.filenames]

            num_done = 0
            for node in graph:
                # Progress output every 1000 nodes (Python 2 print statement).
                if num_done % 1000 == 0:
                    print num_done, len(graph.keys()), len(pointData)
                num_done += 1

                # RANSAC over the node together with all of its neighbours.
                nodes = [n for n in graph[node]]
                nodes.append(node)
                point, inliers = self.getCliquePosRANSAC(nodes, kpts, tmats, err_thresh=10)

                if point is not None:
                    im_idx = node[0]   # image index of this keypoint
                    kpt_idx = node[1]  # keypoint index within that image
                    pointData.append((kpts[im_idx][1][kpt_idx], point, im_idx, kpt_idx))

            DC.saveData(datafile, (points, pointData))
        else:
            points, pointData = data

        return imgs, kpts, points, pointData
Example #4
0
def getHackerNewsLatestContent(page='',
                               format='json',
                               url='',
                               referer='',
                               remote_addr='',
                               limit=1):
    """Return the latest HN stories; only unpaginated results are cached."""
    limit = int(limit)
    # Paginated requests bypass the cache entirely.
    if page:
        return APIUtils.parsePageContent(AppConfig.hackerNewsURL,
                                         AppConfig.hackerNewsURLBackup,
                                         '/latest', page, format, limit)

    id = '/latest/%s' % limit
    cachedData = getCache(id, format)
    if cachedData:
        return cachedData

    hnData = APIUtils.parsePageContent(AppConfig.hackerNewsURL,
                                       AppConfig.hackerNewsURLBackup,
                                       '/latest', page, format, limit)
    if not hnData:
        logging.warning(
            'getHackerNewsLatestContent: unable to retrieve data for id %s'
            % id)
        return ''

    logging.debug(
        'getHackerNewsLatestContent: storing cached value for id %s' % id)
    DataCache.putData(id, format, APIUtils.removeNonAscii(hnData), url,
                      referer, remote_addr)
    return hnData
Example #5
0
def getHackerNewsSubmittedContent(user,
                                  format='json',
                                  url='',
                                  referer='',
                                  remote_addr=''):
    """Return a user's submitted stories, serving from cache when possible.

    :param user: HN username whose submissions are requested.
    :param format: output format passed to the parser (e.g. 'json').
    :param url: request URL stored alongside the cached entry.
    :param referer: request referer stored alongside the cached entry.
    :param remote_addr: client address stored alongside the cached entry.
    :return: the (possibly cached) rendered content, or '' on failure.
    """
    apiURL = "%s/submitted?id=%s" % (AppConfig.hackerNewsURL, user)
    apiURLBackup = "%s/submitted?id=%s" % (AppConfig.hackerNewsURLBackup, user)
    id = '/submitted/%s' % (user)
    # Fix: removed a dead `cachedData = None` store that was immediately
    # overwritten by the getCache() call.
    cachedData = getCache(id, format)
    if (cachedData):
        return cachedData
    else:
        hnData = APIUtils.parsePageContent(apiURL, apiURLBackup, '/submitted',
                                           None, format)
        if (hnData):
            logging.debug(
                'getHackerNewsSubmittedContent: storing cached value for id %s'
                % id)
            DataCache.putData(id, format, APIUtils.removeNonAscii(hnData), url,
                              referer, remote_addr)
            return hnData
        else:
            logging.warning(
                'getHackerNewsSubmittedContent: unable to retrieve data for id %s'
                % id)
            return ''
    def calc_data_from_files_triang_simple(self, noload=False):
        """Build (or load cached) 3D point data by pairwise triangulation.

        For every edge (node, neighbour) in the match graph, triangulates a
        3D point from the two keypoint image positions using per-image
        projection matrices. Results are cached on disk via DataCache.

        :param noload: when True, ignore any cached data and recompute.
        :return: tuple (imgs, kpts, points, pointData); pointData holds
            (descriptor, 3x1 point, image index, keypoint index) entries.
        """
        imgs = [cv2.imread(f) for f in self.filenames]
        matches, kpts = self.getMatches()

        import DataCache as DC
        datafile = DC.POINTS4D_HOMOGR_TRIANG_SIMPLE
        # Cached result is reused unless the caller forces a recompute.
        data = None if noload else DC.getData(datafile)
        if data is None:
            graph = self.getGraph(matches, kpts)

            # NOTE(review): `points` is never filled on this branch; the
            # cache stores an empty list here -- confirm this is intentional.
            points = []
            pointData = []
            tmats = [MarkerDetect.loadMat(f) for f in self.filenames]
            # Projection matrix per image: camera matrix times pose.
            projMats = [np.dot(Utils.camMtx, tmat) for tmat in tmats]
            for node in graph:
                for neigh in graph[node]:
                    im_idx1 = node[0]    # image index of the first keypoint
                    im_idx2 = neigh[0]   # image index of the matched keypoint
                    kpt_idx1 = node[1]
                    kpt_idx2 = neigh[1]

                    imgPt1 = np.array(kpts[im_idx1][0][kpt_idx1].pt)
                    imgPt2 = np.array(kpts[im_idx2][0][kpt_idx2].pt)

                    # Homogeneous triangulation, then drop the scale row.
                    p4d = self._triangulate(projMats[node[0]], projMats[neigh[0]], imgPt1, imgPt2)
                    p4d = p4d[:3, :]
                    pointData.append((kpts[im_idx1][1][kpt_idx1], p4d, im_idx1, kpt_idx1))

            DC.saveData(datafile, (points, pointData))
        else:
            points, pointData = data

        return imgs, kpts, points, pointData
Example #7
0
def getHackerNewsComments(articleId,
                          format='json',
                          url='',
                          referer='',
                          remote_addr=''):
    """Return the comment thread for *articleId*, serving from cache if set."""
    apiURL = "%s/item?id=%s" % (AppConfig.hackerNewsURL, articleId)
    apiURLBackup = "%s/item?id=%s" % (AppConfig.hackerNewsURLBackup, articleId)
    id = '/comments/%s' % (articleId)

    cachedData = getCache(id, format)
    if cachedData:
        return cachedData

    hnData = APIUtils.parseCommentsContent(apiURL, apiURLBackup, '/comments',
                                           None, format)
    if not hnData:
        logging.warning(
            'getHackerNewsComments: unable to retrieve data for id %s' %
            id)
        return ''

    logging.debug(
        'getHackerNewsComments: storing cached value for id %s' % id)
    DataCache.putData(id, format, APIUtils.removeNonAscii(hnData), url,
                      referer, remote_addr)
    return hnData
Example #8
0
def evaluate(timeout=None):
  """Refresh the now-playing values if the current source is playing."""
  current_source = cache.waitValue(cmdSource.getId(), timeout)
  if _isPlayingSource(current_source):
    # Wait for the full display page (NSE line 8) before decoding lines.
    cache.waitValue(cmdNse.getId() + '8')
    _evaluateValues()
  else:
    _clearValues()
Example #9
0
def _evaluateValues():
  """Copy title/artist/album from the display cache, or clear when idle."""
  header = cache.getValue('line0')
  if not header.startswith("Now Playing"):
    _clearValues()
    return
  # Display lines: 1 = title, 2 = artist, 4 = album.
  for key, line in (('title', 'line1'), ('artist', 'line2'), ('album', 'line4')):
    _cachedValues[key].update(cache.getValue(line))
Example #10
0
    def calc_data_from_files_triang_simple(self, noload=False):
        """Build (or load cached) 3D point data by pairwise triangulation.

        For every edge (node, neighbour) in the match graph, triangulates a
        3D point from the two keypoint image positions using per-image
        projection matrices. Results are cached on disk via DataCache.

        :param noload: when True, ignore any cached data and recompute.
        :return: tuple (imgs, kpts, points, pointData); pointData holds
            (descriptor, 3x1 point, image index, keypoint index) entries.
        """
        imgs = [cv2.imread(f) for f in self.filenames]
        matches, kpts = self.getMatches()

        import DataCache as DC
        datafile = DC.POINTS4D_HOMOGR_TRIANG_SIMPLE
        # Cached result is reused unless the caller forces a recompute.
        data = None if noload else DC.getData(datafile)
        if data is None:
            graph = self.getGraph(matches, kpts)

            # NOTE(review): `points` is never filled on this branch; the
            # cache stores an empty list here -- confirm this is intentional.
            points = []
            pointData = []
            tmats = [MarkerDetect.loadMat(f) for f in self.filenames]
            # Projection matrix per image: camera matrix times pose.
            projMats = [np.dot(Utils.camMtx, tmat) for tmat in tmats]
            for node in graph:
                for neigh in graph[node]:
                    im_idx1 = node[0]    # image index of the first keypoint
                    im_idx2 = neigh[0]   # image index of the matched keypoint
                    kpt_idx1 = node[1]
                    kpt_idx2 = neigh[1]

                    imgPt1 = np.array(kpts[im_idx1][0][kpt_idx1].pt)
                    imgPt2 = np.array(kpts[im_idx2][0][kpt_idx2].pt)

                    # Homogeneous triangulation, then drop the scale row.
                    p4d = self._triangulate(projMats[node[0]], projMats[neigh[0]], imgPt1, imgPt2)
                    p4d = p4d[:3, :]
                    pointData.append((kpts[im_idx1][1][kpt_idx1], p4d, im_idx1, kpt_idx1))

            DC.saveData(datafile, (points, pointData))
        else:
            points, pointData = data

        return imgs, kpts, points, pointData
def calc_data_from_files_triang(files, datafile = DC.POINTS4D_TRIANGULATE, noload = False):
    imgs = [cv2.imread(f) for f in files]
    masks = [cv2.imread("imgs/00%d_mask.png" % i, 0) for i in range(5, 10)]
    sfm = SFMSolver(files, masks)
    matches, kpts = sfm.getMatches()

    data = None if noload else DC.getData(datafile)
    if data is None:
        graph = sfm.getGraph(matches, kpts)
        all_levels = sfm.extractCliques(graph, maxlevel=3)
        # sfm.extendCliques(graph, all_levels[0], 1)
        # all_levels = sfm.extractCliques(graph, maxlevel=3)
        # sfm.extendCliques(graph, all_levels[0], 1)
        # all_levels = sfm.extractCliques(graph, maxlevel=3)

        tmats = [MarkerDetect.loadMat(f) for f in files]
        points = []
        pointData = []

        for i in range(len(all_levels[0])):
            if i % 1000 == 0: print i, len(all_levels[0]), len(pointData)
            c = all_levels[0][i]

            edge1 = (c[0], c[1])
            edge2 = (c[1], c[2])
            edge3 = (c[0], c[2])

            edge = edge1
            pt0, pt1 = sfm.getEdgePosTriangulate(edge, graph, kpts, tmats)
            if pt0 is not None:
                img_idx, kpt_idx = edge[0]
                pointData.append((kpts[img_idx][1][kpt_idx], pt0, img_idx, kpt_idx))
            if pt1 is not None:
                img_idx, kpt_idx = edge[1]
                pointData.append((kpts[img_idx][1][kpt_idx], pt1, img_idx, kpt_idx))

            edge = edge2
            pt0, pt1 = sfm.getEdgePosTriangulate(edge, graph, kpts, tmats)
            if pt0 is not None:
                img_idx, kpt_idx = edge[0]
                pointData.append((kpts[img_idx][1][kpt_idx], pt0, img_idx, kpt_idx))
            if pt1 is not None:
                img_idx, kpt_idx = edge[1]
                pointData.append((kpts[img_idx][1][kpt_idx], pt1, img_idx, kpt_idx))

            edge = edge3
            pt0, pt1 = sfm.getEdgePosTriangulate(edge, graph, kpts, tmats)
            if pt0 is not None:
                img_idx, kpt_idx = edge[0]
                pointData.append((kpts[img_idx][1][kpt_idx], pt0, img_idx, kpt_idx))
            if pt1 is not None:
                img_idx, kpt_idx = edge[1]
                pointData.append((kpts[img_idx][1][kpt_idx], pt1, img_idx, kpt_idx))

        DC.saveData(datafile, (points, pointData))
    else:
        points, pointData = data

    return imgs, kpts, points, pointData
Example #12
0
def calc_data_from_files_triang(files, datafile = DC.POINTS4D_TRIANGULATE, noload = False):
    imgs = [cv2.imread(f) for f in files]
    masks = [cv2.imread("imgs/00%d_mask.png" % i, 0) for i in range(5, 10)]
    sfm = SFMSolver(files, masks)
    matches, kpts = sfm.getMatches()

    data = None if noload else DC.getData(datafile)
    if data is None:
        graph = sfm.getGraph(matches, kpts)
        all_levels = sfm.extractCliques(graph, maxlevel=3)
        # sfm.extendCliques(graph, all_levels[0], 1)
        # all_levels = sfm.extractCliques(graph, maxlevel=3)
        # sfm.extendCliques(graph, all_levels[0], 1)
        # all_levels = sfm.extractCliques(graph, maxlevel=3)

        tmats = [MarkerDetect.loadMat(f) for f in files]
        points = []
        pointData = []

        for i in range(len(all_levels[0])):
            if i % 1000 == 0: print i, len(all_levels[0]), len(pointData)
            c = all_levels[0][i]

            edge1 = (c[0], c[1])
            edge2 = (c[1], c[2])
            edge3 = (c[0], c[2])

            edge = edge1
            pt0, pt1 = sfm.getEdgePosTriangulate(edge, graph, kpts, tmats)
            if pt0 is not None:
                img_idx, kpt_idx = edge[0]
                pointData.append((kpts[img_idx][1][kpt_idx], pt0, img_idx, kpt_idx))
            if pt1 is not None:
                img_idx, kpt_idx = edge[1]
                pointData.append((kpts[img_idx][1][kpt_idx], pt1, img_idx, kpt_idx))

            edge = edge2
            pt0, pt1 = sfm.getEdgePosTriangulate(edge, graph, kpts, tmats)
            if pt0 is not None:
                img_idx, kpt_idx = edge[0]
                pointData.append((kpts[img_idx][1][kpt_idx], pt0, img_idx, kpt_idx))
            if pt1 is not None:
                img_idx, kpt_idx = edge[1]
                pointData.append((kpts[img_idx][1][kpt_idx], pt1, img_idx, kpt_idx))

            edge = edge3
            pt0, pt1 = sfm.getEdgePosTriangulate(edge, graph, kpts, tmats)
            if pt0 is not None:
                img_idx, kpt_idx = edge[0]
                pointData.append((kpts[img_idx][1][kpt_idx], pt0, img_idx, kpt_idx))
            if pt1 is not None:
                img_idx, kpt_idx = edge[1]
                pointData.append((kpts[img_idx][1][kpt_idx], pt1, img_idx, kpt_idx))

        DC.saveData(datafile, (points, pointData))
    else:
        points, pointData = data

    return imgs, kpts, points, pointData
Example #13
0
File: Prov.py Project: mitkatch/BPE
	def _getTradeDirection(self, data, r1, r2, c1, c2):
		"""Infer trade direction from the bid/ask video (flash) attributes.

		Returns 'T' (take) when the ask-size cell is flashing, 'H' (hit)
		when the bid-size cell is flashing (bid takes precedence, as it is
		checked last), and ' ' when neither is flashing.
		NOTE(review): column offsets +12/+14 are screen-layout specific --
		confirm against the current page layout.
		"""
		# if Bid is flashing it's Hit
		tradeDir = ' ' 
		#bidSizeVideo = DataCache.getVideo(self.page, int(r1), int(c1) + 24) # bibSize
		#askSizeVideo = DataCache.getVideo(self.page, int(r1), int(c1) + 26) # askSize 
		#c check Price
		bidSizeVideo = DataCache.getVideo(self.page, int(r1), int(c1) + 12) # bibSize
		askSizeVideo = DataCache.getVideo(self.page, int(r1), int(c1) + 14) # askSize 
		# Low bit of the video attribute encodes "flashing".
		if (askSizeVideo & 0x01) == 0x01:
			tradeDir = 'T'
		if (bidSizeVideo & 0x01) == 0x01:
			tradeDir = 'H'
	
		return tradeDir	
Example #14
0
def calc_data_from_files(files, noload = False, datafile = DC.POINTS4D):
    """Build (or load cached) 3D point data from 3-cliques (simple solver).

    Extracts 3-cliques from the keypoint match graph and estimates one 3D
    point per clique with getCliquePosSimple; the result is cached on disk.

    :param files: image file names; masks come from imgs/00{5..9}_mask.png.
    :param noload: when True, ignore any cached data and recompute.
    :param datafile: DataCache key under which the result is cached.
    :return: (imgs, kpts, points, pointData) where points holds
        (clique, point) pairs and pointData holds per-keypoint
        (descriptor, point, image index, keypoint index) entries.
    """
    imgs = [cv2.imread(f) for f in files]
    masks = [cv2.imread("imgs/00%d_mask.png" % i, 0) for i in range(5, 10)]
    sfm = SFMSolver(files, masks)
    matches, kpts = sfm.getMatches()

    import DataCache as DC
    # Cached result is reused unless the caller forces a recompute.
    data = None if noload else DC.getData(datafile)
    if data is None:
        graph = sfm.getGraph(matches, kpts)
        all_levels = sfm.extractCliques(graph, maxlevel=3)
        # sfm.extendCliques(graph, all_levels[0], 1)
        # all_levels = sfm.extractCliques(graph, maxlevel=3)
        # sfm.extendCliques(graph, all_levels[0], 1)
        # all_levels = sfm.extractCliques(graph, maxlevel=3)

        tmats = [MarkerDetect.loadMat(f) for f in files]
        points = []

        # for c in all_levels[0]:
        #     point, inliers = sfm.getCliquePosRANSAC(c, kpts, tmats, err_thresh=100)
        #     if point is not None:
        #         points.append((c, point, inliers))
        # print "num points: ", len(points)
        # for c, p, inl in points:
        #     print "--- new clique ---"
        #     # print p
        #     # calc_repr_err(c, p, inl, tmats, kpts)
        #     if draw(c, imgs, kpts) == 27:
        #         return

        # One estimated 3D point per 3-clique, with error thresholds.
        for i in range(len(all_levels[0])):
            # Progress output every 1000 cliques (Python 2 print statement).
            if i % 1000 == 0: print i, len(all_levels[0]), len(points)
            c = all_levels[0][i]
            point = sfm.getCliquePosSimple(c, kpts, tmats, avg_err_thresh=5, max_err_thresh=10)
            if point is not None:
                points.append((c, point))

        # Expand each clique point into one record per member keypoint.
        pointData = []
        for c, p in points:
            for node in c:
                img_idx, kpt_idx = node
                pointData.append((kpts[img_idx][1][kpt_idx], p, img_idx, kpt_idx))

        DC.saveData(datafile, (points, pointData))
    else:
        points, pointData = data

    return imgs, kpts, points, pointData
def calc_data_from_files(files, noload = False, datafile = DC.POINTS4D):
    """Build (or load cached) 3D point data from 3-cliques (simple solver).

    Extracts 3-cliques from the keypoint match graph and estimates one 3D
    point per clique with getCliquePosSimple; the result is cached on disk.

    :param files: image file names; masks come from imgs/00{5..9}_mask.png.
    :param noload: when True, ignore any cached data and recompute.
    :param datafile: DataCache key under which the result is cached.
    :return: (imgs, kpts, points, pointData) where points holds
        (clique, point) pairs and pointData holds per-keypoint
        (descriptor, point, image index, keypoint index) entries.
    """
    imgs = [cv2.imread(f) for f in files]
    masks = [cv2.imread("imgs/00%d_mask.png" % i, 0) for i in range(5, 10)]
    sfm = SFMSolver(files, masks)
    matches, kpts = sfm.getMatches()

    import DataCache as DC
    # Cached result is reused unless the caller forces a recompute.
    data = None if noload else DC.getData(datafile)
    if data is None:
        graph = sfm.getGraph(matches, kpts)
        all_levels = sfm.extractCliques(graph, maxlevel=3)
        # sfm.extendCliques(graph, all_levels[0], 1)
        # all_levels = sfm.extractCliques(graph, maxlevel=3)
        # sfm.extendCliques(graph, all_levels[0], 1)
        # all_levels = sfm.extractCliques(graph, maxlevel=3)

        tmats = [MarkerDetect.loadMat(f) for f in files]
        points = []

        # for c in all_levels[0]:
        #     point, inliers = sfm.getCliquePosRANSAC(c, kpts, tmats, err_thresh=100)
        #     if point is not None:
        #         points.append((c, point, inliers))
        # print "num points: ", len(points)
        # for c, p, inl in points:
        #     print "--- new clique ---"
        #     # print p
        #     # calc_repr_err(c, p, inl, tmats, kpts)
        #     if draw(c, imgs, kpts) == 27:
        #         return

        # One estimated 3D point per 3-clique, with error thresholds.
        for i in range(len(all_levels[0])):
            # Progress output every 1000 cliques (Python 2 print statement).
            if i % 1000 == 0: print i, len(all_levels[0]), len(points)
            c = all_levels[0][i]
            point = sfm.getCliquePosSimple(c, kpts, tmats, avg_err_thresh=5, max_err_thresh=10)
            if point is not None:
                points.append((c, point))

        # Expand each clique point into one record per member keypoint.
        pointData = []
        for c, p in points:
            for node in c:
                img_idx, kpt_idx = node
                pointData.append((kpts[img_idx][1][kpt_idx], p, img_idx, kpt_idx))

        DC.saveData(datafile, (points, pointData))
    else:
        points, pointData = data

    return imgs, kpts, points, pointData
Example #16
0
def estimate_test():
    """Round-trip test for estimate(): synthesise a known pose, run the
    estimator, and print the estimated vs. exact transforms.

    Order of statements is significant: the pose file is written via
    DC.saveData before estimate() reads it back.
    """
    np.set_printoptions(3, suppress=True)
    filename = "out/test/test.p"
    # Synthetic robot/object/camera transforms with known parameters.
    tro = Utils.getTransform(1, 2, 3, 4, 5, 6, True)
    tor = np.linalg.inv(tro)
    x, y, z = 1, 2, 3
    da, db, dc = 10, 20, 30          # pose angles in degrees
    ra, rb, rc = map(np.deg2rad, (da, db, dc))
    trt = Utils.getTransform(rc, rb, ra, x, y, z, True)
    ttr = np.linalg.inv(trt)
    ttc = Utils.getTransform(2, 3, 5, 4, 6, 1, True)
    tct = np.linalg.inv(ttc)
    tco = tct.dot(ttr).dot(tro)
    toc = np.linalg.inv(tco)
    xyz_g_ex = np.array([11, 5.2, -4, 1.0]).reshape((4, 1)) #objectben
    xyz_g_ex = tro.dot(xyz_g_ex)
    toc_goal = np.array([-1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]).reshape((4, 4))
    rot_g_ex = tro.dot(toc_goal).dot(tct)[:3, :3]
    cba_g_ex = map(np.rad2deg, Utils.rpy(rot_g_ex))
    print trt
    print ttc
    print tco
    print trt.dot(ttc.dot(tco))

    # Pose file content estimate() expects: keys 500-502 are x/y/z in mm,
    # 503-505 are angles in degrees.
    data = [{
        500: x * 10,
        501: y * 10,
        502: z * 10,
        503: da,
        504: db,
        505: dc
    }]
    DC.saveData(filename, data)
    print "estimate call"
    cba_g, xyz_g = estimate(filename.replace(".p", ".jpg"), tco[:3, :], tco[:3, :], tor, ttc)
    print "estimate return"
    print "----"
    print cba_g, xyz_g
    print "exact: "
    print cba_g_ex
    print xyz_g_ex
    # Compare the transform rebuilt from the estimate with the original.
    trf_orig = Utils.getTransform(rc, rb, ra, x, y, z)
    print trf_orig
    rc, rb, ra = np.deg2rad(cba_g)
    x, y, z = xyz_g[:3]
    trf_est = Utils.getTransform(rc, rb, ra, x, y, z)
    print trf_est
    # Maximum absolute element-wise deviation between the two transforms.
    print np.max(np.abs(trf_est-trf_orig))
def estimate_test():
    """Round-trip test for estimate(): synthesise a known pose, run the
    estimator, and print the estimated vs. exact transforms.

    Order of statements is significant: the pose file is written via
    DC.saveData before estimate() reads it back.
    """
    np.set_printoptions(3, suppress=True)
    filename = "out/test/test.p"
    # Synthetic robot/object/camera transforms with known parameters.
    tro = Utils.getTransform(1, 2, 3, 4, 5, 6, True)
    tor = np.linalg.inv(tro)
    x, y, z = 1, 2, 3
    da, db, dc = 10, 20, 30          # pose angles in degrees
    ra, rb, rc = map(np.deg2rad, (da, db, dc))
    trt = Utils.getTransform(rc, rb, ra, x, y, z, True)
    ttr = np.linalg.inv(trt)
    ttc = Utils.getTransform(2, 3, 5, 4, 6, 1, True)
    tct = np.linalg.inv(ttc)
    tco = tct.dot(ttr).dot(tro)
    toc = np.linalg.inv(tco)
    xyz_g_ex = np.array([11, 5.2, -4, 1.0]).reshape((4, 1)) #objectben
    xyz_g_ex = tro.dot(xyz_g_ex)
    toc_goal = np.array([-1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]).reshape((4, 4))
    rot_g_ex = tro.dot(toc_goal).dot(tct)[:3, :3]
    cba_g_ex = map(np.rad2deg, Utils.rpy(rot_g_ex))
    print trt
    print ttc
    print tco
    print trt.dot(ttc.dot(tco))

    # Pose file content estimate() expects: keys 500-502 are x/y/z in mm,
    # 503-505 are angles in degrees.
    data = [{
        500: x * 10,
        501: y * 10,
        502: z * 10,
        503: da,
        504: db,
        505: dc
    }]
    DC.saveData(filename, data)
    print "estimate call"
    cba_g, xyz_g = estimate(filename.replace(".p", ".jpg"), tco[:3, :], tco[:3, :], tor, ttc)
    print "estimate return"
    print "----"
    print cba_g, xyz_g
    print "exact: "
    print cba_g_ex
    print xyz_g_ex
    # Compare the transform rebuilt from the estimate with the original.
    trf_orig = Utils.getTransform(rc, rb, ra, x, y, z)
    print trf_orig
    rc, rb, ra = np.deg2rad(cba_g)
    x, y, z = xyz_g[:3]
    trf_est = Utils.getTransform(rc, rb, ra, x, y, z)
    print trf_est
    # Maximum absolute element-wise deviation between the two transforms.
    print np.max(np.abs(trf_est-trf_orig))
def findtest():
    """Smoke-test find_ext_params on a fixed capture session.

    Builds an SFM point model from one session's images, loads a previously
    computed arrangement calibration, then runs find_ext_params on a single
    query image from another session.
    """
    from glob import glob
    from os.path import join

    np.set_printoptions(precision=3, suppress=True)

    # Model-building session.
    files_dir = "out/2017_3_8__14_51_22/"
    files = glob(join(files_dir, "*.jpg"))
    # Each image has a sibling *_mask.png.
    masks = [f.replace(".jpg", "_mask.png") for f in files]
    sfm = SFMSolver(files, masks)
    imgs, kpts, points, data = sfm.calc_data_from_files_triang_simple()

    arr_calib = DC.getData("out/2017_4_5__15_6_49/arrangement_calib.p")
    ttc = arr_calib["ttc"]
    tor = arr_calib["tor"]

    # Query session: only image 0037.jpg is tested. Fix: removed dead
    # stores (two earlier files_dir values and a '*.jpg' glob) that were
    # immediately overwritten and never used.
    files_dir = "out/2017_4_5__15_57_20/"
    files = glob(join(files_dir, "0037.jpg"))

    for f in files:
        find_ext_params(f, imgs, kpts, points, data, tor, ttc)
Example #19
0
def findtest():
    """Smoke-test find_ext_params on a fixed capture session.

    Builds an SFM point model from one session's images, loads a previously
    computed arrangement calibration, then runs find_ext_params on a single
    query image from another session.
    """
    from glob import glob
    from os.path import join

    np.set_printoptions(precision=3, suppress=True)

    # Model-building session.
    files_dir = "out/2017_3_8__14_51_22/"
    files = glob(join(files_dir, "*.jpg"))
    # Each image has a sibling *_mask.png.
    masks = [f.replace(".jpg", "_mask.png") for f in files]
    sfm = SFMSolver(files, masks)
    imgs, kpts, points, data = sfm.calc_data_from_files_triang_simple()

    arr_calib = DC.getData("out/2017_4_5__15_6_49/arrangement_calib.p")
    ttc = arr_calib["ttc"]
    tor = arr_calib["tor"]

    # Query session: only image 0037.jpg is tested. Fix: removed dead
    # stores (two earlier files_dir values and a '*.jpg' glob) that were
    # immediately overwritten and never used.
    files_dir = "out/2017_4_5__15_57_20/"
    files = glob(join(files_dir, "0037.jpg"))

    for f in files:
        find_ext_params(f, imgs, kpts, points, data, tor, ttc)
def getHackerNewsComments(articleId, format='json', url='', referer='', remote_addr=''):
    """Return the comment thread for *articleId*, serving from cache if set."""
    apiURL = "%s/item?id=%s" % (AppConfig.hackerNewsURL, articleId)
    apiURLBackup = "%s/item?id=%s" % (AppConfig.hackerNewsURLBackup, articleId)
    id = '/comments/%s' % (articleId)

    cachedData = getCache(id, format)
    if cachedData:
        return cachedData

    hnData = APIUtils.parseCommentsContent(apiURL, apiURLBackup, '/comments', None, format)
    if not hnData:
        logging.warning('getHackerNewsComments: unable to retrieve data for id %s' % id)
        return ''

    logging.debug('getHackerNewsComments: storing cached value for id %s' % id)
    DataCache.putData(id, format, APIUtils.removeNonAscii(hnData), url, referer, remote_addr)
    return hnData
def getHackerNewsSimpleContent(fetcherURL, fetcherBackupURL, id, page='', format='json', url='', referer='', remote_addr=''):
    """Fetch HN content for *id*, caching only non-paginated requests."""
    # Paginated content is never cached -- fetch and return directly.
    if page:
        return APIUtils.parsePageContent(fetcherURL, fetcherBackupURL, id, page, format)

    cachedData = getCache(id, format)
    if cachedData:
        return cachedData

    hnData = APIUtils.parsePageContent(fetcherURL, fetcherBackupURL, id, page, format)
    if not hnData:
        logging.warning('getHackerNewsSimpleContent: unable to retrieve data for id %s' % id)
        return ''

    logging.debug('getHackerNewsSimpleContent: storing cached value for id %s' % id)
    DataCache.putData(id, format, APIUtils.removeNonAscii(hnData), url, referer, remote_addr)
    return hnData
Example #22
0
    def _getTradeDirection(self, data, r1, r2, c1, c2):
        """Infer trade direction from the bid/ask video (flash) attributes.

        Returns 'T' (take) when the ask-size cell is flashing, 'H' (hit)
        when the bid-size cell is flashing (bid takes precedence, as it is
        checked last), and ' ' when neither is flashing.
        NOTE(review): column offsets +8/+4 are Shorcan screen-layout
        specific -- confirm against the current page layout.
        """
        # if Bid is flashing it's Hit
        tradeDir = ' '
        #askSizeVideo = DataCache.getVideo(self.page, int(r1), int(c1) + 17) # askSize
        #bidSizeVideo = DataCache.getVideo(self.page, int(r1), int(c1) + 13) # bibSize
        # check Price
        askSizeVideo = DataCache.getVideo(self.page, int(r1),
                                          int(c1) + 8)  # askSize
        bidSizeVideo = DataCache.getVideo(self.page, int(r1),
                                          int(c1) + 4)  # bibSize
        # Low bit of the video attribute encodes "flashing".
        if (askSizeVideo & 0x01) == 0x01:
            tradeDir = 'T'
        if (bidSizeVideo & 0x01) == 0x01:
            tradeDir = 'H'

        # Debug trace (Python 2 print statement).
        print 'Python::_getTradeDirection Shorcan: ', tradeDir, ', askSizeVideo: ', askSizeVideo, ', bidSizeVideo: ', bidSizeVideo
        return tradeDir
def getHackerNewsSubmittedContent(user, format='json', url='', referer='', remote_addr=''):
    """Return a user's submitted stories, serving from cache when possible.

    :param user: HN username whose submissions are requested.
    :param format: output format passed to the parser (e.g. 'json').
    :param url: request URL stored alongside the cached entry.
    :param referer: request referer stored alongside the cached entry.
    :param remote_addr: client address stored alongside the cached entry.
    :return: the (possibly cached) rendered content, or '' on failure.
    """
    apiURL = "%s/submitted?id=%s" % (AppConfig.hackerNewsURL, user)
    apiURLBackup = "%s/submitted?id=%s" % (AppConfig.hackerNewsURLBackup, user)
    id = '/submitted/%s' % (user)
    # Fix: removed a dead `cachedData = None` store that was immediately
    # overwritten by the getCache() call.
    cachedData = getCache(id, format)
    if (cachedData):
        return cachedData
    else:
        hnData = APIUtils.parsePageContent(apiURL, apiURLBackup, '/submitted', None, format)
        if (hnData):
            logging.debug('getHackerNewsSubmittedContent: storing cached value for id %s' % id)
            DataCache.putData(id, format, APIUtils.removeNonAscii(hnData), url, referer, remote_addr)
            return hnData
        else:
            logging.warning('getHackerNewsSubmittedContent: unable to retrieve data for id %s' % id)
            return ''
Example #24
0
def calc_data_from_files_unif(files, noload = False, datafile = DC.POINTS4D_UNIFIED):
    """Load unified 4D point data from cache; never recomputes.

    Unlike its sibling loaders this variant requires the cache file to
    exist (raises AssertionError otherwise; note noload=True always fails).
    """
    imgs = []
    for f in files:
        imgs.append(cv2.imread(f))
    masks = [cv2.imread("imgs/00%d_mask.png" % i, 0) for i in range(5, 10)]
    sfm = SFMSolver(files, masks)
    matches, kpts = sfm.getMatches()

    cached = None if noload else DC.getData(datafile)
    assert cached is not None
    points, pointData = cached
    return imgs, kpts, points, pointData
def getHackerNewsLatestContent(page='', format='json', url='', referer='', remote_addr='', limit=1):
    """Return the latest HN stories; only unpaginated results are cached."""
    limit = int(limit)
    # Paginated requests bypass the cache entirely.
    if page:
        return APIUtils.parsePageContent(AppConfig.hackerNewsURL, AppConfig.hackerNewsURLBackup, '/latest', page, format, limit)

    id = '/latest/%s' % limit
    cachedData = getCache(id, format)
    if cachedData:
        return cachedData

    hnData = APIUtils.parsePageContent(AppConfig.hackerNewsURL, AppConfig.hackerNewsURLBackup, '/latest', page, format, limit)
    if not hnData:
        logging.warning('getHackerNewsLatestContent: unable to retrieve data for id %s' % id)
        return ''

    logging.debug('getHackerNewsLatestContent: storing cached value for id %s' % id)
    DataCache.putData(id, format, APIUtils.removeNonAscii(hnData), url, referer, remote_addr)
    return hnData
Example #26
0
def getCache(pageId, format):
    """Return cached content for *pageId*, or None when absent/expired/error.

    Expired entries are deleted from the store as a side effect.

    :param pageId: cache key for the page.
    :param format: cached content format (e.g. 'json').
    :return: the cached rec_xml payload, or None.
    """
    logging.debug('getCache: %s' % pageId)
    try:
        dbData = DataCache.getData(pageId, format)
        if (dbData):
            if (DataCache.hasExpired(dbData)):
                #data has expired, remove it
                try:
                    dbData[0].delete()
                    return None
                except Exception:
                    logging.error('getCache: unable to remove cache')
                    return None
            else:
                # Fix: previously formatted the builtin `id` function into
                # the log message instead of the requested pageId.
                logging.debug('getCache: got cached data for id %s' % pageId)
                return dbData[0].rec_xml
        # Fix: make the "no cached record" path an explicit return.
        return None
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        logging.error('getCache: unable to get/retrieve cache')
        return None
def calc_data_from_files_unif(files, noload = False, datafile = DC.POINTS4D_UNIFIED):
    """Load unified 4D point data from cache; never recomputes.

    Unlike its sibling loaders this variant requires the cache file to
    exist (raises AssertionError otherwise; note noload=True always fails).
    """
    imgs = []
    for f in files:
        imgs.append(cv2.imread(f))
    masks = [cv2.imread("imgs/00%d_mask.png" % i, 0) for i in range(5, 10)]
    sfm = SFMSolver(files, masks)
    matches, kpts = sfm.getMatches()

    cached = None if noload else DC.getData(datafile)
    assert cached is not None
    points, pointData = cached
    return imgs, kpts, points, pointData
def getCache(pageId, format):
    """Return cached content for *pageId*, or None when absent/expired/error.

    Expired entries are deleted from the store as a side effect.

    :param pageId: cache key for the page.
    :param format: cached content format (e.g. 'json').
    :return: the cached rec_xml payload, or None.
    """
    logging.debug('getCache: %s' % pageId)
    try:
        dbData = DataCache.getData(pageId, format)
        if (dbData):
            if (DataCache.hasExpired(dbData)):
                #data has expired, remove it
                try:
                    dbData[0].delete()
                    return None
                except Exception:
                    logging.error('getCache: unable to remove cache')
                    return None
            else:
                # Fix: previously formatted the builtin `id` function into
                # the log message instead of the requested pageId.
                logging.debug('getCache: got cached data for id %s' % pageId)
                return dbData[0].rec_xml
        # Fix: make the "no cached record" path an explicit return.
        return None
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        logging.error('getCache: unable to get/retrieve cache')
        return None
Example #29
0
 def _processLines(self, lines):
     """Decode each reply line with the first command that recognises it.

     Order matters: volume, power, source, surround, then NSE display
     lines as the fallback.
     """
     lines = cmdNse.workaroundDenonProtocol(lines)
     for line in lines:
         logger.debug("Process line: %s", line)
         if cmdVolume.processReply(line) is not None:
             logger.debug("Volume decoded: %s",
                          cache.getValue(cmdVolume.getId()))
         elif cmdPower.processReply(line) is not None:
             logger.debug("Power decoded: %s",
                          cache.getValue(cmdPower.getId()))
         elif cmdSource.processReply(line) is not None:
             logger.debug("Source decoded: %s",
                          cache.getValue(cmdSource.getId()))
         elif cmdSurround.processReply(line) is not None:
             # Fix: this branch previously logged "Source decoded"
             # (copy-paste from the branch above).
             logger.debug("Surround decoded: %s",
                          cache.getValue(cmdSurround.getId()))
         else:
             reply = cmdNse.processReply(line)
             if reply is not None:
                 logger.debug("Display decoded: %s", reply)
Example #30
0
def _handleRequest(command, request='get'):
    """Send *request* for *command* to the receiver and wait for the reply.

    Returns the cached value once the receiver answers, or the string
    "Invalid request" when the command cannot build a request for it.
    """
    logger.debug(command.getId() + " request: " + request)
    cmdRequest = command.createRequest(request)

    if cmdRequest is not None:
        RestService.remoteConnection.send(cmdRequest)
        return cache.waitValue(key=command.getId(),
                               timeout=defaultRequestTimeout)

    # BUG FIX: the concatenation was missing a separating space
    # (log read "...idunknown request: ...").
    logger.debug(command.getId() + " unknown request: " + request)
    return "Invalid request"
Example #31
0
def lines():
    """Request the current NSE display lines from the receiver.

    Returns the cached display lines (via waitQuery) once they arrive,
    or the string "Invalid request" when no request could be built.
    """
    command = cmdLines
    logger.debug(command.getId() + " request: get")
    cmdRequest = command.createRequest('get')

    if cmdRequest is not None:
        RestService.remoteConnection.send(cmdRequest)
        return cache.waitQuery(query=command.getId(),
                               timeout=defaultRequestTimeout)

    # BUG FIX: the original logged the undefined name `request`, which
    # raised NameError on this path; this function always issues 'get'.
    # Also added the missing separating space in the message.
    logger.debug(command.getId() + " unknown request: get")
    return "Invalid request"
Example #32
0
def nowPlaying(request):
    """Evaluate the 'now playing' composite command and return the
    cached value for *request*.

    Returns "Invalid request" when the evaluator cannot build a request.
    """
    evaluator = evlPlaying
    logger.debug(evaluator.getId() + " request: " + request)
    cmdRequest = evaluator.createRequest()

    if cmdRequest is not None:
        RestService.remoteConnection.send(cmdRequest)
        evaluator.evaluate(timeout=defaultRequestTimeout)
        # NOTE(review): looks up the cache by the raw *request* string
        # (presumably 'artist'/'title'/'album') - verify against callers.
        return cache.getValue(key=request)

    # BUG FIX: missing separating space in the log concatenation.
    logger.debug(evaluator.getId() + " unknown request: " + request)
    return "Invalid request"
Example #33
0
def estimate(filename, tco_est, tco_real, tor, ttc):
    """Estimate the robot goal pose that views the object from pos_goal.

    tco_est / tco_real are 3x4 camera-object transforms (padded to 4x4
    here); tor and ttc come from the arrangement calibration.  The robot
    pose (registers 500-505: x, y, z in mm; a, b, c in degrees) is read
    from the pickle file saved next to *filename*.

    Returns (cba_goal, xyz_goal): goal orientation in degrees and goal
    position (homogeneous 4-vector).

    NOTE(review): transform naming assumed to be t<from><to>
    (e.g. trt = robot-base to tool) - confirm against Utils.getTransform.
    """
    # Pad the 3x4 estimates to homogeneous 4x4 matrices.
    temp = np.eye(4)
    temp[:3, :] = tco_est
    tco_est = temp
    temp = np.eye(4)
    temp[:3, :] = tco_real
    tco_real = temp
    # Robot pose pickle lives next to the image ("foo.jpg" -> "foo.p").
    pos_data_file = filename.replace("jpg", "p")
    posdata = DC.getData(pos_data_file)
    x, y, z, a, b, c = [posdata[0][i] for i in [500, 501, 502, 503, 504, 505]]
    a, b, c = map(lambda p: p * np.pi / 180, (a, b, c))  # deg to rad
    x, y, z = map(lambda p: p / 10.0, (x, y, z))  # mm to cm
    # print x, y, z,'|', a, b, c
    # img = cv2.imread(f)
    # img = cv2.pyrDown(img)
    # cv2.imshow("asd", img)
    # cv2.waitKey()
    trt = Utils.getTransform(c, b, a, x, y, z, True)
    # print trt.shape, ttc.shape, tco_est.shape
    print "---"
    print x, y, z, a, b, c
    print trt
    print ttc
    print tco_est
    # print tco_real
    print "---"
    # print np.linalg.inv(tco_real.dot(tor.dot(trt))) #ttc
    # Chain robot->tool->camera->object to estimate object in robot frame.
    tro_est = trt.dot(ttc.dot(tco_est))
    print "tro est"
    print tro_est
    print "tro real"
    print np.linalg.inv(tor)
    print "cam pos est"
    # Desired viewing point expressed in the object frame (homogeneous).
    pos_goal = [11, 5.2, -4, 1.0]
    # pos_goal = [0, 0, 0, 1.0]
    xyz_goal = tro_est.dot(np.array(pos_goal).T)
    print xyz_goal  # estimated position of the camera when it was close
    # rr, pp, yy = map(lambda v: v * np.pi / 180, (-180, -14, -180))
    # print "rpy trf real"
    # print Utils.getTransform(rr, pp, yy, 0, 0, 0)
    print "trt_goal \n a, b, c "
    # Fixed goal orientation: camera looks at the object (x and y flipped).
    toc_goal = np.array([-1, 0, 0, pos_goal[0], 0, -1, 0, pos_goal[1], 0, 0, 1, pos_goal[2], 0, 0, 0, 1]).reshape(
        (4, 4))
    # toc_goal = np.array([1, 0, 0, pos_goal[0], 0, 1, 0, pos_goal[1], 0, 0, 1, pos_goal[2], 0, 0, 0, 1]).reshape(
    #     (4, 4))
    # Back out the robot tool pose that realises the goal camera pose.
    trt_goal = tro_est.dot(toc_goal.dot(np.linalg.inv(ttc)))
    cba_goal = np.array(map(np.rad2deg, Utils.rpy(trt_goal[:3, :3])))
    print trt_goal
    print cba_goal
    # print Utils.getTransform(rr, pp, yy, 0, 0, 0)
    return cba_goal, xyz_goal
def estimate(filename, tco_est, tco_real, tor, ttc):
    """Estimate the robot goal pose that views the object from pos_goal.

    NOTE(review): this definition is byte-identical to the `estimate`
    defined earlier in the file and shadows it at import time - one of
    the two copies should eventually be removed.

    See the first copy for the parameter description; returns
    (cba_goal, xyz_goal).
    """
    # Pad the 3x4 estimates to homogeneous 4x4 matrices.
    temp = np.eye(4)
    temp[:3, :] = tco_est
    tco_est = temp
    temp = np.eye(4)
    temp[:3, :] = tco_real
    tco_real = temp
    # Robot pose pickle lives next to the image ("foo.jpg" -> "foo.p").
    pos_data_file = filename.replace("jpg", "p")
    posdata = DC.getData(pos_data_file)
    x, y, z, a, b, c = [posdata[0][i] for i in [500, 501, 502, 503, 504, 505]]
    a, b, c = map(lambda p: p * np.pi / 180, (a, b, c))  # deg to rad
    x, y, z = map(lambda p: p / 10.0, (x, y, z))  # mm to cm
    # print x, y, z,'|', a, b, c
    # img = cv2.imread(f)
    # img = cv2.pyrDown(img)
    # cv2.imshow("asd", img)
    # cv2.waitKey()
    trt = Utils.getTransform(c, b, a, x, y, z, True)
    # print trt.shape, ttc.shape, tco_est.shape
    print "---"
    print x, y, z, a, b, c
    print trt
    print ttc
    print tco_est
    # print tco_real
    print "---"
    # print np.linalg.inv(tco_real.dot(tor.dot(trt))) #ttc
    # Chain robot->tool->camera->object to estimate object in robot frame.
    tro_est = trt.dot(ttc.dot(tco_est))
    print "tro est"
    print tro_est
    print "tro real"
    print np.linalg.inv(tor)
    print "cam pos est"
    # Desired viewing point expressed in the object frame (homogeneous).
    pos_goal = [11, 5.2, -4, 1.0]
    # pos_goal = [0, 0, 0, 1.0]
    xyz_goal = tro_est.dot(np.array(pos_goal).T)
    print xyz_goal  # estimated position of the camera when it was close
    # rr, pp, yy = map(lambda v: v * np.pi / 180, (-180, -14, -180))
    # print "rpy trf real"
    # print Utils.getTransform(rr, pp, yy, 0, 0, 0)
    print "trt_goal \n a, b, c "
    # Fixed goal orientation: camera looks at the object (x and y flipped).
    toc_goal = np.array([-1, 0, 0, pos_goal[0], 0, -1, 0, pos_goal[1], 0, 0, 1, pos_goal[2], 0, 0, 0, 1]).reshape(
        (4, 4))
    # toc_goal = np.array([1, 0, 0, pos_goal[0], 0, 1, 0, pos_goal[1], 0, 0, 1, pos_goal[2], 0, 0, 0, 1]).reshape(
    #     (4, 4))
    # Back out the robot tool pose that realises the goal camera pose.
    trt_goal = tro_est.dot(toc_goal.dot(np.linalg.inv(ttc)))
    cba_goal = np.array(map(np.rad2deg, Utils.rpy(trt_goal[:3, :3])))
    print trt_goal
    print cba_goal
    # print Utils.getTransform(rr, pp, yy, 0, 0, 0)
    return cba_goal, xyz_goal
    def __find_object(self):
        """Drive the robot through FIND_POINTS, capture an image at each
        pose, localise the known object via SFM feature matching, and
        finally move the arm towards the best pose estimate.

        Uses self.obj_data as a lazy cache of the SFM model; honours
        self.stop_signal between stages for cooperative cancellation.
        """
        import DataCache as DC
        from glob import glob
        from os.path import join
        import numpy as np
        from SFMSolver import SFMSolver, find_ext_params
        import Utils

        print "FINDING"

        np.set_printoptions(precision=3, suppress=True)

        # Reference object images (with *_mask.png masks) used to build
        # the SFM model of the object.
        files_dir = "out/2017_3_8__14_51_22/"
        files = glob(join(files_dir, "*.jpg"))
        masks = []
        for f in files:
            m = f.replace(".jpg", "_mask.png")
            masks.append(m)
        sfm = SFMSolver(files, masks)
        if self.obj_data is None:
            imgs, kpts, points, data = sfm.calc_data_from_files_triang_simple()
            self.obj_data = imgs, kpts, points, data
        else:
            imgs, kpts, points, data = self.obj_data

        # Arrangement calibration: ttc / tor transforms, plus camera
        # intrinsics when present.
        arr_calib = DC.getData("out/%s/arrangement_calib.p" %
                               ARRANGEMENT_CALIB_DIR)
        ttc = arr_calib["ttc"]
        tor = arr_calib["tor"]
        if "cam_mtx" in arr_calib:
            print "camMtx, distcoeffs load"
            Utils.camMtx = arr_calib["cam_mtx"]
            Utils.dist_coeffs = arr_calib["dist_coeffs"]

        if self.stop_signal:
            self.stop_signal = False
            return

        # Visit every capture pose; registers 500-505 carry x,y,z,a,b,c.
        for point in FIND_POINTS:
            values = {
                500: point[0],
                501: point[1],
                502: point[2],
                503: point[3],
                504: point[4],
                505: point[5],
            }
            print "set_values call"
            self.set_values(values, True)
            print "set_values return"

            # Give the robot/camera time to settle, then trigger a capture.
            time.sleep(0.5)
            CamGrabber.capture_if_no_chessboard = True
            CamGrabber.capture = True
            time.sleep(0.5)

            if self.stop_signal:
                self.stop_signal = False
                return

        # Evaluate only the images captured during this run.
        find_dir = logger.outputdir
        files = glob("%s/*.jpg" % find_dir)
        print files
        # files_dir = "out/2017_4_5__15_57_20/"
        # files = glob(join(files_dir, "*.jpg"))
        files.sort()
        files = files[-len(FIND_POINTS):]
        results = []

        for f in files:
            res = find_ext_params(f, imgs, kpts, points, data, tor, ttc)
            results.append(res)
            if self.stop_signal:
                self.stop_signal = False
                return

        for i in range(len(results)):
            print i, results[i]
            write_log((i, results[i]))
        # Best estimate = the one with the most inliers (res[2]).
        result = max(results, key=lambda x: x[2])
        write_log(result)

        # Convert cm -> mm for position; +200 keeps a z safety offset.
        values = {
            500: int(result[0][0] * 10),
            501: int(result[0][1] * 10),
            502: int(result[0][2] * 10) + 200,
            503: int(result[1][2]),
            504: int(result[1][1]),
            505: int(result[1][0]),
        }

        print "num inl: ", result[2]
        pprint(values)
        self.set_values(values, go_to_value=False)
        self.find_thread = None
Example #36
0
def img_test_complete_from_files(out_dir, num_rot_calib_imgs, use_calib_data = False):
    """Hand-eye arrangement calibration from saved chessboard images.

    The first *num_rot_calib_imgs* images in *out_dir* calibrate the
    rotation (ror/rtc); the remainder are listed for the translation
    stage.  Each image has a sibling ".p" pickle with the robot pose in
    registers 500-505 (x,y,z in mm; a,b,c in degrees).

    NOTE(review): this copy ends right after listing files_trans; a
    fuller variant later in the file continues with the translation
    calibration and result saving.
    """
    file_names_pattern = "%s/*.jpg" % out_dir
    files = glob(file_names_pattern)
    files_rot = files[:num_rot_calib_imgs]

    # Optionally reuse previously saved per-image extrinsics/intrinsics.
    calib_data_rot = None
    calib_data_trans = None
    if use_calib_data:
        print "using calib data"
        calib_file = "%s/calib_data.p" % out_dir
        calib_data = DC.getData(calib_file)
        if calib_data:
            print "calib_data not None"
            data_len = len(calib_data["tvecs"])
            tvecs = calib_data["tvecs"]
            rvecs = calib_data["rvecs"]
            Utils.camMtx = calib_data["cam_mtx"]
            Utils.dist_coeffs = calib_data["dist_coeffs"]
            calib_data = [(rvecs[i], tvecs[i]) for i in range(data_len)]
            calib_data_rot = calib_data[:num_rot_calib_imgs]
            calib_data_trans = calib_data[num_rot_calib_imgs:]
        else:
            print "ERROR: calib_data is None"

    # 9x6 chessboard, 2.615 cm square size.
    pattern_size = (9, 6)
    pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
    pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
    pattern_points *= 2.615

    robot_coords_rot = []
    imgpts = []

    a, b, c = -1, -1, -1
    tmats_rt = []
    for f in files_rot:
        # Load the robot pose recorded alongside the image.
        datafile = os.path.splitext(f)[0] + ".p"
        pfile = file(datafile)
        data = pickle.load(pfile)
        pfile.close()

        x, y, z, a, b, c = [data[0][i] for i in [500, 501, 502, 503, 504, 505]]
        a, b, c = map(lambda p: p * np.pi / 180     , (a, b, c))  # deg to rad
        x, y, z = map(lambda p: p / 10.0            , (x, y, z))  # mm to cm
        robot_coords_rot.append([x, y, z, a, b, c])
        tmats_rt.append(Utils.getTransform(c, b, a, x, y, z, True))

        # Detect and refine chessboard corners for this view.
        img = cv2.imread(f)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = normalize(gray)
        rv, corners = cv2.findChessboardCorners(gray, (9, 6))
        # drawCorners(img, corners)
        cv2.cornerSubPix(gray, corners, (9, 6), (-1, -1),
                         criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1))
        # drawCorners(img, corners)

        imgpts_curr = corners.reshape((54, 2))
        imgpts.append(imgpts_curr)

    # Rotation calibration; rtc = tool->camera rotation estimate.
    ror, toc = calc_rot(imgpts, pattern_points, robot_coords_rot, True, calib_data_rot)
    roc = calc_avg_rot([toci[:3,:3] for toci in toc])
    rrt = Utils.getTransform(c, b, a, 0, 0, 0, True)[:3, :3]
    rtc = rrt.T.dot(ror.T.dot(roc))

    print Utils.rpy(ror)
    print ror

    robot_coords_trans = []
    imgpts_trans = []
    tmats_rt_trans = []
    files_trans = files[num_rot_calib_imgs:]
    print [(i, os.path.basename(files_trans[i])) for i in range(len(files_trans))]
Example #37
0
    def __find_object(self):
        """Drive the robot through FIND_POINTS, capture an image at each
        pose, localise the known object via SFM feature matching, and
        finally move the arm towards the best pose estimate.

        NOTE(review): near-duplicate of the other __find_object in this
        file (differs only in formatting); consider consolidating.
        """
        import DataCache as DC
        from glob import glob
        from os.path import join
        import numpy as np
        from SFMSolver import SFMSolver, find_ext_params
        import Utils

        print "FINDING"

        np.set_printoptions(precision=3, suppress=True)

        # Reference object images (with *_mask.png masks) for the SFM model.
        files_dir = "out/2017_3_8__14_51_22/"
        files = glob(join(files_dir, "*.jpg"))
        masks = []
        for f in files:
            m = f.replace(".jpg", "_mask.png")
            masks.append(m)
        sfm = SFMSolver(files, masks)
        if self.obj_data is None:
            imgs, kpts, points, data = sfm.calc_data_from_files_triang_simple()
            self.obj_data = imgs, kpts, points, data
        else:
            imgs, kpts, points, data = self.obj_data

        # Arrangement calibration transforms (+ optional intrinsics).
        arr_calib = DC.getData("out/%s/arrangement_calib.p" % ARRANGEMENT_CALIB_DIR)
        ttc = arr_calib["ttc"]
        tor = arr_calib["tor"]
        if "cam_mtx" in arr_calib:
            print "camMtx, distcoeffs load"
            Utils.camMtx = arr_calib["cam_mtx"]
            Utils.dist_coeffs = arr_calib["dist_coeffs"]

        if self.stop_signal:
            self.stop_signal = False
            return

        # Visit every capture pose; registers 500-505 carry x,y,z,a,b,c.
        for point in FIND_POINTS:
            values = {
                500: point[0],
                501: point[1],
                502: point[2],
                503: point[3],
                504: point[4],
                505: point[5],
            }
            print "set_values call"
            self.set_values(values, True)
            print "set_values return"

            # Let the robot settle, then trigger a capture.
            time.sleep(0.5)
            CamGrabber.capture_if_no_chessboard = True
            CamGrabber.capture = True
            time.sleep(0.5)

            if self.stop_signal:
                self.stop_signal = False
                return

        # Evaluate only the images captured during this run.
        find_dir = logger.outputdir
        files = glob("%s/*.jpg" % find_dir)
        print files
        # files_dir = "out/2017_4_5__15_57_20/"
        # files = glob(join(files_dir, "*.jpg"))
        files.sort()
        files = files[-len(FIND_POINTS):]
        results = []

        for f in files:
            res = find_ext_params(f, imgs, kpts, points, data, tor, ttc)
            results.append(res)
            if self.stop_signal:
                self.stop_signal = False
                return

        for i in range(len(results)):
            print i, results[i]
            write_log((i, results[i]))
        # Best estimate = the one with the most inliers (res[2]).
        result = max(results, key=lambda x: x[2])
        write_log(result)


        # Convert cm -> mm for position; +200 keeps a z safety offset.
        values = {
            500: int(result[0][0] * 10),
            501: int(result[0][1] * 10),
            502: int(result[0][2] * 10) + 200,
            503: int(result[1][2]),
            504: int(result[1][1]),
            505: int(result[1][0]),
        }

        print "num inl: ", result[2]
        pprint(values)
        self.set_values(values, go_to_value=False)
        self.find_thread = None
Example #38
0
    ttc[:3, :3] = rtc
    ttc[:3, 3] = vtc.reshape((3,))

    print "vtc, vor = "
    print x # vtc, vor
    reprojectPoints(
        tor,
        tmats_rt + tmats_rt_trans,
        ttc,
        cammtx,
        pattern_points,
        imgpts + imgpts_trans)
    print "cammtx"
    print Utils.camMtx

    DC.saveData("%s/arrangement_calib.p" % out_dir, {"ttc": ttc, "tor": tor, "cam_mtx": Utils.camMtx, "dist_coeffs": Utils.dist_coeffs})

def filter_contours(contours):
    """Filter raw contours down to large quadrilaterals.

    Each contour is approximated with a coarse polygon (20 px tolerance);
    only 4-vertex polygons with an area above 5000 are kept, ordered by
    ascending area.

    :param contours: iterable of OpenCV contours (Nx1x2 point arrays)
    :return: list of filtered quadrilateral contours
    """
    # Coarse polygonal approximation so marker edges collapse to 4 points.
    approximated = [cv2.approxPolyDP(c, 20, True) for c in contours]
    quads = [c for c in approximated if c.shape[0] == 4]
    # Idiom fix: the original wrapped sorted() in a redundant list
    # comprehension and used a lambda that just forwarded to contourArea.
    quads = sorted(quads, key=cv2.contourArea)
    return [c for c in quads if cv2.contourArea(c) > 5000]
Example #39
0
    flags=cv2.CALIB_FIX_ASPECT_RATIO | cv2.CALIB_FIX_FOCAL_LENGTH
    | cv2.CALIB_FIX_PRINCIPAL_POINT)

# Report calibration results and the mean reprojection error per image,
# then persist the calibration data next to the images.
outfile.write("mat, distcoeff \r\n %s \r\n .. \r\n %s" % (str(mtx), str(dist)))
print ret
print mtx
print dist
mean_error = 0
for i in xrange(len(objpoints)):
    # Reproject the board points with the solved pose and compare to the
    # detected corners (per-point L2 average).
    imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx,
                                      dist)
    error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
    # Visual sanity check: reprojected corners on the half-size image.
    img = cv2.imread(fnames[i])
    img2 = cv2.pyrDown(img)
    cv2.drawChessboardCorners(img2, grid_size, imgpoints2 / 2, True)
    cv2.imshow("img", img2)
    cv2.waitKey(1)

    mean_error += error

print "total error (average per image): ", mean_error / len(objpoints)
print len(objpoints)
end_time = time()
total_time = end_time - start_time
timestr = "\r\n time elapsed = %dm%ds" % (total_time / 60, total_time % 60)
print timestr
outfile.write(timestr)

# Persist intrinsics + per-image extrinsics for later reuse.
data = {"cam_mtx": mtx, "dist_coeffs": dist, "rvecs": rvecs, "tvecs": tvecs}
DataCache.saveData(join(dirname(imgs_path), "calib_data.p"), data)
outfile.close()
Example #40
0
File: Inst.py Project: mitkatch/BPE
    def publish(self, data, r1, r2, c1, c2):
        """Parse a raw quote row and publish it as an instrument record.

        *data* is the raw page text for this instrument's slot; r1/r2 and
        c1/c2 delimit the row/column region used to derive the trade
        direction.  Field extraction goes through the self._to* helpers;
        the result is pushed to DataCache.updateInstrument.

        Returns a 1-tuple with the status as a string, or ('0',) on any
        parse/publish failure.
        """
        # Normalise OCR-style confusions: letter O -> digit 0.
        # NOTE(review): this substitution is repeated inside the try
        # block below; one of the two is redundant.
        data = re.sub('O', '0', data)
        try:
            data = re.sub('O', '0', data)

            tradeDir = self._getTradeDirection(data, r1, r2, c1, c2)

            # Extract all quote fields from the raw row.
            bid = self._toBid(data)
            ask = self._toAsk(data)
            bid_size = self._toBidSize(data)
            ask_size = self._toAskSize(data)
            bid_yield = self._toBidYield(data)
            ask_yield = self._toAskYield(data)
            bmark = self._toBMark(data)

            # Map to the downstream record schema; ROWxx_yy keys are the
            # feed's slot names (original field names kept in comments).
            inst = {
                'ML_GRAPH_ADDRESS':
                self.symbol,
                #'SYMBOL'	: self.symbol,
                'SECUR_NAME':
                self.symbol,
                ###'INST_CODE'	: self.symbol,
                #'DESC'		: self.desc,
                'DSPLY_NAME':
                self.desc,
                'ISSUER':
                self.issuer,
                ###'CURRENCY'	: self.currency,
                ###'COUNTRY'	: self.country,
                #'SEQ_OUT'	: self._getSeqOut(),
                'WEB_ADRS':
                self._getSeqOut(),
                'INSTR_TYPE':
                self.type,
                'INSTR_SUBTYPE':
                self.subtype,
                'INSTR_SUBTYPE_1':
                self.subtype1,
                'INSTR_SUBTYPE_2':
                self.subtype2,
                ###'ML_MAPPED' :self.name,
                'ROW66_23':
                self.name,
                ###'BENCHMARK'    : bmark,
                'ROW66_2':
                bmark,
                ###'ML_LABEL_SLOT' : str(self.label_slot),
                ###'ML_DATA_SLOT' : str(self.page + ':' + str(self.data_slot)),
                ###'ML_RAW_DATA': data,
                'ROW64_11':
                data,
                ###'CPN'	: self.coupon,
                ###'MATURITY'	: self.mat,
                ###'MATURITY_DD'	: self.matDD,
                ###'MATURITY_MM'	: self.matMM,
                ###'MATURITY_YYYY'	: self.matYYYY,
                'BID':
                float(self._hit(bid, tradeDir)),
                'BIDSIZE':
                float(self._hit(bid_size, tradeDir)),
                'BID_TIME':
                self.bid_time,
                ###'BID_CHG'	: float(self._bidChg(bid)),
                'BID_YIELD':
                float(self._hit(bid_yield, tradeDir)),
                ###'BID_YIELD_CHG' : float(self._bidYieldChg(bid_yield)),
                'ASK':
                float(self._take(ask, tradeDir)),
                'ASK_TIME':
                self.ask_time,
                ###'ASK_CHG'	: float(self._askChg(ask)),
                'ASKSIZE':
                float(self._take(ask_size, tradeDir)),
                'ASK_YIELD':
                float(self._take(ask_yield, tradeDir)),
                ###'ASK_YIELD_CHG' : float(self._askYieldChg(ask_yield)),
                #'TRADE_TONE' : str(tradeDir),
                'ROW66_20':
                str(tradeDir),
                #'TRADE_PRICE' : float(self._toLastTradePrice(bid, ask, tradeDir)),
                'ROW66_15':
                float(self._toLastTradePrice(bid, ask, tradeDir)),
                #'TRADE_SIZE' : float(self._toLastTradeSize(bid_size, ask_size, tradeDir)),
                'ROW66_16':
                float(self._toLastTradeSize(bid_size, ask_size, tradeDir)),
                #'TRADE_YIELD' : float(self._toLastTradeYield(bid_yield, ask_yield, tradeDir)),
                'ROW66_21':
                float(self._toLastTradeYield(bid_yield, ask_yield, tradeDir)),
                #'TRADE_TIME' : str(self._toLastTradeTime()),
                'ROW66_18':
                str(self._toLastTradeTime()),
                ##'TIMESTAMP'	: time.strftime('%x %X', time.localtime())
                'ROW64_12':
                time.strftime('%x %X', time.localtime())
            }
            #print 'Python::ShorcanBond::publish ', inst
            status = DataCache.updateInstrument(inst, self.symbol)
            return (str(status), )
        except Exception, e:
            # Broad catch: any parse failure downgrades to status '0'.
            print 'Python::Inst::publish error ', e, data
            return ('0', )
Example #41
0
def img_test_complete_from_files(out_dir,
                                 num_rot_calib_imgs,
                                 use_calib_data=False):
    """Hand-eye arrangement calibration from saved chessboard images.

    The first *num_rot_calib_imgs* images in *out_dir* calibrate the
    rotation (ror/rtc); the remaining images calibrate the translation
    (vtc/vor).  Each image has a sibling ".p" pickle with the robot pose
    in registers 500-505 (x,y,z in mm; a,b,c in degrees).  The resulting
    ttc (tool->camera) and tor transforms are saved with the camera
    intrinsics to out_dir/arrangement_calib.p.
    """
    file_names_pattern = "%s/*.jpg" % out_dir
    files = glob(file_names_pattern)
    files_rot = files[:num_rot_calib_imgs]

    # Optionally reuse previously saved per-image extrinsics/intrinsics.
    calib_data_rot = None
    calib_data_trans = None
    if use_calib_data:
        print "using calib data"
        calib_file = "%s/calib_data.p" % out_dir
        calib_data = DC.getData(calib_file)
        if calib_data:
            print "calib_data not None"
            data_len = len(calib_data["tvecs"])
            tvecs = calib_data["tvecs"]
            rvecs = calib_data["rvecs"]
            Utils.camMtx = calib_data["cam_mtx"]
            Utils.dist_coeffs = calib_data["dist_coeffs"]
            calib_data = [(rvecs[i], tvecs[i]) for i in range(data_len)]
            calib_data_rot = calib_data[:num_rot_calib_imgs]
            calib_data_trans = calib_data[num_rot_calib_imgs:]
        else:
            print "ERROR: calib_data is None"

    # 9x6 chessboard, 2.615 cm square size.
    pattern_size = (9, 6)
    pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
    pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
    pattern_points *= 2.615

    robot_coords_rot = []
    imgpts = []

    a, b, c = -1, -1, -1
    tmats_rt = []
    for f in files_rot:
        # Load the robot pose recorded alongside the image.
        datafile = os.path.splitext(f)[0] + ".p"
        pfile = file(datafile)
        data = pickle.load(pfile)
        pfile.close()

        x, y, z, a, b, c = [data[0][i] for i in [500, 501, 502, 503, 504, 505]]
        a, b, c = map(lambda p: p * np.pi / 180, (a, b, c))  # deg to rad
        x, y, z = map(lambda p: p / 10.0, (x, y, z))  # mm to cm
        robot_coords_rot.append([x, y, z, a, b, c])
        tmats_rt.append(Utils.getTransform(c, b, a, x, y, z, True))

        # Detect and refine chessboard corners for this view.
        img = cv2.imread(f)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = normalize(gray)
        rv, corners = cv2.findChessboardCorners(gray, (9, 6))
        # drawCorners(img, corners)
        cv2.cornerSubPix(gray,
                         corners, (9, 6), (-1, -1),
                         criteria=(cv2.TERM_CRITERIA_EPS +
                                   cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1))
        # drawCorners(img, corners)

        imgpts_curr = corners.reshape((54, 2))
        imgpts.append(imgpts_curr)

    # Rotation calibration; rtc = tool->camera rotation estimate.
    ror, toc = calc_rot(imgpts, pattern_points, robot_coords_rot, True,
                        calib_data_rot)
    roc = calc_avg_rot([toci[:3, :3] for toci in toc])
    rrt = Utils.getTransform(c, b, a, 0, 0, 0, True)[:3, :3]
    rtc = rrt.T.dot(ror.T.dot(roc))

    print Utils.rpy(ror)
    print ror

    # Translation stage on the remaining images.
    robot_coords_trans = []
    imgpts_trans = []
    tmats_rt_trans = []
    files_trans = files[num_rot_calib_imgs:]
    print[(i, os.path.basename(files_trans[i]))
          for i in range(len(files_trans))]
    for f in files_trans:
        # Load the robot pose recorded alongside the image.
        datafile = os.path.splitext(f)[0] + ".p"
        pfile = file(datafile)
        data = pickle.load(pfile)
        pfile.close()

        x, y, z, a, b, c = [data[0][i] for i in [500, 501, 502, 503, 504, 505]]
        a, b, c = map(lambda p: p * np.pi / 180, (a, b, c))  # deg to rad
        x, y, z = map(lambda p: p / 10.0, (x, y, z))  # mm to cm
        robot_coords_trans.append([x, y, z, a, b, c])
        tmats_rt_trans.append(Utils.getTransform(c, b, a, x, y, z, True))

        # Detect and refine chessboard corners for this view.
        img = cv2.imread(f)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = normalize(gray)
        rv, corners = cv2.findChessboardCorners(gray, (9, 6))
        # drawCorners(img, corners)
        cv2.cornerSubPix(gray,
                         corners, (9, 6), (-1, -1),
                         criteria=(cv2.TERM_CRITERIA_EPS +
                                   cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1))
        # drawCorners(img, corners)

        imgpts_curr = corners.reshape((54, 2))
        imgpts_trans.append(imgpts_curr)

    # Solve translation; x stacks vtc (tool->camera) over vor.
    x, toc = calc_trans(imgpts_trans, pattern_points, robot_coords_trans, ror,
                        True, calib_data_trans)
    vtc = x[:3, :]
    vor = x[3:, :]
    # Assemble the full 4x4 transforms from rotation + translation parts.
    tor = np.eye(4)
    tor[:3, :3] = ror
    tor[:3, 3] = vor.reshape((3, ))
    ttc = np.eye(4)
    ttc[:3, :3] = rtc
    ttc[:3, 3] = vtc.reshape((3, ))

    print "vtc, vor = "
    print x  # vtc, vor
    # Sanity check: reproject the board into every view with the result.
    reprojectPoints(tor, tmats_rt + tmats_rt_trans, ttc, cammtx,
                    pattern_points, imgpts + imgpts_trans)
    print "cammtx"
    print Utils.camMtx

    DC.saveData(
        "%s/arrangement_calib.p" % out_dir, {
            "ttc": ttc,
            "tor": tor,
            "cam_mtx": Utils.camMtx,
            "dist_coeffs": Utils.dist_coeffs
        })
Example #42
0
import logging
import DataCache as cache

import commands.Source as cmdSource
import commands.Nse as cmdNse

logger = logging.getLogger(__name__)

# Identifier under which this evaluator is addressed by the REST layer.
_id = 'playing'

# Cached now-playing fields, filled in as display replies are decoded.
_cachedValues = {
  'artist' : cache.CachedValue("artist"),
  'title' : cache.CachedValue("title"),
  'album' : cache.CachedValue("album")
}

def getId():
  """Return the identifier of this evaluator ('playing')."""
  return _id

def createRequest(request='get'):
  """Invalidate all cached now-playing values, then build the combined
  source + NSE display request string.

  The *request* argument is accepted for interface parity with the other
  command modules; it is not used here.
  """
  for cached in _cachedValues.values():
    cached.invalidate()
  return cmdSource.createRequest() + cmdNse.createRequest()

def evaluate(timeout=None):
  source = cache.waitValue(cmdSource.getId(), timeout)
  if not _isPlayingSource(source):
    _clearValues()
    return
Example #43
0
    def _addInst(self, source, name, page, data, r1, r2, c1, c2):
        """Classify a raw page row as a Bond, Bill or Prov instrument and
        register it in the cache and the self.labels slot map.

        *source* selects the vendor-specific parser (e.g. 'Shorcan');
        r1/r2, c1/c2 delimit the label slot on the page.  Returns the
        DataCache.addInstrument status (0 means duplicate/refresh).

        SECURITY NOTE(review): classification dispatches via eval() on a
        string built from *source*; safe only while *source* comes from
        trusted internal configuration - do not feed it external input.
        """
        #source = __name__
        # Try Bond first: does this row look like a <source> bond?
        strlbl = eval('Bond.like' + source + 'Bond(data)')
        # call Bond.likeShorcanBond(data)
        status = 0
        refresh = 0
        label_slot = self._setTuple(r1, r2, c1, c2)
        if strlbl:
            bond = eval('Bond.' + source + 'Bond(name, page, strlbl)')

            status = DataCache.addInstrument(bond.symbol)
            if status == 0:
                # Already known: reuse the existing object if the slot
                # still holds the same symbol (pure refresh).
                print 'Python::_addInst duplication found ', bond.symbol
                bondFound = self.labels.get(label_slot, None)
                if bondFound:
                    if bond.symbol == bondFound.symbol:
                        bond = bondFound
                        print 'Run Python::_addInst refresh for Bond ', bond.symbol, data
                        refresh = 1
                        # just a refresh

            if refresh == 0:
                bond.label_slot = label_slot
                bond.data_slot = self._getDataSlotByLabel(label_slot)
                print 'Run Python::_addInst Bond ', bond.symbol, r1, r2, c1, c2
                self.labels[label_slot] = bond
        else:
            # Not a bond - try Bill next.
            strlbl = eval('Bill.like' + source + 'Bill(data)')
            if strlbl:
                bill = eval('Bill.' + source + 'Bill(name, page, strlbl)')
                print 'Run Python::_addInst Bill ', bill.symbol, r1, r2, c1, c2
                status = DataCache.addInstrument(bill.symbol)

                if status == 0:
                    print 'Python::_addInst duplication found ', bill.symbol
                    billFound = self.labels.get(label_slot, None)
                    if billFound:
                        if bill.symbol == billFound.symbol:
                            bill = billFound
                            print 'Run Python::_addInst refresh for Bill ', bill.symbol, data
                            refresh = 1
                            # just a refresh

                if refresh == 0:
                    bill.label_slot = label_slot
                    bill.data_slot = self._getDataSlotByLabel(label_slot)

                    self.labels[label_slot] = bill
            else:
                # Not a bill either - try Prov, else drop the slot.
                strlbl = eval('Prov.like' + source + 'Prov(data)')
                if strlbl:
                    prov = eval('Prov.' + source + 'Prov(name, page, strlbl)')
                    prov.setSymbol(label_slot)
                    prov.data_slot = self._getDataSlotByLabel(label_slot)
                    print 'Run Python::_addInst Prov ', prov.symbol, r1, r2, c1, c2
                    status = DataCache.addInstrument(prov.symbol)
                    if status == 0:
                        print 'Python::_addInst duplication found ', prov.symbol
                        provFound = self.labels.get(label_slot, None)
                        if provFound:
                            if (not prov.isEqual(provFound)):
                                provFound.copy(prov)
                                #inst changed
                        else:
                            self.labels[label_slot] = prov
                            #it has been deleted
                    else:
                        self.labels[label_slot] = prov
                else:
                    # Unrecognised row: clear any stale instrument here.
                    print 'Run Python::_addInst i dont know what it is ', data, r1, r2, c1, c2
                    inst = self.labels.get(label_slot, None)
                    if (inst):
                        del self.labels[label_slot]
                        print 'Run Python::_addInst remove from labels', label_slot, '[', inst.symbol, ']'

        return status
Example #44
0
import logging
import DataCache as cache
import commands._SimpleCommand as cmdSimple

logger = logging.getLogger(__name__)

# REST identifier and Denon wire prefix for the surround-mode command.
_id = 'surround'
_prefix = 'MS'
# Accepted surround-mode request names.
# NOTE(review): 'DTS SUROUND' looks misspelled, but these strings must
# match the receiver's protocol verbatim - confirm against the Denon
# control spec before changing.
_requests = [
    'MOVIE', 'MUSIC', 'GAME', 'PURE DIRECT', 'DIRECT', 'STEREO', 'STANDARD',
    'DOLBY DIGITAL', 'DTS SUROUND', 'MCH STEREO', 'ROCK ARENA', 'JAZZ CLUB',
    'MONO MOVIE', 'MATRIX', 'VIDEO', 'VIRTUAL'
]

# Single cached value holding the current surround mode.
cachedValue = cache.CachedValue(_id)


def getId():
    """Return the identifier of this command ('surround')."""
    return _id


def cmdPrefix():
    """Return the wire prefix ('MS') for surround-mode commands."""
    return _prefix


def createRequest(request='get'):
    cachedValue.invalidate()
    request = request.upper()

    request = request.replace('_', ' ')
Example #45
0
import logging
import threading
import time
import DataCache as cache

logger = logging.getLogger(__name__)

# REST identifier and Denon wire prefix for the NSE display-line command.
_id = 'line'
_prefix = 'NSE'
# Rate limiting state: no more than one request per _REQUEST_INTERVAL_SEC.
_timeLastCreated = 0.0
_REQUEST_INTERVAL_SEC = 2.0

# One cached value per display line (replies carry lines 0-8).
_cachedValues = {
  '0' : cache.CachedValue(_id + "0"),
  '1' : cache.CachedValue(_id + "1"),
  '2' : cache.CachedValue(_id + "2"),
  '3' : cache.CachedValue(_id + "3"),
  '4' : cache.CachedValue(_id + "4"),
  '5' : cache.CachedValue(_id + "5"),
  '6' : cache.CachedValue(_id + "6"),
  '7' : cache.CachedValue(_id + "7"),
  '8' : cache.CachedValue(_id + "8")
}

def getId():
  """Return the identifier of this command ('line')."""
  return _id

def cmdPrefix():
  """Return the wire prefix ('NSE') for display-line replies."""
  return _prefix