Example no. 1
    def read(self, fn):
        """
        Read a tabbed format line
        Each line consists of:
        sent, prob, pred, arg1, arg2, ...
        """
        d = {}
        ex_index = 0
        with open(fn) as fin:
            for line in fin:
                if not line.strip():
                    continue
                data = line.strip().split('\t')
                try:
                    text, confidence, rel = data[:3]
                except ValueError:  # skip lines with fewer than three fields
                    continue
                curExtraction = Extraction(
                    pred=rel,
                    head_pred_index=None,
                    sent=text,
                    confidence=float(confidence),
                    question_dist="./question_distributions/dist_wh_sbj_obj1.json",
                    index=ex_index)
                ex_index += 1

                for arg in data[3:]:
                    curExtraction.addArg(arg)

                d[text] = d.get(text, []) + [curExtraction]
        self.oie = d
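
A minimal usage sketch for this reader (the file name and the wrapper object are illustrative assumptions, not from the source):

    # sample.tsv columns: sent \t prob \t pred \t arg1 \t arg2 ...
    with open("sample.tsv", "w") as f:
        f.write("John visited Rome\t0.93\tvisited\tJohn\tRome\n")

    reader = TabReader()   # hypothetical class exposing the read() above
    reader.read("sample.tsv")
    # reader.oie now maps each sentence to its list of Extraction objects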
Example no. 2
    def __init__(self, nodeName="slam_ekf"):
        super(SLAM_Localization, self).__init__(nodeName)

        self.icp = SubICP()
        self.extraction = Extraction()

        self.isFirstScan = True
        self.laser_count = 0
        # interval
        self.laser_interval = 5
        # State Vector [x y yaw].T, column vector.
        # self.xOdom = np.zeros((STATE_SIZE,1))
        self.xEst = np.zeros((STATE_SIZE, 1))
        # Covariance. The initial estimate is highly certain.
        self.PEst = np.zeros((STATE_SIZE, STATE_SIZE))
        # landMark Estimation. Like former self.tar_pc
        self.lEst = np.zeros((LM_SIZE, 0))  # lEst should be of 2*N size

        # ros topic
        self.laser_sub = rospy.Subscriber('/course_agv/laser/scan', LaserScan,
                                          self.laserCallback)
        # self.location_pub = rospy.Publisher('ekf_location',Odometry,queue_size=3)

        ## localization parameters
        # minimum landmark matches to update.
        self.min_match = int(rospy.get_param('/slam/min_match', 2))
        # minimum number of points for a landmark cluster
        self.extraction.landMark_min_pt = int(
            rospy.get_param('/slam/landMark_min_pt', 1))
        # maximum radius to be identified as landmark
        self.extraction.radius_max_th = float(
            rospy.get_param('/slam/radius_max_th', 0.4))

        OBSTACLE_RADIUS = 0.35  # NOTE: binds a local name only; the module-level constant is unchanged
Example no. 3
    def __init__(self):
        self.icp = ICP()
        self.ekf = EKF()
        self.extraction = Extraction()

        # odom robot init states
        self.robot_x = rospy.get_param('/icp/robot_x',0)
        self.robot_y = rospy.get_param('/icp/robot_y',0)
        self.robot_theta = rospy.get_param('/icp/robot_theta',0)
        self.sensor_sta = [self.robot_x,self.robot_y,self.robot_theta]
        self.isFirstScan = True
        self.src_pc = []
        self.tar_pc = []

        # State Vector [x y yaw]
        self.xOdom = np.zeros((3,1))
        self.xEst = np.zeros((3,1))
        self.PEst = np.eye(3)
        
        # map observation
        self.obstacle = []
        # radius
        self.obstacle_r = 10

        # init map
        self.updateMap()
        # ros topic
        self.laser_sub = rospy.Subscriber('/course_agv/laser/scan',LaserScan,self.laserCallback)
        self.location_pub = rospy.Publisher('ekf_location',Odometry,queue_size=3)
        self.odom_pub = rospy.Publisher('icp_odom',Odometry,queue_size=3)
        self.odom_broadcaster = tf.TransformBroadcaster()
        self.landMark_pub = rospy.Publisher('/landMarks',MarkerArray,queue_size=1)
Example no. 4
    def __init__(self, cam_type='zed', realtime=False):

        if cam_type == 'zed':
            odom_file = '/export/patraval/robo_car_new_loop_all/zed_front/gps/fix.txt'
            self.initial_offset = 6070
            self.offset_length = 6634

        elif cam_type == 'pg':
            # odom_file = '/export/patraval/robo_car_loop2/pg_cam/gps/fix.txt'
            odom_file = '/export/patraval/robo_loop_pg_only/pg_cam/gps/fix.txt'

        self.initial_offset = 5953
        self.offset_length = 6522

        self.initial_offset = 1600  # pg  #1417 #-- zed
        # self.initial_offset = 1948

        self.offset_length = 6000

        # self.initial_offset = 3510
        # self.offset_length = 6634
        print(self.initial_offset, self.offset_length)
        self.transformer = State_Transition(odom_file, cam_type,
                                            self.initial_offset, realtime)
        self.extractor = Extraction(cam_type)
Example no. 5
def address():
    question = request.args.get('question', 0, type=str)
    filtered_sentence = Parssing.get_main_words(datas_management, question)
    try:
        url_googleaddress = Extraction.get_urladdress(datas_extraction,
                                                      filtered_sentence)
        googleapi_data = Extraction.get_googleapi_data(datas_extraction,
                                                       url_googleaddress)
        address_location = Extraction.get_address(datas_extraction,
                                                  googleapi_data)
        random_sentence = random.choice(sentences_list.sentences_address)
        address_sentence = random_sentence + address_location
        return jsonify(result=address_sentence)
    except (ValueError, TypeError):
        return jsonify(result="Mince, je ne trouve pas :)")  # "Darn, I can't find it :)"
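
A sketch of how this endpoint might be exercised, assuming the view is registered on a Flask app named app under the path /address (neither is shown in the snippet):

    # hypothetical test using Flask's built-in test client
    with app.test_client() as client:
        resp = client.get("/address", query_string={"question": "Où est la tour Eiffel ?"})
        print(resp.get_json()["result"])  # an address sentence, or the fallback message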
Example no. 6
def map():
    question = request.args.get('question', 0, type=str)
    filtered_sentence = Parssing.get_main_words(datas_management, question)
    url_googleaddress = Extraction.get_urladdress(datas_extraction,
                                                  filtered_sentence)
    googleapi_data = Extraction.get_googleapi_data(datas_extraction,
                                                   url_googleaddress)
    lat = Extraction.get_lat(datas_extraction, googleapi_data)
    lng = Extraction.get_lng(datas_extraction, googleapi_data)
    if lat is None or lng is None:
        return jsonify(result="")
    else:
        url_googlemap = Extraction.get_urlmap(datas_extraction,
                                              filtered_sentence, lat, lng)
        return jsonify(result=url_googlemap)
Example no. 7
    def requires(self):
        """
        Depends on list of Extraction tasks.
        """

        for url in self.urls:
            yield Extraction(url)
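
This requires() pattern is how Luigi-style tasks declare dependencies (treating the snippet as a Luigi task is an inference from the yield pattern, not stated in the source); a minimal sketch of such a task:

    import luigi

    class ProcessAll(luigi.WrapperTask):
        urls = luigi.ListParameter()  # hypothetical parameter holding the URLs

        def requires(self):
            # one Extraction task per URL; the scheduler resolves them independently
            for url in self.urls:
                yield Extraction(url)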
Example no. 8
    def read(self, fn):
        d = defaultdict(lambda: [])
        with open(fn) as fin:
            for line_ind, line in enumerate(fin):
                data = line.strip().split('\t')
                text, rel = data[:2]
                args = data[2:]
                confidence = 1

                curExtraction = Extraction(pred=rel,
                                           head_pred_index=None,
                                           sent=text,
                                           confidence=float(confidence),
                                           index=line_ind)
                for arg in args:
                    curExtraction.addArg(arg)

                d[text].append(curExtraction)
        self.oie = d
Example no. 9
def wikilink():
    question = request.args.get('question', 0, type=str)
    filtered_sentence = Parssing.get_main_words(datas_management, question)
    first_word = Parssing.wiki_firstword(datas_management, filtered_sentence)
    try:
        link_wiki = Extraction.wiki_link(datas_extraction, first_word)
        return jsonify(result=link_wiki)
    except wikipedia.exceptions.DisambiguationError:
        return jsonify(result="Ou faire une nouvelle recherche...")  # "Or try a new search..."
    except wikipedia.exceptions.PageError:
        return jsonify(result="Essaie autre chose...")  # "Try something else..."
Example no. 10
    def setUpClass(cls):
        cls.extraction_task_1 = Extraction(url=TEST_URL_1)
        cls.extraction_task_2 = Extraction(url=TEST_URL_2)

        cls.saving_task = Saving([TEST_URL_1, TEST_URL_2])
        cls.saving_task.input = lambda: [
            cls.extraction_task_1.output(),
            cls.extraction_task_2.output()
        ]
        cls.cmd("hadoop fs -rm -r /{app_name}".format(
            app_name=APPLICATION_NAME).split())
        cls.cmd("hadoop fs -mkdir -p /{app_name}/{ex_out}".format(
            app_name=APPLICATION_NAME, ex_out=EXTRACTION_OUTPUT).split())
        cls.cmd(
            "hadoop fs -put test-resources/https--en.wikipedia.org-wiki-Battle_of_Austerlitz.html /{app_name}/{ex_out}"
            .format(app_name=APPLICATION_NAME,
                    ex_out=EXTRACTION_OUTPUT).split())
        cls.cmd(
            "hadoop fs -put test-resources/https--en.wikipedia.org-wiki-Napoleon.html /{app_name}/{ex_out}"
            .format(app_name=APPLICATION_NAME,
                    ex_out=EXTRACTION_OUTPUT).split())
Example no. 11
    def __init__(self):
        # ros param
        self.robot_x = rospy.get_param('/slam/robot_x',0)
        self.robot_y = rospy.get_param('/slam/robot_y',0)
        self.robot_theta = rospy.get_param('/slam/robot_theta',0)
        ## ros param of mapping
        self.map_x_width = rospy.get_param('/slam/map_width')
        self.map_y_width = rospy.get_param('/slam/map_height')
        self.map_reso = rospy.get_param('/slam/map_resolution')
        self.map_cellx_width = int(round(self.map_x_width/self.map_reso))
        self.map_celly_width = int(round(self.map_y_width/self.map_reso))

        self.icp = ICP()
        self.ekf = EKF()
        self.extraction = Extraction()
        self.mapping = Mapping(self.map_cellx_width,self.map_celly_width,self.map_reso)

        # odom robot init states
        self.sensor_sta = [self.robot_x,self.robot_y,self.robot_theta]
        self.isFirstScan = True
        self.src_pc = []
        self.tar_pc = []

        # State Vector [x y yaw]
        self.xOdom = np.zeros((STATE_SIZE, 1))
        self.xEst = np.zeros((STATE_SIZE, 1))
        self.PEst = np.eye(STATE_SIZE)
        
        # map observation
        self.obstacle = []
        # radius
        self.obstacle_r = 10

        # ros topic
        self.laser_sub = rospy.Subscriber('/course_agv/laser/scan',LaserScan,self.laserCallback)
        self.location_pub = rospy.Publisher('ekf_location',Odometry,queue_size=3)
        self.odom_pub = rospy.Publisher('icp_odom',Odometry,queue_size=3)
        self.odom_broadcaster = tf.TransformBroadcaster()
        self.landMark_pub = rospy.Publisher('/landMarks',MarkerArray,queue_size=1)
        self.map_pub = rospy.Publisher('/slam_map',OccupancyGrid,queue_size=1)
Example no. 12
    def non_zero_row(self, test_row, col_names, q_alias):
        check3 = False
        nil = 0
        q_col = 0
        for i in col_names:
            try:
                if Extraction.capitalised(self, i[0]) in q_alias:
                    q_col = i[1]
                    break
                else:
                    for temp_val in q_alias:
                        if temp_val in Extraction.capitalised(self, i[0]):
                            q_col = i[1]
                            break
            except Exception:
                pass

        for i in range(len(test_row)):
            if test_row[i] == '':
                nil += 1

        if test_row[q_col] != '':
            if type(test_row[q_col]) == int or type(test_row[q_col]) == float:
                check3 = True
        if check3:
            if len(test_row) - 2 * nil >= 0:
                return True

            else:
                return True

        else:
            return False
Example no. 13
    def __init__(self, nodeName="ekf_icp"):
        super(EKF_Landmark_Localization, self).__init__(nodeName)

        self.icp = SubICP()
        self.extraction = Extraction()

        self.src_pc = None
        self.isFirstScan = True
        self.laser_count = 0
        # interval
        self.laser_interval = 5

        # State Vector [x y yaw].T, column vector.
        self.xEst = np.zeros((STATE_SIZE, 1))
        # Covariance. Initial state is certain.
        # self.PEst=np.eye(STATE_SIZE)
        self.PEst = np.zeros((STATE_SIZE, STATE_SIZE))

        # init map
        # map observation
        self.tar_pc = None
        self.updateMap()

        # ros topic
        self.laser_sub = rospy.Subscriber('/course_agv/laser/scan', LaserScan,
                                          self.laserCallback)
        # self.location_pub = rospy.Publisher('ekf_location',Odometry,queue_size=3)

        # parameters from launch file.
        # minimum landmark matches to update. Actually even 1 point is acceptable.
        self.min_match = int(rospy.get_param('/localization/min_match', 1))
        # minimum number of points for a landmark cluster
        self.extraction.landMark_min_pt = int(
            rospy.get_param('/localization/landMark_min_pt', 2))
        # maximum radius to be identified as landmark
        self.extraction.radius_max_th = float(
            rospy.get_param('/localization/radius_max_th', 0.4))
Example no. 14
 def formatting_data(self, convert_nb_2_i, size_type, source_wb, dest_wb,
                     data_col, alias_data, material_check, a105, f22, f91,
                     f92, wcb, wcc, wc9, c12a, c12ab):
     sheet = source_wb.sheet_by_name('Standard Sheet')
     sheet_f = dest_wb.get_sheet_by_name('Standard Sheet')
     redFill = PatternFill(start_color='00CED1FF',
                           end_color='00CED1FF',
                           fill_type='solid')
     for data in alias_data:
         print "FORMATTING FOR -" + str(data)
         for row_s in range(sheet.nrows):
             try:
                 cap = sheet.cell(row_s, data_col[data]).value
             except Exception as e:
                 break
             if '-' not in str(cap)[:1]:
                 cap = Extraction.capitalised(self, str(cap))
             for key in alias_data[data]:
                 if cap in alias_data[data][key]:
                     sheet_f.cell(row=row_s + 1,
                                  column=data_col[data] + 1).value = key
                     if data == 'VALVE MATERIAL' and material_check == 'YES' and key not in [
                             'F316', 'CF3', 'CF8'
                     ]:
                         try:
                             check, material = self.verify_material(
                                 convert_nb_2_i, size_type, key,
                                 sheet.cell(row_s,
                                            data_col['TEMPERATURE']).value,
                                 sheet.cell(row_s,
                                            data_col['VALVE SIZE']).value,
                                 a105, f22, f91, f92, wcb, wcc, wc9, c12a,
                                 c12ab)
                             cell = sheet_f.cell(row=row_s + 1,
                                                 column=data_col[data] + 1)
                             cell.value = material
                             if not check:
                                 cell.fill = redFill
                         except Exception as e:
                             print e
                     break
     dest_wb.save(os.getcwd() + '/' + 'Output.xls')
Example no. 15
def wiki():
    question = request.args.get('question', 0, type=str)
    filtered_sentence = Parssing.get_main_words(datas_management, question)
    first_word = Parssing.wiki_firstword(datas_management, filtered_sentence)
    try:
        sentence_wiki = Extraction.wiki_datas(datas_extraction, first_word)
        random_sentence = random.choice(sentences_list.sentences_wiki)
        result_wiki = random_sentence + sentence_wiki
        return jsonify(result=result_wiki)
    except wikipedia.exceptions.DisambiguationError:
        return jsonify(
            result=  # "I can find info, but many things share that name; can you be more specific, please?"
            "Je peux trouver des infos mais il y a beaucoup de choses avec le même nom, peux-tu préciser s'il te plait ?"
        )
    except wikipedia.exceptions.PageError:
        return jsonify(result="Hum, ça ne me dit vraiment rien...")  # "Hmm, that really doesn't ring a bell..."
Example no. 16
    def format_n_filter(self, length, alias_data, sheet_row, data_column, size,
                        i2n):
        temp_l = []
        temp = ''
        for data in data_column:
            try:
                cap = sheet_row[data[1]]
            except Exception as e:
                print e
                continue

            if ('-' not in str(cap)
                    or '/' not in str(cap)) and data[0] != 'SIZE':
                cap = Extraction.capitalised(self, str(cap))
            for key in alias_data:
                for alias in alias_data[key]:
                    if cap in alias_data[key][alias]:
                        temp_l.append(alias)
            if data[0] == 'SIZE':
                if size == 'IN':
                    cap += '\"'
                    temp_l.append(int(self.i2n[cap]))
                else:
                    if cap != '':
                        try:
                            temp_l.append(int(cap))
                        except Exception as e:
                            print e
                            pass
            rat = ''
            if data[0] == 'RATING':
                for val in str(cap):
                    try:
                        int(val)  # keep only characters that parse as digits
                        rat += str(val)
                    except ValueError:
                        pass

                temp_l.append(int(rat))

        for i in range(length):
            try:
                temp += '-' + str(temp_l[i])
            except IndexError:
                temp += '-'
        temp = temp[1:] + '-'
        return temp
Example no. 17
	def GET(self, datafile, method):
		
		params = web.input(output="xml")
		"""Returns some extracted information from a file"""
		extractor = Extraction()
		data = ''
		txtfile = TMP_FOLDER + datafile + '.txt'
		
		"""Check if the file exists, if not return a 404"""
		if not os.path.exists(txtfile):
			return web.notfound()
		
		try:
			if method == 'text':
				txtfile = TMP_FOLDER + datafile + '.txt'
				web.header('Content-Type', 'text/text') # Set the Header
				return open(txtfile,"rb").read()
			elif method == 'file':
				pdffile = TMP_FOLDER + datafile
				typeFilterStatus = utilities.typeFilter(pdffile)
				web.header('Content-Type', typeFilterStatus) # Set the Header
				return open(pdffile,"rb").read()
			else:
				if method == 'header':
					data = data + extractor.extractHeaders(txtfile)
				elif method == 'citations':
					data = data + extractor.extractCitations(txtfile)
				elif method == 'body':
					data = data + extractor.extractBody(txtfile)
				elif method == 'keyphrases':
					data = data + extractor.extractKeyphrases(txtfile)
				#Print XML or JSON
				if params.output == 'xml' or params.output == '':
					web.header('Content-Type','text/xml; charset=utf-8')
					return utilities.printXML(data)
				elif params.output == 'json':
					jsondata = xmltodict.parse(data)
					web.header('Content-Type','text/json; charset=utf-8') 	
					return json.dumps(jsondata)
				else:
					web.ctx.status = '400 Bad Request'
					return 'Unsupported output format. Options are: "xml" (default) and "json"'
		
		except (IOError, OSError) as er: #Internal error, i.e. during extraction
			web.debug(er)
			return web.internalerror()
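
A usage sketch, assuming a web.py URL template that maps two path groups to (datafile, method) (the actual routing is not shown in the snippet):

    # hypothetical requests against a running instance
    #   GET /paper123/text                    -> raw text/plain contents
    #   GET /paper123/header                  -> extracted headers as XML (default)
    #   GET /paper123/citations?output=json   -> citations as JSON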
Example no. 18
    def read(self, fn):
        d = {}
        with open(fn) as fin:
            for line in fin:
                data = line.strip().split('\t')
                if len(data) >= 4:
                    arg1 = data[3]
                    rel = data[2]
                    arg_else = data[4:]
                    confidence = data[1]
                    text = data[0]

                    curExtraction = Extraction(pred=rel,
                                               head_pred_index=-1,
                                               sent=text,
                                               confidence=float(confidence))
                    curExtraction.addArg(arg1)
                    for arg in arg_else:
                        curExtraction.addArg(arg)
                    d[text] = d.get(text, []) + [curExtraction]
        self.oie = d
Example no. 19
# -*- coding: utf-8 -*-
# Import library
import numpy as np
from utils import Data
from extraction import Extraction
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score, confusion_matrix

# Import dataset
#X, Y, lb = Data.numbers()
X, Y, lb = Extraction.huMoments()
#X, Y, lb = Extraction.lbp()
#X, Y, lb = Extraction.glcm()

n_iter = 10
scores_list = []
for i in range(n_iter):
    
    # k-Fold Cross-Validation
    #model = KNeighborsClassifier(n_neighbors=10)
    #cv_scores = cross_val_score(model, X, Y, cv=5)
    #scores = np.mean(cv_scores)
    #scores_list.append(scores)
     
    # Holdout
    x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
    #model = KNeighborsClassifier(n_neighbors=10)
    model = MLPClassifier(hidden_layer_sizes=(64, 128, 64),
                          max_iter=1000,
Example no. 20
    set_logger()

    if EXTRACTION:
        """
			The extraction phase involves the process of obtaining a set of documents from a repository, such as Scopus or the Web of Science, or it can involve the steps of scraping a 
			publisher’s website to retrieve full-text articles (typically in PDF format). Scopus generally provides publication abstracts, including all the meta-data (journal, authors, 
			affiliations, publication date) through various APIs. The upside of using an API is that publication content is easily obtained for a large number of documents simultaneously;
			however, these APIs often do not provide full-text for all the publications or journals of interest. In these cases, scraping a publisher’s website can be an alternative solution.
			This process involves building many handcrafted crawlers, as each publisher lists their publications in a different manner on their website. Download limits should always be 
			respected when building such scripts. Another option would be to manually download articles, although such approaches might not be feasible if the document collection of interest 
			contains thousands or tens of thousands of articles. To enable a comparison of topics by time, or a comparison of topics by journals, it is important to store this information 
			alongside the content of the document.
		"""

        # instantiate Extraction class
        extraction = Extraction()

        # extract publications from NIPS website
        extraction.extract_publications()

    if PREPROCESSING:
        """
			The pre-processing phase can be seen as the process of going from a document source to an interpretable representation for the topic model algorithm. This phase is typically different 
			for full-text and abstract data. One of the main differences is that abstract data is often provided in a clean format, whereas full-text is commonly obtained by converting a PDF document 
			into its plain text representation.

			Within this phase, an important part is to filter out the content that is not important from a topic model's point-of-view, rather than from a human’s point-of-view. Abstract data 
			usually comes in a clean format of around 300--400 words, and little additional text is added to it; typically the copyright statement is the only text that should be removed. 
			In contrast, full-text articles can contain a lot of additional text that has been added by the publisher. This is article meta-data and boilerplate. It is important that such 
			additional text is removed, and various methods to do so exist. Examples include: deleting the first cover page; deleting the first n-bits of the content; using regular expressions 
			or other pattern matching techniques to find and remove additional text, or more advanced methods. For full-text articles, a choice can be made to also exclude the reference 
Example no. 21
class SLAM_EKF():
    def __init__(self):
        # ros param
        self.robot_x = rospy.get_param('/slam/robot_x',0)
        self.robot_y = rospy.get_param('/slam/robot_y',0)
        self.robot_theta = rospy.get_param('/slam/robot_theta',0)
        ## ros param of mapping
        self.map_x_width = rospy.get_param('/slam/map_width')
        self.map_y_width = rospy.get_param('/slam/map_height')
        self.map_reso = rospy.get_param('/slam/map_resolution')
        self.map_cellx_width = int(round(self.map_x_width/self.map_reso))
        self.map_celly_width = int(round(self.map_y_width/self.map_reso))

        self.icp = ICP()
        self.ekf = EKF()
        self.extraction = Extraction()
        self.mapping = Mapping(self.map_cellx_width,self.map_celly_width,self.map_reso)

        # odom robot init states
        self.sensor_sta = [self.robot_x,self.robot_y,self.robot_theta]
        self.isFirstScan = True
        self.src_pc = []
        self.tar_pc = []

        # State Vector [x y yaw]
        self.xOdom = np.zeros((STATE_SIZE, 1))
        self.xEst = np.zeros((STATE_SIZE, 1))
        self.PEst = np.eye(STATE_SIZE)
        
        # map observation
        self.obstacle = []
        # radius
        self.obstacle_r = 10

        # ros topic
        self.laser_sub = rospy.Subscriber('/course_agv/laser/scan',LaserScan,self.laserCallback)
        self.location_pub = rospy.Publisher('ekf_location',Odometry,queue_size=3)
        self.odom_pub = rospy.Publisher('icp_odom',Odometry,queue_size=3)
        self.odom_broadcaster = tf.TransformBroadcaster()
        self.landMark_pub = rospy.Publisher('/landMarks',MarkerArray,queue_size=1)
        self.map_pub = rospy.Publisher('/slam_map',OccupancyGrid,queue_size=1)

    def laserCallback(self,msg):
        np_msg = self.laserToNumpy(msg)
        lm = self.extraction.process(np_msg)
        # u = self.calc_odometry(self.lm2pc(lm))
        u = self.calc_odometry(np_msg)
        z = self.observation(lm)
        self.xEst,self.PEst = self.ekf.estimate(self.xEst,self.PEst,z,u)

        # TODO
        # obs = self.u2T(self.xEst[0:3]).dot(np_msg)
        # pmap = self.mapping.update(obs[0], obs[1], self.xEst[0], self.xEst[1])

        # self.publishMap(pmap)
        self.publishLandMark(lm)
        self.publishResult()
        pass

    def observation(self,lm):
        landmarks = lm
        z = np.zeros((0, 3))
        for i in range(len(landmarks.id)):
            dx = landmarks.position_x[i]
            dy = landmarks.position_y[i]
            d = math.hypot(dx, dy)
            angle = self.ekf.pi_2_pi(math.atan2(dy, dx))
            zi = np.array([d, angle, i])
            z = np.vstack((z, zi))
        return z

    def calc_odometry(self,np_msg):
        if self.isFirstScan:
            self.tar_pc = np_msg
            self.isFirstScan = False
            return np.array([[0.0,0.0,0.0]]).T
        self.src_pc = np_msg
        transform_acc = self.icp.process(self.tar_pc,self.src_pc)
        self.tar_pc = np_msg
        return self.T2u(transform_acc)

    def laserToNumpy(self,msg):
        total_num = len(msg.ranges)
        pc = np.ones([3,total_num])
        range_l = np.array(msg.ranges)
        range_l[range_l == np.inf] = MAX_LASER_RANGE
        angle_l = np.linspace(msg.angle_min,msg.angle_max,total_num)
        pc[0:2,:] = np.vstack((np.multiply(np.cos(angle_l),range_l),np.multiply(np.sin(angle_l),range_l)))
        return pc

    def T2u(self,t):
        dw = math.atan2(t[1,0],t[0,0])
        u = np.array([[t[0,2],t[1,2],dw]])
        return u.T
    
    def u2T(self,u):
        w = u[2]
        dx = u[0]
        dy = u[1]

        return np.array([
            [ math.cos(w),-math.sin(w), dx],
            [ math.sin(w), math.cos(w), dy],
            [0,0,1]
        ])

    def lm2pc(self,lm):
        total_num = len(lm.id)
        dy = lm.position_y
        dx = lm.position_x
        range_l = np.hypot(dy,dx)
        angle_l = np.arctan2(dy,dx)
        pc = np.ones((3,total_num))
        pc[0:2,:] = np.vstack((np.multiply(np.cos(angle_l),range_l),np.multiply(np.sin(angle_l),range_l)))
        print("mdbg ",total_num)
        return pc

    def publishResult(self):
        # tf
        s = self.xEst.reshape(-1)
        q = tf.transformations.quaternion_from_euler(0,0,s[2])
        self.odom_broadcaster.sendTransform((s[0],s[1],0.001),(q[0],q[1],q[2],q[3]),
                            rospy.Time.now(),"ekf_location","world_base")
        # odom
        odom = Odometry()
        odom.header.stamp = rospy.Time.now()
        odom.header.frame_id = "world_base"

        odom.pose.pose.position.x = s[0]
        odom.pose.pose.position.y = s[1]
        odom.pose.pose.position.z = 0.001
        odom.pose.pose.orientation.x = q[0]
        odom.pose.pose.orientation.y = q[1]
        odom.pose.pose.orientation.z = q[2]
        odom.pose.pose.orientation.w = q[3]

        self.location_pub.publish(odom)

        s = self.xOdom
        q = tf.transformations.quaternion_from_euler(0,0,s[2])
        # odom
        odom = Odometry()
        odom.header.stamp = rospy.Time.now()
        odom.header.frame_id = "world_base"

        odom.pose.pose.position.x = s[0]
        odom.pose.pose.position.y = s[1]
        odom.pose.pose.position.z = 0.001
        odom.pose.pose.orientation.x = q[0]
        odom.pose.pose.orientation.y = q[1]
        odom.pose.pose.orientation.z = q[2]
        odom.pose.pose.orientation.w = q[3]

        self.odom_pub.publish(odom)
        
        print("mdbg ",self.xEst.shape)
        # self.laser_pub.publish(self.target_laser)
        pass

    def publishLandMark(self,msg):
        # msg = LandMarkSet()
        if len(msg.id) <= 0:
            return
        
        landMark_array_msg = MarkerArray()
        for i in range(len(msg.id)):
            marker = Marker()
            marker.header.frame_id = "course_agv__hokuyo__link"
            marker.header.stamp = rospy.Time(0)
            marker.ns = "lm"
            marker.id = i
            marker.type = Marker.SPHERE
            marker.action = Marker.ADD
            marker.pose.position.x = msg.position_x[i]
            marker.pose.position.y = msg.position_y[i]
            marker.pose.position.z = 0 # 2D
            marker.pose.orientation.x = 0.0
            marker.pose.orientation.y = 0.0
            marker.pose.orientation.z = 0.0
            marker.pose.orientation.w = 1.0
            marker.scale.x = 0.2
            marker.scale.y = 0.2
            marker.scale.z = 0.2
            marker.color.a = 0.5 # Don't forget to set the alpha
            marker.color.r = 0.0
            marker.color.g = 0.0
            marker.color.b = 1.0
            landMark_array_msg.markers.append(marker)


        for i in range((self.xEst.shape[0] - STATE_SIZE) // 2):
            marker = Marker()
            marker.header.frame_id = "world_base"
            marker.header.stamp = rospy.Time(0)
            marker.ns = "lm"
            marker.id = i+len(msg.id)
            marker.type = Marker.SPHERE
            marker.action = Marker.ADD
            marker.pose.position.x = self.xEst[STATE_SIZE+i*2,0]
            marker.pose.position.y = self.xEst[STATE_SIZE+i*2+1,0]
            marker.pose.position.z = 0 # 2D
            marker.pose.orientation.x = 0.0
            marker.pose.orientation.y = 0.0
            marker.pose.orientation.z = 0.0
            marker.pose.orientation.w = 1.0
            marker.scale.x = 0.2
            marker.scale.y = 0.2
            marker.scale.z = 0.2
            marker.color.a = 0.5 # Don't forget to set the alpha
            marker.color.r = 0.0
            marker.color.g = 1.0
            marker.color.b = 0.0
            landMark_array_msg.markers.append(marker)
        self.landMark_pub.publish(landMark_array_msg)

    def publishMap(self,pmap):
        map_msg = OccupancyGrid()
        map_msg.header.seq = 1
        map_msg.header.stamp = rospy.Time().now()
        map_msg.header.frame_id = "map"

        map_msg.info.map_load_time = rospy.Time().now()
        map_msg.info.resolution = self.map_reso
        map_msg.info.width = self.map_cellx_width
        map_msg.info.height = self.map_celly_width
        map_msg.info.origin.position.x = -self.map_cellx_width*self.map_reso/2.0
        map_msg.info.origin.position.y = -self.map_celly_width*self.map_reso/2.0
        map_msg.info.origin.position.z = 0
        map_msg.info.origin.orientation.x = 0
        map_msg.info.origin.orientation.y = 0
        map_msg.info.origin.orientation.z = 0
        map_msg.info.origin.orientation.w = 1.0

        map_msg.data = list(pmap.T.reshape(-1)*100)
        
        self.map_pub.publish(map_msg)
Example no. 22
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from conversion import Conversion
from extraction import Extraction
from distance import Distance
from sys import argv

if __name__ == "__main__":

    y = argv[1]

    if "-c" in y:
        c = Conversion(y)
    elif "-e" in y:
        e = Extraction(y)
    elif y == "-d":
        d = Distance()
    else:
        print "There is no %r option. Try -cmp3 -cm4a -e or -d." % (y)
Example no. 23
	def run(self):
		resume_extract = Extraction(self.file_path)
		resumes = resume_extract.get_resumes()
	
		degree_map = self.__build_degree_map(resumes, "computer")
		self.__plot_degree_data(degree_map)
Example no. 24
    song_features = db.song_features_collection  # #####REPLACE#####
    distance_features = db.distance_features_collection  # #####REPLACE#####
    #__________________________________________________________________________

    options, rem = getopt(argv[1:], 'c:e:d:n:g:h', [
        'conversion=', 'extraction=', 'distance', 'neighbour', 'graphdist',
        'help'
    ])

    for opt, arg in options:
        if opt in ('-c', '--conversion'):
            from conversion import Conversion
            c = Conversion(arg, rem[0])
        elif opt in ('-e', '--extraction'):
            from extraction import Extraction
            e = Extraction(arg, song_features)
        elif opt in ('-d', '--distance'):
            from distance import Distance
            d = Distance(song_features, distance_features)
        elif opt in ('-n', '--neighbour'):
            from neighbour import Neighbour
            n = Neighbour(song_features, distance_features)
        elif opt in ('-g', '--graphdist'):
            from graphdist import Graphdist
            g = Graphdist(song_features, distance_features)
        elif opt in ('-h', '--help'):
            print """The following options are available:
            -c, --conversion mp3/m4a
            =>  conversion of mp4 and mp3 files to wave files

            -e, --extraction
Example no. 25
class Localization():
    def __init__(self):
        self.icp = ICP()
        self.ekf = EKF()
        self.extraction = Extraction()

        # odom robot init states
        self.robot_x = rospy.get_param('/icp/robot_x',0)
        self.robot_y = rospy.get_param('/icp/robot_y',0)
        self.robot_theta = rospy.get_param('/icp/robot_theta',0)
        self.sensor_sta = [self.robot_x,self.robot_y,self.robot_theta]
        self.isFirstScan = True
        self.src_pc = []
        self.tar_pc = []

        # State Vector [x y yaw]
        self.xOdom = np.zeros((3,1))
        self.xEst = np.zeros((3,1))
        self.PEst = np.eye(3)
        
        # map observation
        self.obstacle = []
        # radius
        self.obstacle_r = 10

        # init map
        self.updateMap()
        # ros topic
        self.laser_sub = rospy.Subscriber('/course_agv/laser/scan',LaserScan,self.laserCallback)
        self.location_pub = rospy.Publisher('ekf_location',Odometry,queue_size=3)
        self.odom_pub = rospy.Publisher('icp_odom',Odometry,queue_size=3)
        self.odom_broadcaster = tf.TransformBroadcaster()
        self.landMark_pub = rospy.Publisher('/landMarks',MarkerArray,queue_size=1)

    def updateMap(self):
        print("debug: try update map obstacle")
        rospy.wait_for_service('/static_map')
        try:
            getMap = rospy.ServiceProxy('/static_map',GetMap)
            self.map = getMap().map
            # print(self.map)
        except rospy.ServiceException as e:
            print('Service call failed: %s' % e)
        # Update for planning algorithm
        map_data = np.array(self.map.data).reshape((-1,self.map.info.height)).transpose()
        tx,ty = np.nonzero((map_data > 20)|(map_data < -0.5))
        ox = (tx*self.map.info.resolution+self.map.info.origin.position.x)*1.0
        oy = (ty*self.map.info.resolution+self.map.info.origin.position.y)*1.0
        self.obstacle = np.vstack((ox,oy)).transpose()
        self.obstacle_r = self.map.info.resolution
        print("debug: update map obstacle success! ")

    def laserCallback(self,msg):
        u = self.calc_odometry(msg)
        z = self.ekf.odom_model(self.xEst,self.calc_map_observation(self.laserToNumpy(msg)))
        self.xEst,self.PEst = self.ekf.estimate(self.xEst,self.PEst,z,u)
        self.publishResult()
        pass

    def laserEstimation(self):
        # TODO: build the expected scan 'pc' from the map; undefined until implemented
        return pc

    def calc_map_observation(self,msg):
        landmarks_src = self.extraction.process(msg,True)
        landmarks_tar = self.extraction.process(self.laserEstimation(),True)
        print("lm : ",len(landmarks_src.id),len(landmarks_tar.id))
        self.publishLandMark(landmarks_src,landmarks_tar)
        src_pc = self.lm2pc(landmarks_src)
        tar_pc = self.lm2pc(landmarks_tar)
        transform_acc = self.icp.process(tar_pc,src_pc)
        return self.T2u(transform_acc)

    def calc_odometry(self,msg):
        if self.isFirstScan:
            self.tar_pc = self.laserToNumpy(msg)
            self.isFirstScan = False
            return np.array([[0.0,0.0,0.0]]).T
        self.src_pc = self.laserToNumpy(msg)
        transform_acc = self.icp.process(self.tar_pc,self.src_pc)
        self.tar_pc = self.laserToNumpy(msg)
        return self.T2u(transform_acc)

    def lm2pc(self,lm):
        total_num = len(lm.id)
        dy = lm.position_y
        dx = lm.position_x
        range_l = np.hypot(dy,dx)
        angle_l = np.arctan2(dy,dx)
        pc = np.ones((3,total_num))
        pc[0:2,:] = np.vstack((np.multiply(np.cos(angle_l),range_l),np.multiply(np.sin(angle_l),range_l)))
        print("mdbg ",total_num)
        return pc
    def laserToNumpy(self,msg):
        total_num = len(msg.ranges)
        pc = np.ones([3,total_num])
        range_l = np.array(msg.ranges)
        angle_l = np.linspace(msg.angle_min,msg.angle_max,total_num)
        pc[0:2,:] = np.vstack((np.multiply(np.cos(angle_l),range_l),np.multiply(np.sin(angle_l),range_l)))
        return pc
    def T2u(self,t):
        dw = math.atan2(t[1,0],t[0,0])
        u = np.array([[t[0,2],t[1,2],dw]])
        return u.T

    def publishResult(self):
        # tf
        s = self.xEst.reshape(-1)
        q = tf.transformations.quaternion_from_euler(0,0,s[2])
        self.odom_broadcaster.sendTransform((s[0],s[1],0.001),(q[0],q[1],q[2],q[3]),
                            rospy.Time.now(),"ekf_location","world_base")
        # odom
        odom = Odometry()
        odom.header.stamp = rospy.Time.now()
        odom.header.frame_id = "world_base"

        odom.pose.pose.position.x = s[0]
        odom.pose.pose.position.y = s[1]
        odom.pose.pose.position.z = 0.001
        odom.pose.pose.orientation.x = q[0]
        odom.pose.pose.orientation.y = q[1]
        odom.pose.pose.orientation.z = q[2]
        odom.pose.pose.orientation.w = q[3]

        self.location_pub.publish(odom)

        s = self.xOdom
        q = tf.transformations.quaternion_from_euler(0,0,s[2])
        # odom
        odom = Odometry()
        odom.header.stamp = rospy.Time.now()
        odom.header.frame_id = "world_base"

        odom.pose.pose.position.x = s[0]
        odom.pose.pose.position.y = s[1]
        odom.pose.pose.position.z = 0.001
        odom.pose.pose.orientation.x = q[0]
        odom.pose.pose.orientation.y = q[1]
        odom.pose.pose.orientation.z = q[2]
        odom.pose.pose.orientation.w = q[3]

        self.odom_pub.publish(odom)

        # self.laser_pub.publish(self.target_laser)
        pass

    def publishLandMark(self,msg,msg2):
        # msg = LandMarkSet()
        if len(msg.id) <= 0:
            return
        
        landMark_array_msg = MarkerArray()
        for i in range(len(msg.id)):
            marker = Marker()
            marker.header.frame_id = "course_agv__hokuyo__link"
            marker.header.stamp = rospy.Time(0)
            marker.ns = "lm"
            marker.id = i
            marker.type = Marker.SPHERE
            marker.action = Marker.ADD
            marker.pose.position.x = msg.position_x[i]
            marker.pose.position.y = msg.position_y[i]
            marker.pose.position.z = 0 # 2D
            marker.pose.orientation.x = 0.0
            marker.pose.orientation.y = 0.0
            marker.pose.orientation.z = 0.0
            marker.pose.orientation.w = 1.0
            marker.scale.x = 0.2
            marker.scale.y = 0.2
            marker.scale.z = 0.2
            marker.color.a = 0.5 # Don't forget to set the alpha
            marker.color.r = 0.0
            marker.color.g = 0.0
            marker.color.b = 1.0
            landMark_array_msg.markers.append(marker)
        for i in range(len(msg2.id)):
            marker = Marker()
            marker.header.frame_id = "course_agv__hokuyo__link"
            marker.header.stamp = rospy.Time(0)
            marker.ns = "lm"
            marker.id = i+len(msg.id)
            marker.type = Marker.SPHERE
            marker.action = Marker.ADD
            marker.pose.position.x = msg2.position_x[i]
            marker.pose.position.y = msg2.position_y[i]
            marker.pose.position.z = 0 # 2D
            marker.pose.orientation.x = 0.0
            marker.pose.orientation.y = 0.0
            marker.pose.orientation.z = 0.0
            marker.pose.orientation.w = 1.0
            marker.scale.x = 0.2
            marker.scale.y = 0.2
            marker.scale.z = 0.2
            marker.color.a = 0.5 # Don't forget to set the alpha
            marker.color.r = 0.0
            marker.color.g = 1.0
            marker.color.b = 0.0
            landMark_array_msg.markers.append(marker)
        self.landMark_pub.publish(landMark_array_msg)
Example no. 26
class Segmentation_Pipeline():
    def __init__(self, cam_type='zed', realtime=False):

        if cam_type == 'zed':
            odom_file = '/export/patraval/robo_car_new_loop_all/zed_front/gps/fix.txt'
            self.initial_offset = 6070
            self.offset_length = 6634

        elif cam_type == 'pg':
            # odom_file = '/export/patraval/robo_car_loop2/pg_cam/gps/fix.txt'
            odom_file = '/export/patraval/robo_loop_pg_only/pg_cam/gps/fix.txt'

        self.initial_offset = 5953
        self.offset_length = 6522

        self.initial_offset = 1600  # pg  #1417 #-- zed
        # self.initial_offset = 1948

        self.offset_length = 6000

        # self.initial_offset = 3510
        # self.offset_length = 6634
        print(self.initial_offset, self.offset_length)
        self.transformer = State_Transition(odom_file, cam_type,
                                            self.initial_offset, realtime)
        self.extractor = Extraction(cam_type)

    def start_process_disk(self):

        for i in range(self.initial_offset, self.offset_length):
            points = self.extractor.execute_extraction(i)
            self.transformer.point_transformation(points, i)
            print("Processed: ", i)
            # self.extractor.display_final_image(self.transformer.vehicle_coords_base, self.transformer.pop_index, points)
            # raw_input("Press Enter to continue...")

    def start_process_realtime2(self, index=None):
        for index in range(self.initial_offset, self.offset_length):
            points = self.extractor.execute_extraction(index)
            self.transformer.point_transformation(points, index)
            print("Processed: ", index)
            # self.extractor.display_final_image(self.transformer.vehicle_coords_base,  points, self.transformer.pop_index)

    def start_process_realtime(self, index):

        points = self.extractor.execute_extraction(index + self.initial_offset)
        self.transformer.point_transformation(points,
                                              index + self.initial_offset)
        # print("Processed: ", index+70+6000)
        self.extractor.display_final_image(
            self.transformer.vehicle_coords_base, points,
            self.transformer.pop_index)

    def start_process_cluster_realtime2(self, index=None):
        for index in range(self.initial_offset, self.offset_length):
            print("Processing frame: ", index)
            points, points_disp = self.extractor.execute_extraction_cluster(
                index)
            self.transformer.cluster_transformation(points, points_disp, index)

        self.transformer.init_origin_state(self.initial_offset)
        for index in range(self.initial_offset, self.offset_length):
            print("logging frame: ", index)

            self.transformer.log_transformations(
                self.transformer.logged_cluster, index)
Example no. 27
class SLAM_Localization(LandmarkLocalization, EKF_SLAM):
    alpha = 3.0  # factor in estimating covariance.

    def __init__(self, nodeName="slam_ekf"):
        super(SLAM_Localization, self).__init__(nodeName)

        self.icp = SubICP()
        self.extraction = Extraction()

        self.isFirstScan = True
        self.laser_count = 0
        # interval
        self.laser_interval = 5
        # State Vector [x y yaw].T, column vector.
        # self.xOdom = np.zeros((STATE_SIZE,1))
        self.xEst = np.zeros((STATE_SIZE, 1))
        # Covariance. The initial estimate is highly certain.
        self.PEst = np.zeros((STATE_SIZE, STATE_SIZE))
        # landMark Estimation. Like former self.tar_pc
        self.lEst = np.zeros((LM_SIZE, 0))  # lEst should be of 2*N size

        # ros topic
        self.laser_sub = rospy.Subscriber('/course_agv/laser/scan', LaserScan,
                                          self.laserCallback)
        # self.location_pub = rospy.Publisher('ekf_location',Odometry,queue_size=3)

        ## localization parameters
        # minimum landmark matches to update.
        self.min_match = int(rospy.get_param('/slam/min_match', 2))
        # minimum number of points for a landmark cluster
        self.extraction.landMark_min_pt = int(
            rospy.get_param('/slam/landMark_min_pt', 1))
        # maximum radius to be identified as landmark
        self.extraction.radius_max_th = float(
            rospy.get_param('/slam/radius_max_th', 0.4))

        OBSTACLE_RADIUS = 0.35  # NOTE: binds a local name only; the module-level constant used in estimate() is unchanged

    # feed icp landmark instead of laser.
    def laserCallback(self, msg):
        print('------seq:  ', msg.header.seq)
        if self.isFirstScan:
            # feed in landmarks.
            # self.icp.tar_pc=self.extraction.process(msg)
            self.icp.tar_pc = self.icp.laserToNumpy(msg)
            self.isFirstScan = False
            return

        # Update once every 5 laser scans because we cannot distinguish rotation when the interval is too small.
        self.laser_count += 1
        if self.laser_count < self.laser_interval:
            return

        # Updating process
        self.laser_count = 0
        # z is the landmarks in self frame as a 2*n array.
        z = self.extraction.process(msg, True)
        self.publishLandMark(z, "b")
        # relative displacement
        u = self.calc_odometry(msg)

        # xEst,lEst,PEst is both predicted and updated in the ekf.
        self.xEst, self.lEst, self.PEst = self.estimate(
            self.xEst, self.lEst, self.PEst, z, u)

        self.publishResult()
        return

    def calc_odometry(self, msg):
        '''
        Let icp handle the odometry, returns a relative odometry u.
        '''
        # laser callback is manually fed by its owner class.
        return self.icp.laserCallback(msg)

    # EKF virtual function.
    def observation_model(self, xEst, lEst):
        '''
        Returns a 2*n array of column vectors [z1, z2, ..., zn].
        '''
        rotation = tf.transformations.euler_matrix(0, 0, xEst[2, 0])[0:2, 0:2]
        z = np.dot(rotation.T, lEst - xEst[0:2])
        return z

    def estimate(self, xEst, lEst, PEst, z, u):

        G, Fx = self.jacob_motion(xEst, lEst, u)
        # FIXME: the G and Fx transposes are easy to confuse; fortunately both are symmetric here.
        covariance = np.dot(G, np.dot(PEst, G.T)) + np.dot(
            Fx, np.dot(Cx, Fx.T))

        # Predict
        xPredict = self.odom_model(xEst, u)

        zEst = self.observation_model(xPredict, lEst)
        self.publishLandMark(zEst,
                             color="r",
                             namespace="estimate",
                             frame=self.nodeName)

        neighbour = self.icp.findNearest(z, zEst)
        # Predicted
        zPredict = zEst[:, neighbour.tar_indices]
        self.publishLandMark(zPredict,
                             color="g",
                             namespace="paired",
                             frame=self.nodeName)

        lSize = np.size(lEst, 1)
        zSize = np.size(z, 1)
        # how many observed landmarks are missed, how many new ones will be added.
        missedIndices = sorted(
            list(set(range(zSize)) - set(neighbour.src_indices)))
        paired = len(neighbour.src_indices)
        missed = len(missedIndices)

        neighbour.src_indices += missedIndices
        # the newly-added landmarks
        neighbour.tar_indices += range(lSize, missed + lSize)

        zObserved = z[:, neighbour.src_indices]
        # add new landmarks to newL (new lEst)
        # newL=np.repeat(xPredict[0:2],missed,axis=1)
        # delicately select the spawning position...
        newL = np.dot(
            tf.transformations.euler_matrix(0, 0, xPredict[2, 0])[0:2, 0:2],
            z[:, missedIndices]) + xPredict[0:2]
        newL = np.hstack((lEst, newL))
        zEst = self.observation_model(xPredict, newL)
        zPredict = zEst[:, neighbour.tar_indices]
        # the newly-added landmarks' uncertainty is very large
        # block is so powerful!
        covariance = scilinalg.block_diag(covariance,
                                          np.diag([INF] * LM_SIZE * missed))
        yPredict = self.XLtoY(xPredict, newL)

        variance = self.alpha / (zSize + self.alpha) * OBSTACLE_RADIUS
        print("\n\npaired: {} variance: {} missed: {} landmarks: {}".format(
            paired, variance, missed, lSize + missed))
        if zSize < self.min_match:
            print("Too few observed landmarks to run the update.")
            #  only update according to the prediction stage.
            return xPredict, newL, covariance

        m = self.jacob_h(newL, neighbour, xPredict)

        # z (2*n)array->(2n*1) array
        zPredict = np.vstack(np.hsplit(zPredict, np.size(zPredict, 1)))
        zObserved = np.vstack(np.hsplit(zObserved, np.size(zObserved, 1)))
        # print("delta z: \n{}\n".format(zObserved-zPredict))

        # print("covariance:\n{}\n".format(covariance))
        # Kalman gain. Universal formula.
        K = np.dot(
            np.dot(covariance, m.T),
            np.linalg.inv(
                np.dot(m, np.dot(covariance, m.T)) +
                np.diag([variance**2] * 2 * zSize)))

        # Update
        fix = np.dot(K, zObserved - zPredict)
        # print("fix: \n{}\n\n".format(fix))
        yEst = yPredict + fix
        PEst = covariance - np.dot(K, np.dot(m, covariance))
        xEst, lEst = self.YtoXL(yEst)
        return xEst, lEst, PEst

    def XLtoY(self, x, l):
        # split y (2*N) to 2N*1
        y = np.vstack(np.hsplit(l, np.size(l, 1)))
        y = np.vstack((x, y))
        return y

    def YtoXL(self, y):
        x, l = np.vsplit(y, [STATE_SIZE])
        l = np.hstack(np.vsplit(l, np.size(l) // LM_SIZE))
        return x, l
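
For reference, estimate() implements the standard EKF update; with the snippet's names (covariance for the predicted P, m for the measurement Jacobian H, and a diagonal noise term R built from variance), the gain and update steps are:

    K = \Sigma H^T (H \Sigma H^T + R)^{-1}
    y_{est} = y_{pred} + K (z_{obs} - z_{pred})
    \Sigma_{est} = \Sigma - K H \Sigma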
Example no. 28
    def main_code(self, fname):
        wb = open_workbook(fname, formatting_info=True)
        standard_list = open_workbook(os.getcwd() + '/Standard Values.xlsx')
        standard_table = Extraction.extract_data(self, standard_list)
        ptm_data = Extraction.extract_data(self, standard_list, 'alias_col')
        font = wb.font_list
        sheet_name = []
        cordinates = Extraction.search_titles(self, wb, standard_table)
        cordinates_row = Extraction.extract_rows(self, cordinates)
        # the code below finds the max column
        max_col = 1
        max_col_sheet = ''
        row_x = 0
        for sheetname in cordinates.keys():
            for value in cordinates[sheetname]:
                x, y = value
                if y > max_col and y != '':
                    max_col = y
                    max_col_sheet = sheetname
                    row_x = x
        sheet_m = wb.sheet_by_name(max_col_sheet)
        header = {}
        cordinates_dict = Extraction.get_cordinates(self, cordinates_row, wb)
        work = Workbook()
        w_sheet = work.active
        w_sheet.title = 'Standard Sheet'
        counter = 0
        heading_count = 0
        x = row_x
        for col in range(sheet_m.ncols):
            if col < sheet_m.ncols - 1 and sheet_m.cell(
                    x, col).value != '' and sheet_m.cell(
                        x, col + 1).value == '' and sheet_m.cell(
                            x + 1, col).value != '':
                w_sheet.cell(row=counter + 1,
                             column=col + 1).value = sheet_m.cell(x + 1,
                                                                  col).value
                header[sheet_m.cell(x + 1, col).value] = col
            elif col < sheet_m.ncols - 1 and sheet_m.cell(
                    x, col).value == '' and x < sheet_m.nrows - 1:
                if sheet_m.cell(x, col + 1).value != '' and sheet_m.cell(
                        x + 1, col).value != '':
                    w_sheet.cell(row=counter + 1,
                                 column=col + 1).value = sheet_m.cell(
                                     x + 1, col).value
                    header[sheet_m.cell(x + 1, col).value] = col
                elif sheet_m.cell(x, col + 1).value == '' and sheet_m.cell(
                        x + 1, col).value != '':
                    w_sheet.cell(row=counter + 1,
                                 column=col + 1).value = sheet_m.cell(
                                     x + 1, col).value
                    header[sheet_m.cell(x + 1, col).value] = col
                elif sheet_m.cell(x, col + 1).value != '' and sheet_m.cell(
                        x - 1, col).value != '':
                    w_sheet.cell(row=counter + 1,
                                 column=col + 1).value = sheet_m.cell(
                                     x - 1, col).value
                    header[sheet_m.cell(x - 1, col).value] = col
                elif sheet_m.cell(x, col + 1).value == '' and sheet_m.cell(
                        x - 1, col).value != '':
                    w_sheet.cell(row=counter + 1,
                                 column=col + 1).value = sheet_m.cell(
                                     x - 1, col).value
                    header[sheet_m.cell(x - 1, col).value] = col
            else:
                if sheet_m.cell(x, col).value != '':
                    w_sheet.cell(row=counter + 1, column=col +
                                 1).value = sheet_m.cell(x, col).value
                    header[sheet_m.cell(row_x, col).value] = col
        counter += 1
        for sheet in wb.sheets():
            if sheet.visibility == 0:
                col_n = None
                second_max = 0
                print sheet.name
                counting = 1
                order_row = (cordinates_dict[sheet.name].keys())
                order_row.sort()
                for tempp in range(len(order_row) - 1, -1, -1):
                    if order_row[tempp] < sheet.nrows:
                        second_max = order_row[tempp]
                        break
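                # second_max is the last header row that still lies inside the
                # sheet; rows from it run to sheet.nrows instead of to the next
                # header row.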
                for row_n in order_row:  # header rows, in increasing order
                    row_count = row_n
                    if row_count < second_max:
                        # copy data rows up to the next header row
                        # (order_row[counting] points at the next header)
                        while row_count < order_row[counting]:
                            for column_name in cordinates_dict[
                                    sheet.name][row_n]:
                                cell_formatting = wb.xf_list[
                                    sheet.cell_xf_index(
                                        row_count, column_name[1])]
                                # skip cells whose font is struck out
                                check2 = font[
                                    cell_formatting.font_index].struck_out != 1
                                if not check2:
                                    if sheet.cell(
                                            row_count, column_name[1]
                                    ).ctype == XL_CELL_BLANK or sheet.cell(
                                            row_count, column_name[1]
                                    ).ctype == XL_CELL_EMPTY:
                                        check2 = True

                                if check2:
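                                    # non_zero_row (judging by its name) keeps
                                    # only rows whose quantity columns carry a
                                    # non-zero value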
                                    check = Filter.non_zero_row(
                                        self, sheet.row_values(row_count),
                                        cordinates_dict[sheet.name][row_n],
                                        ptm_data['QUANTITY'])
                                    if check:
                                        try:
                                            w_sheet.cell(
                                                row=counter + 1,
                                                column=header[column_name[0]] +
                                                1).value = sheet.cell(
                                                    row_count,
                                                    column_name[1]).value
                                        except Exception as e:
                                            cap1 = Extraction.capitalised(
                                                self, column_name[0])
                                            for key in header.keys():
                                                cap2 = Extraction.capitalised(
                                                    self, key)
                                                for tempi in ptm_data:
                                                    for j in ptm_data[tempi]:

                                                        if (cap1 in j
                                                                or j in cap1
                                                            ) and (j in cap2
                                                                   or tempi
                                                                   in cap2):
                                                            col_n = key
                                                            break
                                                    if col_n == key:
                                                        break
                                                if col_n == key:
                                                    break
                                            w_sheet.cell(
                                                row=counter + 1,
                                                column=header[key] +
                                                1).value = sheet.cell(
                                                    row_count,
                                                    column_name[1]).value
                                    else:
                                        break
                                else:
                                    break
                            row_count += 1
                            if check2 and check:
                                counter += 1
                    elif row_count == second_max:
                        while row_count < sheet.nrows:
                            for column_name in cordinates_dict[
                                    sheet.name][row_n]:
                                cell_formatting = wb.xf_list[
                                    sheet.cell_xf_index(
                                        row_count, column_name[1])]
                                check2 = font[
                                    cell_formatting.font_index].struck_out != 1
                                if not check2:
                                    if sheet.cell(
                                            row_count, column_name[1]
                                    ).ctype == XL_CELL_BLANK or sheet.cell(
                                            row_count, column_name[1]
                                    ).ctype == XL_CELL_EMPTY:
                                        check2 = True
                                if check2:

                                    check = Filter.non_zero_row(
                                        self, sheet.row_values(row_count),
                                        cordinates_dict[sheet.name][row_n],
                                        ptm_data['QUANTITY'])
                                    if check:
                                        try:
                                            w_sheet.cell(
                                                row=counter + 1,
                                                column=header[column_name[0]] +
                                                1).value = sheet.cell(
                                                    row_count,
                                                    column_name[1]).value
                                        except Exception as e:
                                            cap1 = Extraction.capitalised(
                                                self, column_name[0])
                                            for key in header.keys():
                                                cap2 = Extraction.capitalised(
                                                    self, key)
                                                if cap1 == cap2:
                                                    break
                                                else:
                                                    for i in ptm_data:
                                                        for j in ptm_data[i]:
                                                            if (cap1 in j or j
                                                                    in cap1
                                                                ) and (
                                                                    j in cap2
                                                                    or i
                                                                    in cap2):
                                                                col_n = key
                                                                break
                                                        if col_n == key:
                                                            break
                                                    if col_n == key:
                                                        break
                                            w_sheet.cell(
                                                row=counter + 1,
                                                column=header[key] +
                                                1).value = sheet.cell(
                                                    row_count,
                                                    column_name[1]).value
                                    else:
                                        break
                                else:
                                    break
                            if check2 and check:
                                counter += 1
                            row_count += 1
                        counting += 1

                    else:
                        continue
                    counting += 1
        print('writing done')
        print(sheet.name)
        work.save(os.getcwd() + '/' + 'Output.xls')
        wb2 = open_workbook(os.getcwd() + '/' + 'Output.xls')
        sheet = wb2.sheet_by_index(0)
        data_column = {}
        for vtemp in header:
            stamp = 0
            for key in ptm_data:
                for value in ptm_data[key]:
                    cap = str(Extraction.capitalised(self, vtemp))
                    cap2 = str(Extraction.capitalised(self, value))
                    if cap != '' and (cap == cap2 or cap2 in cap or cap in cap2):
                        data_column[key] = header[vtemp]
                        stamp = 1
                        break
                if stamp == 1:
                    break
        # Build a nested dictionary of aliases for material, operation, valve type, etc.
        alias_s = standard_list.sheet_by_name('alias')
        alias_db = standard_list.sheet_by_name('db alias')
        size_con = standard_list.sheet_by_name('conversion')
        convert_nb_2_i = {}
        convert_i_2_nb = {}
        for row in range(size_con.nrows):
            # the original char-by-char copy is just a string conversion
            word = str(size_con.cell(row, 1).value)
            convert_nb_2_i[size_con.cell(row, 0).value] = word
            convert_i_2_nb[word] = size_con.cell(row, 0).value
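        # convert_nb_2_i maps nominal-bore numbers to inch strings (the actual
        # pairs come from the 'conversion' sheet); convert_i_2_nb is its inverse.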
        if self.size_value == 'IN':
            conversion_data = convert_nb_2_i
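            # keep only the leading digits of the size cell, then look up the
            # inch label for that nominal-bore number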
            for row in range(1, sheet.nrows):
                d_value = ''
                value_string = str(
                    sheet.cell(row, data_column['VALVE SIZE']).value).lstrip()
                for i in str(value_string):
                    if i in '0123456789':
                        d_value += i
                    else:
                        break
                try:
                    w_sheet.cell(
                        row=row + 1, column=data_column['VALVE SIZE'] +
                        1).value = conversion_data[int(d_value)].strip("\"")
                except (KeyError, ValueError):
                    # size missing from the conversion table, or no numeric prefix
                    pass
        elif self.size_value == 'NB':
            conversion_data = convert_i_2_nb
            for row in range(1, sheet.nrows):
                d_value = ''
                value_string = str(
                    sheet.cell(
                        row,
                        data_column['VALVE SIZE']).value).strip().strip("\"")
                for i in str(value_string):
                    if i in '0123456789':
                        d_value += i
                    else:
                        break
                try:
                    w_sheet.cell(row=row + 1,
                                 column=data_column['VALVE SIZE'] +
                                 1).value = conversion_data[int(d_value)]
                except (KeyError, ValueError):
                    # size missing from the conversion table, or no numeric prefix
                    pass
        alias_data = {}
        for col in range(alias_s.ncols):
            alias_d = {}
            temp = []
            for row in range(1, alias_s.nrows):
                if alias_s.cell(row, col).value != '':
                    if alias_s.cell(row, col).value != '-' and alias_s.cell(
                            row, col).value != '--':
                        temp.append(
                            str(
                                Extraction.capitalised(
                                    self,
                                    alias_s.cell(row, col).value)))
                    else:
                        temp.append(str(alias_s.cell(row, col).value))
                elif alias_s.cell(row, col).value == '' and len(temp) >= 1:
                    alias_d[temp[0]] = temp
                    temp = []
            alias_data[alias_s.cell(0, col).value] = alias_d
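        # alias_data is nested: {category -> {canonical name -> [aliases]}},
        # where each blank-separated group in the 'alias' sheet starts with its
        # canonical spelling.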
        alias_data2 = {}
        for col in range(alias_db.ncols):
            alias_d = {}
            temp = []
            for row in range(1, alias_db.nrows):
                if alias_db.cell(row, col).value != '':
                    if alias_db.cell(row, col).value != '-' and alias_db.cell(
                            row, col).value != '--':
                        temp.append(
                            str(
                                Extraction.capitalised(
                                    self,
                                    alias_db.cell(row, col).value)))
                    else:
                        temp.append(str(alias_db.cell(row, col).value))
                elif alias_db.cell(row, col).value == '' and len(temp) >= 1:
                    alias_d[temp[0]] = temp
                    temp = []
            alias_data2[alias_db.cell(0, col).value] = alias_d
        # Standardise the material names in the original Excel sheet using the alias sheet.
        work.save(os.getcwd() + '/' + 'Output.xls')
        wb2 = open_workbook(os.getcwd() + '/' + 'Output.xls')
        Filter.formatting_data(self, convert_nb_2_i, self.size_value, wb2,
                               work, data_column, alias_data,
                               self.material_value, self.a105, self.f22,
                               self.f91, self.f92, self.wcb, self.wcc,
                               self.wc9, self.c12a, self.c12ab)
        work.save(os.getcwd() + '/' + 'Output.xls')
        wb2 = open_workbook(os.getcwd() + '/' + 'Output.xls')
        sheet = wb2.sheet_by_index(0)
        if self.rating_value == 'YES':
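            # Client is assumed to be a SOAP client class (e.g. suds.client.Client,
            # imported outside this snippet); the placeholder URL must be replaced
            # with the real service address.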
            client = Client('enter ip address for the soap service')
            w_sheet.cell(row=1, column=sheet.ncols + 1).value = "RATING(S)"
            data_column['VALVE RATING'] = sheet.ncols
            print("FETCHING RATING FROM SERVER")
            for rows in range(1, sheet.nrows):
                w_sheet.cell(
                    row=rows + 1, column=sheet.ncols +
                    1).value = client.service.GetMinClassRating(
                        sheet.cell(rows, data_column['TEMPERATURE']).value,
                        sheet.cell(rows, data_column['PRESSURE']).value,
                        sheet.cell(rows, data_column['VALVE MATERIAL']).value)

        work.save(os.getcwd() + '/' + 'Output.xls')
        order = []
        ssd = standard_list.sheet_by_name('standard')
        for col in range(ssd.ncols):
            order.append(ssd.cell(0, col).value)

        final_s = work.create_sheet('final')
        regret_s = work.create_sheet('regret')
        wb2 = open_workbook(os.getcwd() + '/' + 'Output.xls')
        sheet = wb2.sheet_by_index(0)
        counter = 1
        counter2 = 1
        head = 1
        redFill = PatternFill(start_color='00CED1FF',
                              end_color='00CED1FF',
                              fill_type='solid')
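        # redFill is compared against cell fills below to detect values that the
        # earlier Filter.formatting_data pass highlighted (assumed from context).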
        for row in range(sheet.nrows):
            count = 0

            while count < len(order):
                if head == 1:
                    for col in range(len(order)):
                        if order[col] in data_column:
                            final_s.cell(row=counter,
                                         column=col + 3).value = order[col]
                            final_s.cell(row=counter,
                                         column=2).value = 'VALVE PROJECT'
                            final_s.cell(row=counter,
                                         column=1).value = 'VALVE SCHEDULE'
                    count += 1
                    counter += 1
                    head = 0
                elif sheet.cell(row, data_column[order[count]]
                                ).value in alias_data['VALVE TYPE'].keys():
                    for col in range(len(order)):
                        if order[col] in data_column:
                            if w_sheet.cell(row=row + 1,
                                            column=data_column[order[col]] +
                                            1).fill == redFill:
                                final_s.cell(row=counter,
                                             column=col + 3).fill = redFill
                                final_s.cell(row=counter, column=col +
                                             3).value = sheet.cell(
                                                 row,
                                                 data_column[order[col]]).value
                                final_s.cell(row=counter,
                                             column=2).value = self.vp
                                final_s.cell(row=counter,
                                             column=1).value = self.vs

                            else:
                                final_s.cell(row=counter, column=col +
                                             3).value = sheet.cell(
                                                 row,
                                                 data_column[order[col]]).value
                                final_s.cell(row=counter,
                                             column=2).value = self.vp
                                final_s.cell(row=counter,
                                             column=1).value = self.vs
                    counter += 1
                    count += 1
                    break
                else:
                    for col in range(len(order)):
                        if order[col] in data_column:
                            if w_sheet.cell(row=row + 1,
                                            column=data_column[order[col]] +
                                            1).fill == redFill:
                                regret_s.cell(row=counter2,
                                              column=col + 3).fill = redFill
                                regret_s.cell(
                                    row=counter2,
                                    column=col + 3).value = sheet.cell(
                                        row, data_column[order[col]]).value
                                regret_s.cell(row=counter2,
                                              column=2).value = self.vp
                                regret_s.cell(row=counter2,
                                              column=1).value = self.vs
                            else:
                                regret_s.cell(
                                    row=counter2,
                                    column=col + 3).value = sheet.cell(
                                        row, data_column[order[col]]).value
                                regret_s.cell(row=counter2,
                                              column=2).value = self.vp
                                regret_s.cell(row=counter2,
                                              column=1).value = self.vs
                    counter2 += 1
                    count += 1
                    break
        work.save(os.getcwd() + '/' + 'Output.xls')
        wb2 = open_workbook(os.getcwd() + '/' + 'Output.xls')
        Part_number_extraction(alias_data2, standard_list, wb2, work,
                               self.size_value, convert_i_2_nb,
                               self.part_value, self.part_value2,
                               convert_nb_2_i)
        work.save(os.getcwd() + '/' + 'Output.xls')
Esempio n. 29
0
    def read(self, fn):
        d = defaultdict(list)
        with open(fn) as fin:
            data = json.load(fin)
        for sentence in data:
            tuples = data[sentence]
            for t in tuples:
                if t["pred"].strip() == "<be>":
                    rel = "[is]"
                else:
                    rel = t["pred"].replace("<be> ", "")
                confidence = 1

                curExtraction = Extraction(pred=rel,
                                           head_pred_index=None,
                                           sent=sentence,
                                           confidence=float(confidence),
                                           index=None)
                if t["arg0"] != "":
                    curExtraction.addArg(t["arg0"])
                if t["arg1"] != "":
                    curExtraction.addArg(t["arg1"])
                if t["arg2"] != "":
                    curExtraction.addArg(t["arg2"])
                if t["arg3"] != "":
                    curExtraction.addArg(t["arg3"])
                if t["temp"] != "":
                    curExtraction.addArg(t["temp"])
                if t["loc"] != "":
                    curExtraction.addArg(t["loc"])

                d[sentence].append(curExtraction)
        self.oie = d
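        # Hedged usage sketch (the reader class name is hypothetical):
        # reader = JsonOIEReader()
        # reader.read('extractions.json')
        # for sent, extractions in reader.oie.items():
        #     print(sent, len(extractions))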
Esempio n. 30
0
from flask import Flask, render_template, request, jsonify

import sentences_list
import random
from extraction import Extraction
from parssing import Parssing
import wikipedia
app = Flask(__name__)

app.config['SECRET_KEY'] = '7TKUKe09wW1PlrtSL066lsN18uWA7iuO'

# Create instances
datas_extraction = Extraction()
datas_management = Parssing()


@app.route('/')
def home():
    return render_template('index.html', title="Bienvenue chez GrandPy Bot")


@app.route('/_answer')
def answer():
    question = request.args.get('question', 0, type=str)
    return jsonify(result=question)


@app.route('/_address')
def address():
    question = request.args.get('question', 0, type=str)
    filtered_sentence = Parssing.get_main_words(datas_management, question)
Esempio n. 31
0
from extraction import Extraction
import time
startTime = time.time()
print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(startTime)))
extract1 = Extraction()
extract1.loadData()
extract1.run()
endTime = time.time()
print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(endTime)))
seconds = endTime - startTime
print(seconds)
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
print "%d:%02d:%02d" % (h, m, s)