コード例 #1
0
    def _send_table_description(self, variant):
        """
        Build and send the table description to Qlik as metadata.
        Used when this SSE is invoked from the Qlik load script.

        :param variant: "entities" or "metrics"; selects the field layout.
        """

        # Describe the response table that will be returned to Qlik
        self.table = SSE.TableDescription()
        self.table.name = "SSE-Response-spaCy"
        self.table.numberOfRows = len(self.response_df)

        # Field layout per variant; dataType=1 marks numeric fields,
        # fields added without a dataType default to string.
        layouts = {
            "entities": [("key", None), ("entity", None), ("start", 1),
                         ("end", 1), ("type", None), ("description", None)],
            "metrics": [("model_name", None), ("subset", None),
                        ("metric", None), ("value", 1)],
        }
        for field_name, data_type in layouts.get(variant, []):
            if data_type is None:
                self.table.fields.add(name=field_name)
            else:
                self.table.fields.add(name=field_name, dataType=data_type)

        # Debug information is printed to the terminal and logs if debug = true
        if self.debug:
            self._print_log(5)

        # Serialize the table description and send it as initial metadata
        table_header = (('qlik-tabledescription-bin',
                         self.table.SerializeToString()), )
        self.context.send_initial_metadata(table_header)
コード例 #2
0
ファイル: _common.py プロジェクト: zerubroberts/qlik-py-tools
    def _send_table_description(self, variant):
        """
        Send the table description to Qlik as meta data.
        Used when the SSE is called from the Qlik load script.

        :param variant: selects the field layout; currently only "apriori"
                        is recognised — any other value sends a table with
                        no fields.
        """

        # Set up the table description to send as metadata to Qlik
        self.table = SSE.TableDescription()
        self.table.name = "SSE-Response"
        self.table.numberOfRows = len(self.response_df)

        # Set up fields for the table
        # (a stray no-op tuple expression listing the field names was
        # removed here; it had no effect)
        if variant == "apriori":
            # dataType=1 marks numeric fields; fields default to string
            self.table.fields.add(name="rule")
            self.table.fields.add(name="rule_lhs")
            self.table.fields.add(name="rule_rhs")
            self.table.fields.add(name="support", dataType=1)
            self.table.fields.add(name="confidence", dataType=1)
            self.table.fields.add(name="lift", dataType=1)

        # Debug information is printed to the terminal and logs if the parameter debug = true
        if self.debug:
            self._print_log(5)

        # Send table description
        table_header = (('qlik-tabledescription-bin', self.table.SerializeToString()),)
        self.context.send_initial_metadata(table_header)
コード例 #3
0
    def _max_of_columns_2(request, context):
        """
        Find max of each column. This is a table function.
        :param request: an iterable sequence of RowData
        :param context:
        :return: a table with numerical values, two columns and one row
        """

        # Running maxima for the two columns, seeded with the smallest float
        column_max = [_MINFLOAT, _MINFLOAT]

        # Walk every row in every bundle and keep the per-column maximum
        for bundle in request:
            for row in bundle.rows:
                for idx, dual in enumerate(row.duals):
                    if dual.numData > column_max[idx]:
                        column_max[idx] = dual.numData

        # Wrap the results as numeric duals
        duals = iter([SSE.Dual(numData=value) for value in column_max])

        # Describe and send the result table header
        table = SSE.TableDescription(name='MaxOfColumns', numberOfRows=1)
        table.fields.add(name='Max1', dataType=SSE.NUMERIC)
        table.fields.add(name='Max2', dataType=SSE.NUMERIC)
        md = (('qlik-tabledescription-bin', table.SerializeToString()), )
        context.send_initial_metadata(md)

        # Yield the single-row result
        yield SSE.BundledRows(rows=[SSE.Row(duals=duals)])
コード例 #4
0
ファイル: _prophet.py プロジェクト: udayshankarv/qvpytoolssse
    def _send_table_description(self):
        """
        Send the table description to Qlik as meta data.
        Only used when the SSE is called from the Qlik load script.
        """

        # Metadata describing the forecast table returned to Qlik
        self.table = SSE.TableDescription()
        self.table.name = "ProphetForecast"
        self.table.numberOfRows = len(self.response)

        # Field layout depends on the type of request being answered
        if self.is_seasonality_request:
            # Seasonality output: every column is numeric (dataType=1)
            for column in self.response.columns:
                self.table.fields.add(name=column, dataType=1)
        elif self.result_type == 'all':
            # Full forecast output: 'ds' is a string field, the rest numeric
            for column in self.response.columns:
                self.table.fields.add(name=column,
                                      dataType=0 if column == 'ds' else 1)
        else:
            # Single result column alongside the date stamps
            self.table.fields.add(name="ds", dataType=0)
            self.table.fields.add(name=self.result_type, dataType=1)

        # Debug information is printed to the terminal and logs if debug = true
        if self.debug:
            self._print_log(6)

        # Serialize and send the table description as initial metadata
        table_header = (('qlik-tabledescription-bin',
                         self.table.SerializeToString()), )
        self.context.send_initial_metadata(table_header)
コード例 #5
0
    def _send_table_description(self, func=None):
        """
        Send the table description to Qlik as meta data.
        Only used when the SSE is called from the Qlik load script.

        :param func: None for the main dataset table, or "get_labels" for
                     the variable/label table.
        """

        # Metadata describing the response table returned to Qlik
        self.table = SSE.TableDescription()

        if func is None:
            self.table.name = "SAS_Dataset"

            # Obtain a small sample of the data to derive the field names
            if isinstance(self.reader, pd.DataFrame):
                self.sample_data = self.reader.head(5)
            else:
                # Read the SAS file in chunks of 5 rows
                sample_response = pd.read_sas(self.filepath, format=self.format, encoding=self.encoding, chunksize=5)

                # Take the first chunk as a Pandas DataFrame
                self.sample_data = sample_response.__next__()

                # Release the file handle
                sample_response.close()

            # Fetch field labels from SAS variable attributes if required
            # This may fail for wide tables due to meta data limits. For such cases use the get_labels function.
            if self.labels:
                # Use the sas7bdat library to read the file
                handle = SAS7BDAT(self.filepath, skip_header=False)

                # Decode the label of every variable in the file
                labels = [column.label.decode(self.encoding) for column in handle.columns]

                # Release the file handle
                handle.close()
            else:
                # Fall back to the column names from the sample data
                labels = self.sample_data.columns

            # Register one field per label
            for label in labels:
                self.table.fields.add(name=label)

            if self.debug:
                self._print_log(2)

        elif func == "get_labels":
            # Two-column table mapping each variable name to its label
            self.table.name = "SAS_Labels"
            self.table.fields.add(name="variable")
            self.table.fields.add(name="label")

            if self.debug:
                self._print_log(4)

        # Serialize and send the table description as initial metadata
        table_header = (('qlik-tabledescription-bin', self.table.SerializeToString()),)
        self.context.send_initial_metadata(table_header)
コード例 #6
0
    def _translateScript(request, context):
        """
        Translate a column of text and return the translations keyed by id.

        Each row is expected to carry four duals:
        (text, id, source language, destination language).

        :param request: an iterable sequence of bundled rows
        :param context:
        :return: yields a BundledRows with (id, translated text) per row
        """

        idNumList = []
        translationsList = []
        # src/dest are carried on every row; the last row's values are used
        src = None
        dest = None

        # Iterate over bundled rows
        for request_rows in request:
            # Iterate over rows
            for row in request_rows.rows:
                duals = list(row.duals)

                # first dual: the text to translate
                translationsList.append(duals[0].strData)

                # second dual: the row id
                idNumList.append(duals[1].numData)

                # third/fourth duals: source and destination languages
                src = duals[2].strData
                dest = duals[3].strData

        try:
            translator = Translator()
            translations = translator.translate(translationsList,
                                                src=src,
                                                dest=dest)
        except Exception:
            # BUG FIX: this used to be a bare `except: pass`, which left
            # `translations` undefined and produced a confusing NameError
            # below. Log the real failure and re-raise instead.
            logging.exception('Translation request failed')
            raise

        resultList = [i.text for i in translations]
        logging.info('Records translated: ' + str(len(resultList)))

        # Create an iterable of dual with the result
        dualsList = []
        dualsList.append([SSE.Dual(numData=d) for d in idNumList])
        dualsList.append([SSE.Dual(strData=d) for d in resultList])

        response_rows = []
        for i in range(len(idNumList)):
            duals = [dualsList[z][i] for z in range(len(dualsList))]
            response_rows.append(SSE.Row(duals=iter(duals)))

        # Set and send Table header: numeric id, string translation
        table = SSE.TableDescription(name='Translations')
        table.fields.add(dataType=SSE.NUMERIC)
        table.fields.add(dataType=SSE.STRING)
        md = (('qlik-tabledescription-bin', table.SerializeToString()), )
        context.send_initial_metadata(md)

        yield SSE.BundledRows(rows=response_rows)
コード例 #7
0
    def evaluate(self, context, script, ret_type, q):
        """
        Evaluates a script with given parameters and construct the result to a Row of duals.
        :param context:
        :param script:  script to evaluate
        :param ret_type: return data type
        :param q: data frame of received parameters, empty if no parameter was sent
        :return: a RowData of string dual
        """
        # Table description the user script may populate via the exposed
        # 'table' variable; only sent to Qlik if the script opts in below.
        table = SSE.TableDescription()
        logging.debug('Received data frame (q): {}'.format(q))
        locals_added = {
        }  # The variables set while executing the script will be saved to this dict
        # Evaluate script, the result must be saved to the qResult object
        # SECURITY NOTE(review): exec runs arbitrary code received from the
        # client — only expose this SSE to trusted Qlik deployments.
        exec(script, {
            'q': q,
            'numpy': numpy,
            'pandas': pandas,
            'table': table
        }, locals_added)

        if 'qResult' in locals_added:
            qResult = locals_added['qResult']
            logging.debug('Result (qResult): {}'.format(qResult))

            # If the script set tableDescription = True, send the table it
            # populated as metadata (load-script use case)
            if 'tableDescription' in locals_added and locals_added[
                    'tableDescription'] is True:
                self.send_table_description(table, context)

            # Transform the result to bundled rows
            bundledRows = SSE.BundledRows()
            if isinstance(qResult, str) or not hasattr(qResult, '__iter__'):
                # A single value is returned
                bundledRows.rows.add(duals=self.get_duals(qResult, ret_type))
            else:
                # Each element of an iterable result becomes one row
                for row in qResult:
                    bundledRows.rows.add(duals=self.get_duals(row, ret_type))

            return bundledRows
        else:
            # No result was saved to qResult object
            msg = 'No result was saved to qResult, check your script.'
            self.raise_grpc_error(context, grpc.StatusCode.INVALID_ARGUMENT,
                                  msg)
コード例 #8
0
    def _send_table_description(self):
        """
        Send the table description to Qlik as meta data.
        Only used when the SSE is called from the Qlik load script.
        """

        # Describe the forecast table returned to Qlik
        self.table = SSE.TableDescription()
        self.table.name = "ProphetForecast"
        self.table.numberOfRows = len(self.response)

        # Two fields: the date stamp (string) and the requested result (numeric)
        self.table.fields.add(name="ds")
        self.table.fields.add(name=self.result_type, dataType=1)

        # Debug information is printed to the terminal and logs if debug = true
        if self.debug:
            self._print_log(5)

        # Serialize and send the table description as initial metadata
        serialized = self.table.SerializeToString()
        self.context.send_initial_metadata((('qlik-tabledescription-bin', serialized),))
コード例 #9
0
    def evaluate(self, context, script, ret_type, params=None):
        """
        Evaluates a script with given parameters and construct the result to a Row of duals.
        :param context:
        :param script:  script to evaluate; the operation is selected by
                        substring (TableInfo / TableMetaData / getTableData)
        :param ret_type: return data type
        :param params: params to evaluate. Default: None
                       (replaces a mutable [] default; params is not used
                       in this body)
        :return: the result rows matching the table description sent to Qlik
        """
        # Avoid the shared-mutable-default-argument pitfall
        if params is None:
            params = []

        table = SSE.TableDescription()

        # Locate and read the qrag.ini configuration next to this module
        conf_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 'config', 'qrag.ini')
        logging.info('Location of qrag.ini {}'.format(conf_file))
        config.read(conf_file)
        url = config.get('base', 'url')
        logging.debug('Precog URL {}'.format(url))

        if script.find('TableInfo') != -1:
            # Catalog of available tables
            result = self.getTableInfo(url)
            table.name = 'PreCog-Catalog'
            table.fields.add(name="Table_Id", dataType=0)
            table.fields.add(name="Name", dataType=0)
            table.fields.add(name="Column Desc", dataType=0)
        elif script.find('TableMetaData') != -1:
            # Column names/types for one table; the table name is the script
            # minus the trailing 'TableMetaData' suffix (14 characters)
            vTable = script[:-14]
            logging.info('TableMetadata vTable {}'.format(vTable))
            table.name = vTable + "-Metadata"
            column_data = precog.get_column_info(vTable, url)
            table.fields.add(name="column", dataType=0)
            table.fields.add(name="type", dataType=0)
            result = [[i["column"], i["type"]] for i in column_data]
        elif script.find('getTableData') != -1:
            # Full data for one table; strip the 'getTableData' suffix (13 chars)
            vTable = script[:-13]
            logging.info('getTableData vTable {}'.format(vTable))
            table.name = vTable
            column_data = precog.get_column_info(vTable, url)
            for i in column_data:
                # All fields are declared as strings (dataType=0). The
                # original code branched on i["type"] == "number" but
                # assigned 0 in both branches, so behavior is unchanged.
                table.fields.add(name=i["column"], dataType=0)
            result = self.getTableData(url, vTable)
        else:
            # Unknown request: return an empty result set
            result = []

        self.send_table_description(table, context)
        return result
コード例 #10
0
    def evaluate(self, context, script, ret_type, params=[]):
        """
        Evaluates a script with given parameters and construct the result to a Row of duals.
        :param script:  script to evaluate; expected shape is
                        functionName(arg1,arg2,...) — the function name
                        selects one of the branches below
        :param ret_type: return data type
        :param params: params to evaluate. Default: []
                       (NOTE(review): mutable default argument; harmless
                       here since params is never mutated, but worth fixing)
        :return: a RowData of string dual
        """
        # Evaluate script
        logging.info("In Evaluate {} {} {}" .format(script, ret_type, params))


        # Table description populated per-branch and sent to Qlik at the end
        table = SSE.TableDescription()
        logging.info("python_finance Function {} called" .format(script))
        #If User name and Password Present Remove Username and Password and Pass only function name
        # Parse "name(a,b,c)" into fName and the Arguments list; anything
        # without parentheses is rejected
        if(script.find('(') !=-1):
            index=script.index('(')
            Arguments = script[index:].strip(')(').split(',')
            fName = script[:index]
            provider='yahoo'
        else:
            raise ValueError('Incorrect Formating of Function')

        if (script.find('get_ticker_data') !=-1):
            # Arguments: ticker, start_date, end_date
            ticker = Arguments[0]
            start_date=Arguments[1]
            end_date=Arguments[2]
            result = self.get_ticker_data(ticker, start_date, end_date)
            logging.debug("result type  : {} data : {} " .format(type(result), result))
            # convert_df_list returns (column_names, row_data)
            converted = qlist.convert_df_list(result)
            logging.debug("converted type JRP : {} columns : {} data :{} " .format(type(converted[0]), converted[0], converted[1]))
            table.name = Arguments[0] +'- ticker_data'
            # Prepend a Ticker column to both the header and every data row
            converted[0].insert(0, 'Ticker')
            for i in converted[0]:
                logging.debug('This is i {}'  .format(i))
                #if (i!='Date'):
                    #if (i!='Ticker'):
                 #       FieldName = ticker+' '+i
                    #else:
                #        FieldName = i
               # else:
                FieldName= i
                # all fields are declared as strings (dataType 0)
                FieldType=0
                table.fields.add(name=FieldName, dataType=FieldType)
            for x in converted[1]:
                x.insert(0, ticker.strip())
            result = converted[1]
            logging.debug("result type  : {} data : {} " .format(type(result), result))
            for x in result:
                logging.debug("x type  : {} data : {} " .format(type(x), x))
        elif (script.find('get_tickers') !=-1):
            # Arguments: ticker..., start_date, end_date, attrib
            # (the last three items are fixed, everything before is tickers)
            tickers = Arguments[: len(Arguments) - 3]
            Arguments = Arguments[len(Arguments) - 3:]
            start_date=Arguments[0]
            end_date=Arguments[1]
            attrib = Arguments[2]
            logging.debug("get tickers - tickers: {} Arguments {} start_date : {} end_date :{} attrib :{} " .format(tickers, Arguments, start_date, end_date, attrib))
            result = self.get_tickers(tickers, start_date, end_date, attrib)
            converted = qlist.convert_df_list(result)
            table.name= ' '.join([str(elem) for elem in tickers]) + '-' + attrib + '-Data'
            logging.debug("column  {}" .format(converted[0]))
            # Rename non-date columns to "Stock N <attrib>"
            x = 1
            for i in converted[0]:
                if(i!='Date'):
                    FieldName = 'Stock '+str(x)+' '+attrib
                    x += 1
                else:
                    FieldName= i
                FieldType=0
                table.fields.add(name=FieldName, dataType=FieldType)
            result= converted[1]
            logging.debug("result {}" .format(result))
        elif (script.find('get_Percent_change') !=-1):
            # Arguments: ticker..., start_date, end_date, attrib
            tickers = Arguments[: len(Arguments) - 3]
            Arguments = Arguments[len(Arguments) - 3:]
            start_date=Arguments[0]
            end_date=Arguments[1]
            attrib = Arguments[2]
            logging.debug("get_Percent_change - tickers: {} Arguments {} start_date : {} end_date :{} attrib :{} " .format(tickers, Arguments, start_date, end_date, attrib))
            result = self.get_Percent_change(tickers, start_date, end_date, attrib)
            logging.debug("result - type: {} data: {}" .format(type(result), result))
            converted = qlist.convert_df_list(result)
            table.name= ' '.join([str(elem) for elem in tickers]) + '-' + attrib + '- Percent Change'
            #table.name= 'Percent Change'
            # Rename non-date columns to "Stock N-Percent Change"
            x =1
            logging.debug("column  {}" .format(converted[0]))
            for i in converted[0]:
                if(i!='Date'):
                    FieldName = 'Stock '+str(x)+'-Percent Change'
                    x += 1
                else:
                    FieldName = i
                FieldType=0
                table.fields.add(name=FieldName, dataType=FieldType)
            result= converted[1]
            logging.debug("result {}" .format(result))
        elif (script.find('get_Mean_Daily_Return') !=-1):
            # Arguments: ticker..., start_date, end_date, attrib
            tickers = Arguments[: len(Arguments) - 3]
            Arguments = Arguments[len(Arguments) - 3:]
            start_date=Arguments[0]
            end_date=Arguments[1]
            attrib = Arguments[2]
            logging.debug("get_Mean_Daily_Return - tickers: {} Arguments {} start_date : {} end_date :{} attrib :{} " .format(tickers, Arguments, start_date, end_date, attrib))
            result = self.get_Mean_Daily_Return(tickers, start_date, end_date, attrib)
            logging.debug("result - type: {} data: {} name: {}" .format(type(result), result, result.name))
            # Series -> DataFrame -> dict so each (ticker, mean) pair can be
            # emitted as one row with the value formatted to 6 decimals
            df_result = result.to_frame()
            logging.debug("df_result - type: {} data: {}" .format(type(df_result), df_result))
            temp_dict = df_result.to_dict("split")
            data =[]
            i =0
            for y in temp_dict['data']:
                y =  ['%.6f' % z for z in y]
                y.insert(0,temp_dict['index'][i].strip())
                data.append(y)
                i +=1
            table.name= ' '.join([str(elem) for elem in tickers]) + '-' + attrib + '- Mean Daily Returns'
            columns = ['Ticker' ,'Mean_Daily_Return']
            logging.debug("column  {}" .format(columns))
            for i in columns:
                FieldName = i
                FieldType=0
                table.fields.add(name=FieldName, dataType=FieldType)
            logging.debug("data type: {} data: {}" .format(type(data), data))
            result=data
            logging.debug("result {}" .format(result))
        elif (script.find('get_Cov_Matrix') !=-1):
            # Arguments: ticker..., start_date, end_date, attrib
            tickers = Arguments[: len(Arguments) - 3]
            Arguments = Arguments[len(Arguments) - 3:]
            start_date=Arguments[0]
            end_date=Arguments[1]
            attrib = Arguments[2]
            logging.debug("get_Cov_Matrix - tickers: {} Arguments {} start_date : {} end_date :{} attrib :{} " .format(tickers, Arguments, start_date, end_date, attrib))
            result = self.get_Cov_Matrix(tickers, start_date, end_date, attrib)
            logging.debug("result - type: {} data: {} " .format(type(result), result))
            converted = qlist.convert_df_list_cov(result)
            table.name= ' '.join([str(elem) for elem in tickers]) + '-' + attrib + '- Cov Matrix'
            logging.debug("column  {}" .format(converted[0]))
            for i in converted[0]:
                FieldName = i
                FieldType=0
                table.fields.add(name=FieldName, dataType=FieldType)
            result= converted[1]
            logging.debug("result {}" .format(result))
        elif (script.find('get_Simulated_Random_Portfolios') !=-1):
            logging.debug("Inside Else Block JRP ")
            # Arguments: ticker..., start_date, end_date, attrib,
            #            num_portfolios, rf (risk-free rate)
            tickers = Arguments[: len(Arguments) - 5]
            Arguments = Arguments[len(Arguments) - 5:]
            start_date=Arguments[0]
            end_date=Arguments[1]
            attrib = Arguments[2]
            num_portfolios=Arguments[3]
            rf=Arguments[4]
            logging.debug("get_Simulatd_Random_Portfolios - tickers: {} Arguments {} start_date : {} end_date :{} attrib :{} num_portfolios :{} rf : {} " .format(tickers, Arguments, start_date, end_date, attrib, num_portfolios, rf))
            # Simulation needs the mean daily returns and covariance matrix
            mean_returns = self.get_Mean_Daily_Return(tickers, start_date, end_date, attrib)
            logging.debug("mean_returns - type: {} data: {} " .format(type(mean_returns), mean_returns))
            cov = self.get_Cov_Matrix(tickers, start_date, end_date, attrib)
            logging.debug("cov - type: {} data: {} " .format(type(cov), cov))
            result = self.get_Simulated_Random_Portfolios(num_portfolios, mean_returns, cov, rf,tickers)
            logging.debug("result - type: {} data: {} " .format(type(result), result[1]))
            converted = qlist.convert_df_list_sim(result[0])
            table.name= ' '.join([str(elem) for elem in tickers]) + '-' +'- Simulated_Random_Portfolios'
            logging.debug("column  {}" .format(converted[0]))
            # NOTE(review): x is assigned but never used in this branch
            x=1
            for i in converted[0]:
                FieldName = i
                FieldType=0
                table.fields.add(name=FieldName, dataType=FieldType)
            result= converted[1]
            logging.debug("result {}" .format(result))
        else:
            # Unknown function name: return an empty result set
            result = []

        self.send_table_description(table, context)
        return result
コード例 #11
0
    def _prophetScript(request, context):
        """
        Forecast a time series with Prophet and return (value, date) rows.

        Each incoming row carries duals in a fixed order: date stamp,
        figure, forecast periods, forecast type, (unused), (unused),
        yhat field name, changepoint scale, floor, cap.

        :param request: iterable sequence of bundled rows
        :return: yields a BundledRows with one (forecast, date) row per
                 forecast period
        """

        # instantiate a list for measure data
        dateStampList = []
        figuresList = []
        forecastPeriods = None
        forecastType = None
        m = None
        yhat = None
        changePoint = None
        minFloor = None
        maxCap = None

        for request_rows in request:

            # iterate over each request row (contains rows, duals, numData)

            # pull duals from each row, and the numData from duals
            for row in request_rows.rows:
                # the first numData contains the date stamps
                dateStamps = [d.numData for d in row.duals][0]
                # Serial day number relative to 1899-12-30
                # (ordinal of 1900-01-01 minus 2) converted to datetime
                pythonDate = datetime.fromordinal(
                    datetime(1900, 1, 1).toordinal() + int(dateStamps) - 2)
                dateStampList.append(pythonDate)

                # the second numData contains the figures
                figures = int([d.numData for d in row.duals][1])
                figuresList.append(figures)

                # these parameters repeat on every row; read them once
                if not forecastPeriods:
                    forecastPeriods = int([d.numData for d in row.duals][2])
                if not forecastType:
                    forecastType = [d.strData for d in row.duals][3]
                if not yhat:
                    yhat = [d.strData for d in row.duals][6]
                if not changePoint:
                    changePoint = int([d.numData for d in row.duals][7])
                if not minFloor:
                    minFloor = int([d.numData for d in row.duals][8])
                if not maxCap:
                    maxCap = int([d.numData for d in row.duals][9])

        # create data frame in the ds/y layout Prophet expects
        dataFrame = pd.DataFrame({'ds': dateStampList, 'y': figuresList})
        print(dataFrame)

        if forecastType == 'hourly':
            # fit data to prophet
            m = Prophet(changepoint_prior_scale=changePoint)
            m.fit(dataFrame)

            # create future dataframe at hourly frequency
            future = m.make_future_dataframe(periods=forecastPeriods, freq='H')

        if forecastType == 'daily':
            # fit data to prophet
            m = Prophet(changepoint_prior_scale=changePoint)
            m.fit(dataFrame)

            # create future dataframe at the default (daily) frequency
            future = m.make_future_dataframe(periods=forecastPeriods)

        if forecastType == 'monthly':
            # fit data to prophet with an explicit monthly seasonality
            m = Prophet(weekly_seasonality=False,
                        changepoint_prior_scale=changePoint)
            m.add_seasonality(name='monthly', period=30.5, fourier_order=5)
            m.fit(dataFrame)

            # create future dataframe at month-start frequency
            future = m.make_future_dataframe(periods=forecastPeriods,
                                             freq='MS')

        # Fallback for unrecognised forecast types: fit a monthly-style model.
        # BUG FIX: this branch previously never built 'future', so
        # m.predict(future) below raised a NameError; build it here too.
        if not m:
            m = Prophet(weekly_seasonality=False,
                        changepoint_prior_scale=changePoint)
            m.add_seasonality(name='monthly', period=30.5, fourier_order=5)
            m.fit(dataFrame)
            future = m.make_future_dataframe(periods=forecastPeriods,
                                             freq='MS')

        forecast = m.predict(future)
        forecastList = forecast[yhat].values.tolist()
        dateList = pd.to_datetime(forecast['ds'].values.tolist())

        # convert forecast results to ints; unparseable values (e.g. NaN)
        # fall back to 0 instead of aborting the whole forecast
        resultsList = []
        for val in forecastList:
            try:
                resultsList.append(int(val))
            except (ValueError, TypeError, OverflowError):
                resultsList.append(0)

        finalDateList = []
        for ds in dateList:
            try:
                finalDateList.append(str(ds))
            except (ValueError, TypeError):
                finalDateList.append(0)

        # Create an iterable of dual with the result
        dualsList = []
        dualsList.append([SSE.Dual(numData=d) for d in resultsList])
        dualsList.append([SSE.Dual(strData=d) for d in finalDateList])

        # create response rows
        response_rows = []
        for i in range(len(resultsList)):
            duals = [dualsList[z][i] for z in range(len(dualsList))]
            response_rows.append(SSE.Row(duals=iter(duals)))

        # set and send table header: numeric forecast, string date stamp
        table = SSE.TableDescription(name='ProphetForecast')
        table.fields.add(dataType=SSE.NUMERIC)
        table.fields.add(dataType=SSE.STRING)
        md = (('qlik-tabledescription-bin', table.SerializeToString()), )
        context.send_initial_metadata(md)

        yield SSE.BundledRows(rows=response_rows)
コード例 #12
0
    def evaluate(self, context, script, ret_type, q):
        """
        Evaluates a script with given parameters and construct the result to a Row of duals.
        :param context:
        :param script:  script to evaluate
        :param ret_type: return data type
        :param q: data frame of received parameters, empty if no parameter was sent
        :return: a RowData of string dual
        """
        # Table description the user script may populate via the exposed
        # 'table' variable; only sent to Qlik if the script opts in below.
        table = SSE.TableDescription()
        logging.debug('Received data frame (q): {}'.format(q))
        locals_added = {
        }  # The variables set while executing the script will be saved to this dict
        # Evaluate script, the result must be saved to the qResult object
        # SECURITY NOTE(review): exec runs arbitrary code received from the
        # client — only expose this SSE to trusted Qlik deployments.
        exec(script, {
            'q': q,
            'numpy': numpy,
            'pandas': pandas,
            'table': table
        }, locals_added)

        if 'qResult' in locals_added:
            qResult = locals_added['qResult']
            logging.debug('Result (qResult): {}'.format(qResult))

            if 'tableDescription' in locals_added and locals_added[
                    'tableDescription'] is True:
                self.send_table_description(table, context)
                # If a tableDescription is sent, the return type should be updated accordingly
                # (one return type per field the script declared)
                ret_type = [
                    ReturnType(field.dataType) for field in table.fields
                ]
            else:
                # All returned columns have the same data type as the return type of the function
                if isinstance(qResult,
                              str) or not hasattr(qResult, '__iter__'):
                    columns = 1
                else:
                    if type(qResult) in [list, tuple]:
                        # Transformed to an array for simplifying getting the shape of the result, which can be of
                        # different types
                        qResult = numpy.array(qResult)
                    # 1-D results yield one column; 2-D results yield one
                    # column per second-axis element
                    columns = 1 if len(
                        qResult.shape) == 1 else qResult.shape[1]
                ret_type = [ret_type] * columns

            # Transform the result to bundled rows
            bundledRows = SSE.BundledRows()
            if isinstance(qResult, str) or not hasattr(qResult, '__iter__'):
                # A single value is returned
                bundledRows.rows.add(duals=self.get_duals(qResult, ret_type))
            else:
                # Each element of an iterable result becomes one row
                for row in qResult:
                    bundledRows.rows.add(duals=self.get_duals(row, ret_type))

            return bundledRows
        else:
            # No result was saved to qResult object
            msg = 'No result was saved to qResult, check your script.'
            self.raise_grpc_error(context, grpc.StatusCode.INVALID_ARGUMENT,
                                  msg)
コード例 #13
0
    def _geocodeScript(request, context):
        """
        Geocode addresses streamed from Qlik and yield the coordinates.

        Each input row carries three duals: [0] the address string, [1] a
        numeric row id, and [2] the geocoding API selection (read once,
        from the first row). Supported selections: 'Google', 'Open Street',
        'GeocoderDotUS'; anything else falls back to Nominatim.

        Sends a 'Geocodes' table description as initial metadata, then
        yields one SSE.BundledRows with (id, '[longitude, latitude]') per
        input row. Failed lookups yield 'Unavailable' for each coordinate.
        """
        api_selection = None
        geolocator = None
        resultList = []
        idNumList = []

        # 1-based counter, used only for progress logging
        z = 1

        # Iterate over bundled rows
        for request_rows in request:

            # Iterate over rows
            for row in request_rows.rows:
                logging.info('Geocoding address # ' + str(z))
                z += 1

                # The first dual is the address to geocode
                data = row.duals[0].strData

                # The second dual is the caller-supplied row id
                idNum = row.duals[1].numData
                idNumList.append(idNum)

                # Pick the geocoding backend once, from the first row.
                # Possible choices are: Google, Open Street, GeocoderDotUS
                if not api_selection:
                    api_selection = row.duals[2].strData

                    if 'google' in api_selection.lower():
                        geolocator = GoogleV3()
                    elif 'open street' in api_selection.lower():
                        geolocator = Nominatim(scheme='http')
                    elif 'geocoderdotus' in api_selection.lower():
                        geolocator = GeocoderDotUS()
                    else:
                        geolocator = Nominatim()

                # BUG FIX: reset per row. Previously `location` kept the
                # previous row's value when both attempts failed (wrong
                # coordinates for this id) and was unbound on a first-row
                # failure (NameError).
                location = None

                # Geocode with one retry; transient/service errors leave
                # `location` as None, handled below.
                for _attempt in range(2):
                    try:
                        location = geolocator.geocode(data, timeout=2)
                        break
                    except (geopy.exc.GeocoderTimedOut,
                            geopy.exc.GeocoderQueryError,
                            urllib.error.HTTPError):
                        pass

                # `None.latitude` raises AttributeError, so a failed lookup
                # falls through to the 'Unavailable' placeholders.
                try:
                    latitude = location.latitude
                except AttributeError:
                    latitude = 'Unavailable'
                try:
                    longitude = location.longitude
                except AttributeError:
                    longitude = 'Unavailable'

                coordinates = '[' + str(longitude) + ', ' + str(latitude) + ']'

                resultList.append(coordinates)

        # Build one response row per input row: (id dual, coordinates dual)
        response_rows = [
            SSE.Row(duals=[SSE.Dual(numData=num), SSE.Dual(strData=coords)])
            for num, coords in zip(idNumList, resultList)
        ]

        # Set and send Table header
        table = SSE.TableDescription(name='Geocodes')
        table.fields.add(dataType=SSE.NUMERIC)
        table.fields.add(dataType=SSE.STRING)
        md = (('qlik-tabledescription-bin', table.SerializeToString()), )
        context.send_initial_metadata(md)

        yield SSE.BundledRows(rows=response_rows)
Code example #14
0
    def evaluate(self, context, script, ret_type, params=[]):
        """
        Evaluates a script with given parameters and construct the result to a Row of duals.

        The script names a Peloton data function, optionally followed by
        credentials in parentheses: 'function_name(user,password)'.
        Supported functions: get_all_instructors, get_all_sessions,
        get_all_workouts, get_all_rides, get_all_output,
        get_apple_watch_output. Endpoint URLs, option strings and column
        remove-lists are read from the module-level `config` parser, one
        section per function name.

        :param context: gRPC context, used to send the table description
        :param script:  script to evaluate (function name + optional credentials)
        :param ret_type: return data type
        :param params: params to evaluate. Default: [] (currently unused;
                       the mutable default is harmless here because it is
                       never read or mutated)
        :return: the result rows for the selected function (list of lists)
        """
        # Evaluate script
        logging.info("In Evaluate {} {} {}" .format(script, ret_type, params))


        table = SSE.TableDescription()
        logging.info("Peleton Function {} called" .format(script))

        # BUG FIX: initialise credential-derived state up front. Previously
        # `User` and `session` were only bound inside the credentials branch
        # below, but are used unconditionally by most function branches, so a
        # script without '(user,pass)' raised NameError instead of failing
        # predictably.
        User = ''
        session = None

        #If User name and Password Present Remove Username and Password and Pass only function name
        if(script.find('(') !=-1):
            index=script.index('(')
            UserPass = script[index:]
            script = script[:index]
            UserPass = (UserPass.replace('(','')).replace(')','')
            index=UserPass.index(',')
            Pass = (UserPass[index:]).replace(',','')
            User = UserPass[:index]
            logging.debug("index {}, Script {} , UserPass {}, User {} Pass {}" .format(index, script, UserPass, User, Pass ))
            session = self.get_all_sessions(User, Pass)
        # Every function has a 'url' entry in its config section
        url = config.get(script, 'url')

        # Branch: list of instructors (no credentials required)
        if (script.find('get_all_instructors') !=-1):
            result = self.get_all_instructors(url)
            #list of Dictionary returned
            logging.debug("result type  : {} data : {} " .format(type(result), result))
            # 'remlist' is stored as a string like '[a, b]'; strip the
            # brackets and split into column names to drop
            remlist = (config.get(script, 'remlist')).strip('][').split(', ')
            logging.debug("Remlist Type {}, List {}" .format(type(remlist), remlist))
            if (len(remlist)) > 1:
                result = self.remove_columns(remlist, result)
            logging.debug("result type  : {} data : {} " .format(type(result), result))
            converted = qlist.convert_list_of_dicts(result)
            logging.debug("converted type JRP : {} columns : {} data :{} " .format(type(converted[0]), converted[0], converted[1]))
            
            table.name = 'Peloton-Instructor'
            # converted[0] holds column names, converted[1] the data rows
            for i in converted[0]:
                FieldName = i
                FieldType=0
                table.fields.add(name=FieldName, dataType=FieldType)
            result = converted[1]
            logging.debug("result type  : {} data : {} " .format(type(result), result))
            for x in result:
                logging.debug("x type  : {} data : {} " .format(type(x), x))
        # Branch: the authenticated user's profile data (single dict)
        elif (script.find('get_all_sessions') !=-1):
            logging.debug("get_all_sessions")
            UserData = session[2]
            UserWorkout = session[3]
            UserId=session[4]
            #filter all the non values
            #converst string representation to list
            remlist = (config.get(script, 'remlist')).strip('][').split(', ')
            logging.debug("Remlist Type {}, List {}" .format(type(remlist), remlist))
            if (len(remlist)) > 1:
                result = self.remove_columns_dict(remlist, UserData)
            else:
                result = UserData

            converted = qlist.convert_dicts_list(result)
            table.name= User +'- Peloton User Data'
            logging.debug("column  {}" .format(converted[0]))
            for i in converted[0]:
                FieldName = i
                FieldType=0
                table.fields.add(name=FieldName, dataType=FieldType)
            # Single dict converts to a single row; wrap it in a list
            result=[]
            result.append(converted[1])
            logging.debug("result {}" .format(result))
            
        # Branch: the user's workout history (list of dicts)
        elif (script.find('get_all_workouts') !=-1):
            logging.debug("Calling get_all_workouts")
            UserData = session[2]
            UserWorkout = session[3]
            UserId=session[4]
            remlist = (config.get(script, 'remlist')).strip('][').split(', ')
            logging.debug("Remlist Type {}, List {}" .format(type(remlist), remlist))
            if (len(remlist)) > 1:
                result = self.remove_columns(remlist, UserWorkout)
            else:
                result = UserWorkout
            logging.debug("result type  : {} data : {} " .format(type(result), result))
            converted = qlist.convert_list_of_dicts(result)
            logging.debug("converted type JRP : {} columns : {} data :{} " .format(type(converted[0]), converted[0], converted[1]))
            #insert user id as the first column of every row
            converted[0].insert(0, 'user_id')
            for x in converted[1]:
                x.insert(0, UserId)
            table.name = User +'- Peloton Work Out Data'
            for i in converted[0]:
                FieldName = i
                FieldType=0
                table.fields.add(name=FieldName, dataType=FieldType)
            result = converted[1]
            logging.debug("result type  : {} data : {} " .format(type(result), result))
         
       
        # Branch: ride data; nested dicts are flattened with underscore keys
        elif (script.find('get_all_rides') !=-1):
            result =[]
            options = config.get(script, 'options')
            url = config.get(script, 'url')
            UserData = self.get_all_workouts(session[0],url, session[2]["id"], options)
            logging.debug("get all workout {}" .format(UserData))
            UserData = UserData['data']
            UserId= session[4]
            UserData_Flattened = []
            logging.debug("UserData Type {}, List {}" .format(type(UserData), UserData))
            for x in UserData:
                logging.debug("UserDataElements Type {}, List {}" .format(type(x), x))
                flattend = flatten(x, reducer = 'underscore')
                UserData_Flattened.append(flattend)
            
            remlist = (config.get(script, 'remlist')).strip('][').split(', ')
            logging.debug("Remlist Type {}, List {}" .format(type(remlist), remlist))
            if (len(remlist)) > 1:
                result = self.remove_columns(remlist, UserData_Flattened)
            else:
                result = UserData_Flattened
            converted = qlist.convert_list_of_dicts(result)
            logging.debug("converted type JRP : {} columns : {} data :{} " .format(type(converted[0]), converted[0], converted[1]))
            converted[0].insert(0, 'user_id')
            for x in converted[1]:
                x.insert(0, UserId)
            table.name= User +'- Peloton Ride Data'
            for i in converted[0]:
                FieldName = i
                FieldType=0
                table.fields.add(name=FieldName, dataType=FieldType)
            result = converted[1]
            logging.debug("result type  : {} data : {} " .format(type(result), result))
            
        # Branch: per-workout summary details, one API call per workout id
        elif (script.find('get_all_output') !=-1):
            result =[]
            #get a list of workout ids
            options = config.get(script, 'options')
            url = config.get(script, 'user_url')
            UserData = self.get_all_workouts(session[0],url, session[2]["id"], options)
            UserData = UserData['data']
            logging.debug('UserData type {} and UserData {}' .format(type(UserData), UserData))
         
            workout_ids =[]
            for x in UserData:
                workout_id = (x['id'])
                workout_ids.append(workout_id)
                logging.debug("Workout ID Type {}, Data {}" .format(type(workout_ids), workout_ids))
            options = config.get(script, 'options_summary')
            url = config.get(script, 'url')
            
            remlist = (config.get(script, 'remlist')).strip('][').split(', ')
            logging.debug("Remlist Type {}, List {}" .format(type(remlist), remlist))
            logging.debug(len(remlist))
            
            for x in workout_ids:
                UserData = self.get_all_details(session[0],url, x, options).json()
                logging.debug('UserData type {} and UserData {}' .format(type(UserData), UserData))
                if (len(remlist)) > 1:
                    temp = self.remove_columns_dict(remlist, UserData)
                    logging.debug('Removed UserData type {} and UserData {}' .format(type(UserData), UserData))
                else:
                    temp = UserData
                    logging.debug('No Var UserData type {} and UserData {}' .format(type(UserData), UserData))
                logging.debug('Temp type {} and Temp {}' .format(type(temp), temp))
                converted = qlist.convert_dicts_list(temp)
                result.append(converted[1])
                
            table.name= User +'- Peloton Output Data'
            # NOTE(review): field names come from the LAST workout's
            # converted columns; assumes all workouts share the same schema
            for i in converted[0]:
                FieldName = i
                FieldType=0
                table.fields.add(name=FieldName, dataType=FieldType)
            #result = [['a','b','c'],['a','b','c']]
            logging.debug("result {}" .format(result))
        # Branch: like get_all_output, but guarantees the Apple Watch
        # calorie columns exist (blank when the device wasn't used)
        elif (script.find('get_apple_watch_output') !=-1):
            result =[]
            #get a list of workout ids
            options = config.get(script, 'options')
            url = config.get(script, 'user_url')
            UserData = self.get_all_workouts(session[0],url, session[2]["id"], options)
            UserData = UserData['data']
            logging.debug('UserData type {} and UserData {}' .format(type(UserData), UserData))
            workout_ids =[]
            for x in UserData:
                workout_id = (x['id'])
                workout_ids.append(workout_id)
                logging.debug("Workout ID Type {}, Data {}" .format(type(workout_ids), workout_ids))
            options = config.get(script, 'options_summary')
            url = config.get(script, 'url')
            
            remlist = (config.get(script, 'remlist')).strip('][').split(', ')
            logging.debug("Remlist Type {}, List {}" .format(type(remlist), remlist))
            logging.debug(len(remlist))
            
            for x in workout_ids:
                UserData = self.get_all_details(session[0],url, x, options).json()
                logging.debug('UserData type {} and UserData {}' .format(type(UserData), UserData))
                key_to_lookup = 'apple_watch_active_calories'
                if (len(remlist)) > 1:
                    temp = self.remove_columns_dict(remlist, UserData)
                    logging.debug('Removed UserData type {} and UserData {}' .format(type(UserData), UserData))
                else:
                    logging.debug('No Var UserData type {} and UserData {}' .format(type(UserData), UserData))
                    temp = UserData
                if key_to_lookup in temp:
                    logging.debug('We have no Apple Watchj Data {} {}' .format(type(temp), temp))
                else :
                    # Pad missing Apple Watch columns so every row has the
                    # same shape
                    temp['apple_watch_active_calories'] = ''
                    temp['apple_watch_total_calories'] = ''
                logging.debug('Temp type {} and Temp {}' .format(type(temp), temp))
                converted = qlist.convert_dicts_list(temp)
                result.append(converted[1])
            table.name= User +'- Peloton Apple Watch Output Data'
            for i in converted[0]:
                FieldName = i
                FieldType=0
                table.fields.add(name=FieldName, dataType=FieldType)
            logging.debug('JRP Columns type {} and columns {}' .format(type(converted[0]), converted[0]))
            logging.debug("result {}" .format(result))
        else:
            # Unknown function: empty result, unnamed/fieldless table
            result = []

        # Send the table description metadata, then hand back the rows
        self.send_table_description(table, context)
        return result