Example #1
    def get_data_grid_rows(self):
        utils.debug("get data rows from data grid.")
        trs = self.slb.get_elements(datagrid.table_trs)
        if not trs:
            utils.warn("Did not find any data grid rows!")
        utils.debug("got [{}] data rows in total".format(len(trs)))
        return trs
Example #2
def request(req, session, payload=None):
    try:
        if 'GET' in req:
            return session.get(
                TARGET,
                verify=False,
                allow_redirects=False)

        elif 'POST' in req:
            return session.post(
                TARGET,
                data=payload,
                verify=False,
                allow_redirects=False)

    except exceptions.HTTPError as eh:
        print(u.error(), 'HTTPError:', eh)
        quit()

    except exceptions.ConnectionError as ec:
        print(u.error(), 'ConnectionError:', ec)
        quit()

    except exceptions.Timeout as et:
        print(u.warn(), 'Timeout:', et)

    except exceptions.RequestException as er:
        print(u.warn(), 'RequestException:', er)
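A minimal usage sketch for the dispatcher above, assuming TARGET and the requests exceptions are defined as in the surrounding script; the payload keys are made up:

import requests

session = requests.Session()
resp = request('GET', session)  # plain GET, redirects disabled
if resp is not None and resp.ok:
    # form-encoded POST; request() returns None on timeout, hence the check above
    resp = request('POST', session, payload={'q': 'demo'})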
Example #3
    def get_target_row(self, row_unique_key_name, row_unique_key):
        """
        get target row in data grid matched unique key value(eg: type, ruleId)
        """
        target_row, target_row_id, action_column_index = [], None, -1
        trs = self.get_data_grid_rows()
        headers = self.get_data_grid_headers()

        for index, header in enumerate(headers):
            if header in (constants.ActionsColumnName, constants.ActionColumnName):
                action_column_index = index  # get the [Action] column index
                break
        for tr in trs:
            tds_in_tr = self.slb.get_elements_via_element(tr, datagrid.td)
            target_row_id = self.slb.get_element_text(
                tds_in_tr[0])  # first column value, help find row on ui
            for td in tds_in_tr:
                if self.slb.get_element_text(td) == row_unique_key:
                    utils.log("found the data record with {}={}".format(
                        row_unique_key_name, row_unique_key))
                    target_row.append(tr)
                    break
            if target_row:
                break
        if not target_row:
            utils.warn("Did not find any data record with {}={}!".format(
                row_unique_key_name, row_unique_key))
            return None, None, -1  # keep the 3-tuple shape so callers can unpack
        return target_row, target_row_id, action_column_index
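A hedged usage sketch, assuming a page object exposing the method above; the key name and value here are made up:

row, row_id, action_idx = page.get_target_row('Rule Id', '12345')
if row:
    print('matched row id: {}, action column index: {}'.format(row_id, action_idx))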
Example #4
def post_json_files(root):
    """
    Post json objects in a designated directory to BuildingOS.

    Params:
        root string
    """
    json_dir = defaults.json_dir(root)
    archive = defaults.json_archive(root)
    post_url = defaults.BOS_URL

    json_files = utils.get_files_in_dir(json_dir)
    if not json_files:
        utils.warn('No JSON files to process. Terminating')
        exit()

    utils.print_time('LOADER START')
    for json_file in json_files:
        print('Posting file: %s ...' % (json_file)),
        with open(json_file, 'rb') as jf:
            payload = {'data': jf}
            response = requests.post(post_url, files=payload)
            print('done')

            print('Server response: %s' % (response.text))

        utils.move(json_file, archive)

    utils.print_time('LOADER END')
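A standalone sketch of the upload each loop iteration performs; the endpoint URL and file name are hypothetical, and requests sends the file as multipart/form-data under the field name 'data':

import requests

with open('dump_20160101_000000.json', 'rb') as jf:  # hypothetical file name
    response = requests.post('https://example.com/ingest',  # hypothetical URL
                             files={'data': jf})
print('%d: %s' % (response.status_code, response.text))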
Example #5
def create_json(root):
    """
    Create the json file containing reading data.

    Params:
        root string
    """
    data_dir = defaults.downloads(root)
    output_dir = defaults.json_dir(root)
    archive = defaults.data_archive(root)

    catalog = []
    data = []
    json_file = {}

    data_files = utils.get_files_in_dir(data_dir)
    if not data_files:
        utils.warn('No CSV files to process. Terminating')
        exit()

    utils.print_time('PROCESSOR START')
    print('Begin JSON file generation')
    for data_file in data_files:
        with open(data_file, 'rb') as f:
            reader = csv.reader(f)
            meterId, meterName = reader.next()

            print('Processing meterId %s ...' % (meterId)),

            info = {'meterId': meterId, 'meterName': meterName}
            catalog.append(info)

            for row in reader:
                ts = row[0]
                val = float(row[1])
                reading = {'timestamp': ts,
                           'value': val,
                           'meterId': meterId}
                data.append(reading)

            print('done')
        utils.move(data_file, archive)

    json_file['datasource'] = defaults.URI
    json_file['meterCatalog'] = catalog
    json_file['readings'] = data

    print('End JSON file generation')

    curr_dt = datetime.now()
    json_fname = 'dump_%s.json' % (utils.format_dt(curr_dt))
    save_path = os.path.join(output_dir, json_fname)

    print('Writing JSON to file %s ...' % (save_path)),
    with open(save_path, 'wb') as out:
        json.dump(json_file, out)
        print('done')

    utils.print_time('PROCESSOR END')
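For reference, the document written above has this shape (all values here are illustrative, not from a real run):

example_output = {
    'datasource': 'uri-from-defaults',  # defaults.URI in the real script
    'meterCatalog': [{'meterId': 'm1', 'meterName': 'Main Hall'}],
    'readings': [{'timestamp': '2016-01-01T00:00:00',
                  'value': 42.0,
                  'meterId': 'm1'}],
}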
Example #7
    def view_add_multiple_mail_recipient(self, mail_recip_list):
        utils.log("Add multiple mail recipients: {}".format(mail_recip_list))
        if not mail_recip_list:
            utils.warn(
                "There are no items in EmailRecipients, please check your configuration!"
            )
            return
        for index, item in enumerate(mail_recip_list):
            self.view_add_mail_recipient(item, index)
Example #8
    def get_data_grid_headers(self):
        utils.debug("get headers from data grid.")
        headers = []
        headers_elements = self.slb.get_elements(datagrid.headers)
        for header in headers_elements:
            headers.append(self.slb.get_element_text(header))
        if not headers:
            utils.warn("Did not find any headers for this data grid!")
        utils.debug("got [{}] data grid headers.".format(len(headers)))
        return headers
Example #9
    def click_action_in_row(self, action_name, row_unique_key_name,
                            row_unique_key):
        """
        Click Action [View], [Edit] etc. in the specific data row.
        :param action_name: which action button need to click
        :param row_unique_key_name: the unique key name for current row. eg:[Type], [Email Title], [Rule Id]
        :param row_unique_key: the unique key value.
        """
        action_btn_list = self.get_action_btn_list(row_unique_key_name,
                                                   row_unique_key)

        for action_btn in action_btn_list:
            if self.slb.get_element_text(action_btn) == action_name:
                utils.log("click [{}] on row with [{}]={}".format(
                    action_name, row_unique_key_name, row_unique_key))
                action_btn.click()
                return
        utils.warn(
            "Did not find a record in the data grid with [{}]={}, please check!".format(
                row_unique_key_name, row_unique_key))
        self.slb.sleep(2)
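A hedged usage sketch, assuming a page object exposing the method above; the action name, key name, and key value are made up:

page.click_action_in_row('Edit', 'Rule Id', '12345')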
Example #10
def get_select_people_complex_payload(crm_complex_data, grp1_label_list,
                                      grp2_label_list):
    """
    generate two group search people complex json.
    :param crm_complex_data:
    :param grp1_label_list: group 1 labels list, the groupId for labels in list should be same in yml.
    :param grp2_label_list: group 2 labels list, eg: ['latestRiskScore', 'installDate']
    :return: payload json
    """
    if not isinstance(crm_complex_data, dict) or not crm_complex_data:
        utils.warn('please pass a valid dict parameter')
        return None
    group1_label_data_list, group2_label_data_list = [], []
    for label in grp1_label_list:
        group1_label_data_list.append(crm_complex_data[label])
    for label in grp2_label_list:
        group2_label_data_list.append(crm_complex_data[label])
    search_complex_payload = SearchLabelGroupReq([
        get_logical_condition(group1_label_data_list),
        get_logical_condition(group2_label_data_list)
    ])
    json_str = utils.dump_obj(search_complex_payload)
    payload_json = utils.load_json(json_str)
    return payload_json
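A hedged usage sketch; the label names and per-label data below are made up, and in the real suite crm_complex_data would come from the yml fixture:

crm_complex_data = {
    'type': {'labelId': 'type', 'operator': 'eq', 'value': 'vip'},
    'latestRiskScore': {'labelId': 'risk', 'operator': 'gte', 'value': 80},
    'installDate': {'labelId': 'install', 'operator': 'before', 'value': '2020-01-01'},
}
# group 1 filters on [type]; group 2 filters on [latestRiskScore] and [installDate]
payload = get_select_people_complex_payload(
    crm_complex_data, ['type'], ['latestRiskScore', 'installDate'])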
Example #11
    def verify_es_details_with_src(self, es_check_list, src_check_list, es_data_list, src_query, partition_0=None):
        if len(es_check_list) != len(src_check_list):
            return utils.warn("please make sure the src and es compare field counts are equal!")
        es_data = es_data_list[:3]  # compare at most the first 3 records
        for num, es in enumerate(es_data, start=1):
            es_actual, src_expect = [], []
            utils.log("check record #{}".format(num))
            if partition_0:
                src = self.reds.query_results(src_query.format(es['accountid'], partition_0))[0]
            else:
                src = self.reds.query_results(src_query.format(es['accountid']))[0]
            for es_col, src_col in zip(es_check_list, src_check_list):
                es_actual.append(str(es[es_col]) if es[es_col] is not None else es[es_col])
                src_expect.append(str(src[src_col]) if src[src_col] is not None else src[src_col])
            utils.log("verify es and src [{}] data match for the fields below.\n{}".format(src_query.split(' ')[3], es_check_list))
            self.assertListEqual(src_expect, es_actual, 'Mismatch between es and src data!')
Example #12
def get_session_tokens(session, do_print=False):
    response = request("GET", session)

    try:
        soup = BeautifulSoup(response.text, 'html.parser')
        csrf_token = soup('input', {'name': CSRF_TOKEN})[0]['value']

        session_cookie = re \
            .search(SESSION_COOKIE + '=(.*?);', response.headers['set-cookie']) \
            .group(1)

    except Exception as e:
        print(u.warn(), 'NO TOKEN(S) FOUND:', e)
        return '', ''

    if do_print:
        print_tokens(csrf_token, session_cookie)

    return session_cookie, csrf_token
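A hedged usage sketch chaining the helpers above; CSRF_TOKEN, SESSION_COOKIE, and print_tokens() are constants/helpers from the surrounding script, and the POST field name is an assumption:

import requests

session = requests.Session()
session_cookie, csrf_token = get_session_tokens(session, do_print=True)
if csrf_token:
    request('POST', session, payload={CSRF_TOKEN: csrf_token})  # assumed field name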
Example #13
def run_batch(root, start, end, idx=None):
    """
    Run this script in batch mode. Download reading data whose timestamps
    lie within start and end dates.

    Dates must use the following format, in 24-hour time (note the T between
    date and time):
        YYYY-MM-DDTHH:MM:SS

    If idx is a non-negative integer, download only the meter at that index.
    idx is zero-based. If idx is greater than the number of meters, nothing
    happens; no files are downloaded. The default behavior is to download data
    for all meters.

    Params:
        root string
        start string
        end string
        idx integer
    """
    s_date = get_date(start)
    e_date = get_date(end)

    if not s_date or not e_date:
        raise ValueError('Invalid/missing dates')
    elif s_date > e_date:  # compare the parsed dates, not the raw strings
        raise ValueError('Start date must come before end date')
    elif not utils.exists_dir(root):
        raise ValueError('Root directory not found')
    elif idx is not None and not is_valid_index(idx):
        raise ValueError('Index must be non-negative integer')

    creds_file = defaults.creds(root)
    cnxn_str = utils.get_cnxn_str(creds_file)
    output_dir = defaults.downloads(root)
    meter_file = defaults.meter_file(root)

    utils.print_time('GETTER START')

    with Cursor.Cursor(cnxn_str) as cursor:
        dq = get_reading_from_name_query_str()
        meters = utils.read_meter_file(meter_file)
        for i, m in enumerate(meters):
            if idx is not None and idx != i:
                continue
            ion_name = utils.get_ion_name(m)
            qid = utils.get_ion_qid(m)
            try:
                cursor.execute(dq, ion_name, qid, str(s_date), str(e_date))
            except pyodbc.Error:
                utils.error(
                    'Problem with query to get data for meter %s qid %d' %
                    (ion_name, qid))
                continue
            if cursor.rowcount == 0:
                utils.warn('No data found for meter %s qid %d' %
                           (ion_name, qid))
                continue

            meterId, meterName = utils.get_lucid_id_and_name(m)
            s_date_str = utils.make_lucid_ts(str(s_date))
            e_date_str = utils.make_lucid_ts(str(e_date))
            dl_fname = "%sT%sT%s.csv" % (meterId, s_date_str, e_date_str)
            path = os.path.join(output_dir, dl_fname)

            print('Writing data for meter %s qid %d to file: %s ...' %
                  (ion_name, qid, path)),
            with open(path, 'wb') as data_file:
                writer = csv.writer(data_file)
                writer.writerow([meterId, meterName])

                for row in cursor:
                    ts = row.TimestampUTC
                    val = row.Value
                    data_row = [utils.make_lucid_ts(ts), val]
                    writer.writerow(data_row)
                print('done')
    utils.print_time('GETTER END')
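A hypothetical invocation; the root path is made up, and the date strings follow the required YYYY-MM-DDTHH:MM:SS format:

run_batch('/srv/lucid', '2016-01-01T00:00:00', '2016-01-31T23:59:59')         # all meters
run_batch('/srv/lucid', '2016-01-01T00:00:00', '2016-01-31T23:59:59', idx=2)  # third meter only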