Example #1
    def prepare_alphafold_dataset(self, alpha):
        print(f'Preparing dataset for alpha-{alpha}')
        if alpha != self.ALPHAs:
            self.gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI], separator='-')
        else:
            self.gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI+self.runoff_years], separator='-')

        self.alpha_prepared_dir = str(Path(self.tl_model.prepared_data_dir) / f'alpha_{alpha}')
        self.alpha_model_dir = str(Path(self.tl_model.cluster_dir) / f'alpha_{alpha}_GT-{self.gt_years}')
        
        for pkl in utils.find(f'*alpha_{alpha}_preprocessed.pkl', self.alpha_prepared_dir):
            if "target_ds_train" in pkl: self.target_ds_preprocessed_path = pkl
            elif "rf_ds_train" in pkl: self.rf_ds_preprocessed_path = pkl
            elif "target_ds_test" in pkl: self.x_test_path = pkl
            elif "rf_ds_test" in pkl: self.y_test_path = pkl
        
        LocalModelParams(self, utils.open_pickle(self.target_ds_preprocessed_path))

        found = utils.find('*standardized_stacked_arr.pkl', self.alpha_prepared_dir)
        if found:
            self.alpha_standardized_stacked_arr_path = found[0]
        else:
            self.alpha_standardized_stacked_arr_path = prepare.flatten_and_standardize_dataset(self, self.alpha_prepared_dir)
        print(f'--> Months for this dataset are: {self.month_names}')

        print(
            f'paths created @ prepare_alphafold_dataset():\nself.alpha_prepared_dir: "{self.alpha_prepared_dir}", \nself.alpha_model_dir: "{self.alpha_model_dir}"'
            f'\nself.target_ds_preprocessed_path: "{self.target_ds_preprocessed_path}", \nself.rf_ds_preprocessed_path: "{self.rf_ds_preprocessed_path}"'
            f'\nself.x_test_path: "{self.x_test_path}", \nself.y_test_path: "{self.y_test_path}"'
            f'\nself.alpha_standardized_stacked_arr_path: "{self.alpha_standardized_stacked_arr_path}", \nself.gt_years: {self.gt_years}'
            )
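In this example utils.find(pattern, directory) returns a list of paths matching a glob pattern. A minimal sketch of such a helper, assuming recursive matching via fnmatch (the project's actual implementation may differ):

import fnmatch
import os

def find(pattern, path):
    # recursively collect file paths under `path` whose basename
    # matches the glob `pattern`
    result = []
    for root, _, files in os.walk(path):
        for name in files:
            if fnmatch.fnmatch(name, pattern):
                result.append(os.path.join(root, name))
    return result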
Example #2
def brickfind_crawl(brick, args):
    if brick.endswith("/"):
        brick = brick[0:len(brick)-1]

    working_dir = os.path.dirname(args.outfile)
    mkdirp(working_dir, exit_on_err=True, logger=logger)
    create_file(args.outfile, exit_on_err=True, logger=logger)

    with open(args.outfile, "a+") as fout:
        brick_path_len = len(brick)

        def output_callback(path, filter_result):
            path = path.strip()
            path = path[brick_path_len+1:]
            output_write(fout, path, args.output_prefix, encode=True)

        ignore_dirs = [os.path.join(brick, dirname)
                       for dirname in
                       conf.get_opt("brick_ignore_dirs").split(",")]

        find(brick, callback_func=output_callback,
             ignore_dirs=ignore_dirs)

        fout.flush()
        os.fsync(fout.fileno())
Example #3
def test_registration(browser):
    # Data
    page_heading_locator = 'form#register_form h2'
    page_heading = 'Зарегистрироваться'  # "Register"
    success_message = 'Спасибо за регистрацию!'  # "Thank you for registering!"
    success_message_locator = '//div[contains(text(), "Спасибо за регистрацию")]'

    # Arrange
    browser.get(_locators.main_page_link)

    utils.find(browser, _locators.login_link).click()
    page_detector_text = utils.find(browser, page_heading_locator).text
    assert page_heading in page_detector_text, \
        "Search page heading '%s' should contain text '%s'" % (page_detector_text, page_heading)

    # Steps
    utils.registrate(browser, test_data.new_email, test_data.password)

    WebDriverWait(browser, 5).until(
        EC.presence_of_element_located((By.XPATH, success_message_locator)))

    # Assert
    success_message_text = utils.find_xpath(browser,
                                            success_message_locator).text
    assert success_message in success_message_text, \
        "Search success message on page '%s' should contain text '%s'" % (success_message_text, success_message)
Example #4
    def detect_serialized_datasets(self):
        """
        Finding raw data pickles.
        If none found, proceed to creating pickles out of raw data.
        calls - 
        1. prepare.get_raw_input_data
        2. prepare.get_raw_target_data
        3. prepare.prepare_dataset
        """
        prepared_data_dir = str(utils.prepared_data_folder / self.dir_str / self.period)
        os.makedirs(prepared_data_dir, exist_ok=True)
        self.prepared_data_dir = prepared_data_dir
        print(f'Looking for pickles in {self.prepared_data_dir}')

        if len(utils.find('*serialized.pkl', self.prepared_data_dir)) == 2:
            print('This domain-period combination has been serialized before, loading objects...')
            for pkl in utils.find('*.pkl', self.prepared_data_dir):
                if "input_ds" in pkl: self.input_ds_serialized_path = pkl
                elif "rf_ds" in pkl: self.rf_ds_serialized_path = pkl
        else: 
            print('Proceeding to load & serialize raw data. ')
            self.raw_input_dir = prepare.get_raw_input_data(self)
            self.raw_rf_dir = prepare.get_raw_target_data(self)
            print(f'Raw input datasets taken from @: \n{self.raw_input_dir}')
            print(f'Raw rainfall datasets taken from @: \n{self.raw_rf_dir}')
            self.input_ds_serialized_path, self.rf_ds_serialized_path = prepare.prepare_dataset(self, self.prepared_data_dir)
        print(f'Serialized raw input datasets @: \n{self.input_ds_serialized_path}')
        print(f'Serialized raw RF datasets @: \n{self.rf_ds_serialized_path}')
Example #5
def brickfind_crawl(brick, args):
    if brick.endswith("/"):
        brick = brick[0:len(brick)-1]

    working_dir = os.path.dirname(args.outfile)
    mkdirp(working_dir, exit_on_err=True, logger=logger)
    create_file(args.outfile, exit_on_err=True, logger=logger)

    with open(args.outfile, "a+") as fout:
        brick_path_len = len(brick)

        def output_callback(path, filter_result):
            path = path.strip()
            path = path[brick_path_len+1:]
            output_write(fout, path, args.output_prefix,
                         encode=(not args.no_encode), tag=args.tag)

        ignore_dirs = [os.path.join(brick, dirname)
                       for dirname in
                       conf.get_opt("brick_ignore_dirs").split(",")]

        find(brick, callback_func=output_callback,
             ignore_dirs=ignore_dirs)

        fout.flush()
        os.fsync(fout.fileno())
Example #6
def brickfind_crawl(brick, args):
    if brick.endswith("/"):
        brick = brick[0:len(brick)-1]

    working_dir = os.path.dirname(args.outfile)
    mkdirp(working_dir, exit_on_err=True, logger=logger)
    create_file(args.outfile, exit_on_err=True, logger=logger)

    with open(args.outfile, "a+") as fout:
        brick_path_len = len(brick)

        def output_callback(path, filter_result, is_dir):
            path = path.strip()
            path = path[brick_path_len+1:]

            if args.type == "both":
                output_write(fout, path, args.output_prefix,
                             encode=(not args.no_encode), tag=args.tag,
                             field_separator=args.field_separator)
            else:
                if (is_dir and args.type == "d") or (
                    (not is_dir) and args.type == "f"):
                    output_write(fout, path, args.output_prefix,
                    encode=(not args.no_encode), tag=args.tag,
                    field_separator=args.field_separator)

        ignore_dirs = [os.path.join(brick, dirname)
                       for dirname in
                       conf.get_opt("brick_ignore_dirs").split(",")]

        find(brick, callback_func=output_callback,
             ignore_dirs=ignore_dirs)

        fout.flush()
        os.fsync(fout.fileno())
Example #7
def test_authorization(browser):
    # Data
    auth_heading = 'Войти'  # "Log in"
    page_auth_locator = 'form#login_form h2'

    # Arrange
    browser.get(_locators.main_page_link)

    utils.find(browser, _locators.login_link).click()

    page_auth_detector = utils.find(browser, page_auth_locator).text
    assert auth_heading in page_auth_detector, \
        "Search heading '%s' should contain text '%s'" % (page_auth_detector, auth_heading)

    # Steps
    utils.authorizate(browser, test_data.email, test_data.password)

    WebDriverWait(browser, 5).until(
        EC.presence_of_element_located(
            (By.XPATH, _locators.success_message_locator)))

    # Assert
    success_message_text = utils.find_xpath(
        browser, _locators.success_message_locator).text
    assert _locators.success_message in success_message_text, \
        "Search success message '%s' should contain text '%s'" % (success_message_text, _locators.success_message)
Example #8
def gfid_to_path_using_batchfind(brick, gfids_file, output_file):
    """
    find -samefile gets the inode number and crawls entire namespace
    to get the list of files/dirs having same inode number.
    Do find without any option, except the ignore directory option,
    print the output in <INODE_NUM> <PATH> format, use this output
    to look into in-memory dictionary of inode numbers got from the
    list of GFIDs
    """
    with open(output_file, "a+") as fout:
        inode_dict = {}
        with open(gfids_file) as f:
            for gfid in f:
                gfid = gfid.strip()
                backend_path = os.path.join(brick, ".glusterfs",
                                            gfid[0:2], gfid[2:4], gfid)

                try:
                    inode_dict[str(os.stat(backend_path).st_ino)] = 1
                except (IOError, OSError) as e:
                    if e.errno == ENOENT:
                        continue
                    else:
                        fail("%s Failed to convert to path from "
                             "GFID %s: %s" % (brick, gfid, e), logger=logger)

        if not inode_dict:
            return

        def inode_filter(path):
            try:
                st = os.lstat(path)
            except (OSError, IOError) as e:
                if e.errno == ENOENT:
                    st = None
                else:
                    raise

            if st and inode_dict.get(str(st.st_ino), None):
                return True

            return False

        brick_path_len = len(brick)

        def output_callback(path):
            path = path.strip()
            path = path[brick_path_len+1:]
            output_write(fout, path, args.output_prefix)

        ignore_dirs = [os.path.join(brick, dirname)
                       for dirname in
                       conf.get_opt("brick_ignore_dirs").split(",")]
        # Length of brick path, to remove from output path
        find(brick, callback_func=output_callback,
             filter_func=inode_filter,
             ignore_dirs=ignore_dirs)

        fout.flush()
        os.fsync(fout.fileno())
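The gluster examples call find(brick, callback_func=..., filter_func=..., ignore_dirs=...) to crawl a brick. The callback arity varies across the versions shown here; a minimal sketch matching the single-argument callback and boolean filter used in this example (the real crawler is more involved):

import os

def find(path, callback_func, filter_func=None, ignore_dirs=None, subdirs_crawl=True):
    # walk `path`, pruning ignored directories, and pass every entry
    # that survives the optional filter to the callback
    ignore = set(ignore_dirs or [])
    for root, dirs, files in os.walk(path):
        dirs[:] = [d for d in dirs if os.path.join(root, d) not in ignore]
        for name in dirs + files:
            full_path = os.path.join(root, name)
            if filter_func is not None and not filter_func(full_path):
                continue
            callback_func(full_path)
        if not subdirs_crawl:
            break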
Example #9
def test_find():
    looking_for = {'a': 1, 'match': True}
    foo = [looking_for, {'b': 1}, {'a': 2}]
    match = find(foo, {'a': 1})
    assert match == looking_for
    match = find(foo, {'c': 1})
    assert not match
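This test pins down the semantics: find(items, query) returns the first dict containing every key/value pair of query, and a falsy value when nothing matches. A sketch that satisfies the test:

def find(items, query):
    # first dict whose entries include all of `query`, else None
    for item in items:
        if all(item.get(k) == v for k, v in query.items()):
            return item
    return None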
Example #10
    def filter(self, pts2D):
        """ Filter image points based on loaded mask. 
        
        Usage: Mask = BaseMask('mask.png')
               FilteredPts = Mask.filter(pts)
        
        Input:
            pts - 2-by-N numpy array of 2D points

        Output:
            FilteredPts - 2-by-N numpy array of 2D points. Those that agree 
            with the mask (i.e., those points [x, y] s.t. mask[x,y] = True)
            remain the same, while invalid points are zeroed.
        """
        
        # Find points that are not zero in image space
        idxPts = utils.find( np.logical_and(pts2D[0, :] > 0, pts2D[1,:] > 0) )

        # Get rounded and flattened (1D) pixel indexes for our 2D points
        idxPtsFlat = utils.ravel_index(pts2D[::-1, idxPts].astype(int), \
                                       self.data.shape)

        # Find invalid 2D points by comparing them to the mask
        idxInvalidPts = utils.find( self.data.flat[idxPtsFlat] == 0 )

        # Set invalid points to zero
        pts2D[:, idxPts[idxInvalidPts]] = 0

        return pts2D
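Here utils.find mirrors MATLAB's find: given a boolean array it returns the indices of the True entries, which are then used for fancy indexing. A one-line sketch using NumPy (an assumption, not the project's code):

import numpy as np

def find(condition):
    # indices of the nonzero (True) entries of a flattened boolean array
    return np.flatnonzero(condition)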
Example #11
def test_add_product_to_cart(browser):
    # Data
    success_add_message_locator = 'div.alertinner'
    success_message = 'был добавлен в вашу корзину'  # "was added to your cart"
    item_in_cart_locator = 'div.alertinner strong'
    item_in_cart_name = 'h3 a'

    # Arrange
    browser.get(_locators.main_page_link)

    utils.find_xpath(browser, _locators.catalog_link).click()

    page_catalog_detector = utils.find(browser,
                                       _locators.catalog_heading_locator).text
    assert _locators.catalog_heading in page_catalog_detector, \
        "Search heading '%s' should contain text '%s'" % (page_catalog_detector, _locators.catalog_heading)

    # Steps
    utils.click_add_to_cart(browser)

    success_add_message = utils.find(browser, success_add_message_locator).text
    assert success_message in success_add_message, \
        "Success message '%s' should contain text '%s'" % (success_add_message, success_message)

    what_is_in_cart = utils.find(browser, item_in_cart_locator).text

    utils.view_cart(browser)

    cart = utils.find(browser, item_in_cart_name).text

    # Assert
    assert what_is_in_cart == cart, "Wrong item in the cart"
Example #12
def parse_info_html(account, info_html, track_id):
    """
    组织raw对象
    :return:
    """
    raw = ResumeRaw()
    raw.trackId = track_id
    raw.source = 'FIVE_ONE'
    raw.content = info_html
    raw.email = account['userName']

    tree = xmlh.document_fromstring(info_html)
    img_url = common_utils.find(
        '.*?(/Candidate/ReadAttach.aspx\?UserID=\\d+).*?', info_html)

    try:
        submit_times = common_utils.find(
            '投递时间:<span class="blue_txt">(.*?)</span>', info_html)  # 投递时间 = "submission time"
        if submit_times:
            raw.resumeSubmitTime = submit_times
        update_times = tree.xpath('//*[@id="lblResumeUpdateTime"]/text()')
        if update_times:
            raw.resumeUpdateTime = str(update_times[0])
    except Exception as e:
        logger.error(e.message)

    if img_url:

        url = "http://ehire.51job.com/" + img_url
        oss_addr = inbox_utils.save_mobile_imgs_to_oss(url, 2, track_id)
        if oss_addr:
            raw.avatarUrl = oss_addr
    return raw
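common_utils.find(pattern, text) is used here as a regex helper that returns the first captured group, or a falsy value when the pattern does not match. A minimal sketch under that assumption:

import re

def find(pattern, text):
    # first captured group of the first match, else None
    match = re.search(pattern, text, re.S)
    return match.group(1) if match else None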
Example #13
    def prepare_nfold_datasets(self): # i.e. split into different train/ground-truth(test) dataset
        """
        Take previously pre-processed datasets & split into
        input: x_train, x_test,
        target/RF: y_train, y_test
        """
        for alpha in range(1, self.ALPHAs+1):
            if alpha != self.ALPHAs:
                gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI], separator='-')
            else:
                gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI+self.runoff_years], separator='-')
            new_cluster_dir = str(Path(self.tl_model.cluster_dir) / f'alpha_{alpha}_GT-{gt_years}')
            os.makedirs(new_cluster_dir, exist_ok=True)

            new_prepared_data_dir = str(Path(self.tl_model.prepared_data_dir) / f'alpha_{alpha}')
            os.makedirs(new_prepared_data_dir, exist_ok=True)
            
            if not (utils.find(f'*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir)
                    and utils.find(f'*alpha_{alpha}_standardized_stacked_arr.pkl', new_prepared_data_dir)):
                if not utils.find(f'*target*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir):
                    print(f"=> No input datasets pre-processed for alpha of {alpha}")
                    prepare.cut_target_dataset(self, alpha, new_prepared_data_dir)

                if not utils.find(f'*rf*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir):
                    print(f"=> No rainfall datasets pre-processed for alpha of {alpha}")
                    prepare.cut_rf_dataset(self, alpha, new_prepared_data_dir)
                
                print(f'Preprocessed pickles for alpha split {alpha} can be found @:\n{new_prepared_data_dir}')           
Example #14
    def h2(self):
        goal1_hd = 0
        goal2_hd = 0

        for row in range(self.max_row + 1):
            for col in range(self.max_col + 1):
                if (self.state[row][col] != 0):
                    if self.state[row][col] != Node.goal1[row][col]:
                        goal1_hd += 1
                    if self.state[row][col] != Node.goal2[row][col]:
                        goal2_hd += 1

        if (goal1_hd < goal2_hd):
            total_cost = 0

            for row in range(self.max_row + 1):
                for col in range(self.max_col + 1):
                    if self.state[row][col] != Node.goal1[row][col]:
                        goal_r, goal_c = find(Node.goal1, self.state[row][col])
                        total_cost += getLowestCost(row, col, goal_r, goal_c)
            return total_cost

        else:
            total_cost = 0

            for row in range(self.max_row + 1):
                for col in range(self.max_col + 1):
                    if self.state[row][col] != Node.goal2[row][col]:
                        goal_r, goal_c = find(Node.goal2, self.state[row][col])
                        total_cost += getLowestCost(row, col, goal_r, goal_c)
            return total_cost
Example #15
    def compile_scores(self):
        if not utils.find('*Brier_scores_weighted-avg_boxplot_with_bootstrapped_whole-model_mean*.png', self.alpha_general_dir):
            print('Plotting mean brier scores across all alphas...')
            evaluation.mean_brier_scores_all_alphas(self)

        if not utils.find('*ROC_whole-model-mean_all-alphas_micro-avged*.png', self.alpha_general_dir):
            print('Plotting ROC across all alphas...')
            evaluation.ROC_all_alphas(self)

        if not utils.find('*gridded_AUC_whole-model_v2*.png', self.alpha_general_dir):
            print('Plotting gridded AUC across all alphas...')
            evaluation.gridded_AUC_all_alphas(self)

        if not utils.find('*gridded_brier_whole-model_v2*.png', self.alpha_general_dir):
            print('Plotting gridded brier scores across all alphas...')
            evaluation.gridded_brier_all_alphas(self)

        # print(f'DEBUGGING: {utils.time_now()} - Plotting gridded AUC across all alphas...')
        # evaluation.gridded_AUC_all_alphas(self)

        # print(f'DEBUGGING: {utils.time_now()} - Plotting gridded brier scores across all alphas...')
        # evaluation.gridded_brier_all_alphas(self)
        
        with open(f'{self.alpha_general_dir}/flag', 'w+') as flag: pass # write flag to signal evaluation completed
Example #16
    def detect_prepared_datasets(self):
        """
        Pre-processing, including time-slicing, removal of NAs, stacking & standardizing.
        calls - 
        1. prepare.preprocess_time_series
        2. prepare.flatten_and_standardize_dataset
        """
        if utils.find('*target_ds_preprocessed.pkl', self.prepared_data_dir) and \
            utils.find('*rf_ds_preprocessed.pkl', self.prepared_data_dir) and \
            utils.find('*standardized_stacked_arr.pkl', self.prepared_data_dir):
            print('Pickles (preprocessed) found.')
            for pkl in utils.find('*preprocessed.pkl', self.prepared_data_dir):
                if "target_ds" in pkl: self.target_ds_preprocessed_path = pkl
                elif "rf_ds" in pkl: self.rf_ds_preprocessed_path = pkl
            
            LocalModelParams(self, utils.open_pickle(self.target_ds_preprocessed_path))

            for pkl in utils.find('*standardized_stacked_arr.pkl', self.prepared_data_dir):
                self.standardized_stacked_arr_path = pkl
        else:
            print('Pickles of pre-processed data incomplete. Proceeding to load & process raw dataset pickles.')
            self.target_ds_preprocessed_path, self.rf_ds_preprocessed_path = prepare.preprocess_time_series(self, self.prepared_data_dir, self.ALPHAs)

            LocalModelParams(self, utils.open_pickle(self.target_ds_preprocessed_path)) # generate new local model params

            self.standardized_stacked_arr_path = prepare.flatten_and_standardize_dataset(self, self.prepared_data_dir)
        print(f'--> Months for this dataset are: {self.month_names}')
Example #17
def start_one_job(account):
    """开启一个帐号的任务
    :account dict : 帐号,含有cookie
    """
    global proxy
    track_id = str(uuid.uuid1())
    url = "http://ehire.51job.com/Inbox/InboxRecentEngine.aspx?Style=1"
    refer_url = "http://ehire.51job.com/Navigate.aspx?ShowTips=11&PwdComplexity=N"
    proxy = common_utils.get_proxy()
    list_html = conn_html(account,
                          url,
                          5,
                          refer_url=refer_url,
                          track_id=track_id)
    # list_html = open('text_htl').read()  # for testing
    while True:
        if list_html:
            if 'login' == list_html:
                # login required
                logger.error("Login page encountered %s" % account['userName'])
                return 'login'
            else:
                hidEngineCvlogIds = common_utils.find(
                    '<input name="hidEngineCvlogIds" type="hidden" id="hidEngineCvlogIds" value="(.*?)" />',
                    list_html)
                __VIEWSTATE = common_utils.find(
                    '<input type="hidden" name="__VIEWSTATE" id="__VIEWSTATE" value="(.*?)" />',
                    list_html)
                resume_ids = parse_list_html(list_html, track_id=track_id)
                if 'none' == resume_ids:
                    logger.info("No more mail in the inbox -- %s" % account['userName'])
                    return 'over'
                elif 'refer-login' == resume_ids:
                    logger.error("Login page encountered %s" % account['userName'])
                    return 'login'

                if resume_ids:
                    ids_for = list(resume_ids)
                    logger.info('Resume count: %s' % len(resume_ids))
                    for id in ids_for:
                        info_url = 'http://ehire.51job.com/Candidate/ResumeViewFolder.aspx?hidSeqID=%s&hidFolder=EMP' % id
                        flag = info_main(account, info_url, track_id)
                        # flag = True
                        if 'login' == flag:
                            logger.error("Login page encountered %s" % account['userName'])
                            return 'login'
                        if not flag:  # failed?
                            resume_ids.remove(id)

                    # 测试
                    # resume_ids = ['9229836941', ]
                    data = get_refer_data(resume_ids, __VIEWSTATE,
                                          hidEngineCvlogIds)
                    # list_html = refer_list_html(account, data, 4)
                else:  # parse failed
                    logger.error("Could not parse resumes from page %s" % account['userName'])
                    return 'error'
        else:  # error page
            logger.error("Error page encountered %s" % account['userName'])
            return 'error'
Example #18
def parse_logical_bgp_sessions(data):
    peerings = []

    for ls in find('collection.junos:logical-systems', data, default=[]):
      for group in find('protocols.bgp.group', ls, default=[]):
        parse_bgp_group(group, peerings)
    return peerings
Example #19
def gfid_to_path_using_batchfind(brick, changelog_data):
    # If all the GFIDs converted using gfid_to_path_using_pgfid
    if not changelog_data.inodegfid_exists({"converted": 0}):
        return

    def inode_filter(path):
        # Looks in inodegfid table, if exists returns
        # inode number else None
        try:
            st = os.lstat(path)
        except (OSError, IOError):
            st = None

        if st and changelog_data.inodegfid_exists({"inode": st.st_ino}):
            return st.st_ino

        return None

    # Length of brick path, to remove from output path
    brick_path_len = len(brick)

    def output_callback(path, inode):
        # For each path found, encodes it and updates path1
        # Also updates converted flag in inodegfid table as 1
        path = path.strip()
        path = path[brick_path_len + 1 :]
        path = output_path_prepare(path, args.output_prefix)

        changelog_data.append_path1(path, inode)

    ignore_dirs = [os.path.join(brick, dirname) for dirname in conf.get_opt("brick_ignore_dirs").split(",")]

    # Full Namespace Crawl
    find(brick, callback_func=output_callback, filter_func=inode_filter, ignore_dirs=ignore_dirs)
Example #20
def prepare_dataset(model, dest):
    """
    - xr.open_mfdataset() = loading
    - restricting to certain variables + "levels" of variables
    - combining variables xarrays into one
    - restricting to only between 1999 to 2019
    - slicing domain dimensions up to required specs (i.e. model.LON_S, model.LON_N, etc...)
    - slicing up to chosen period only
    - pickling the datasets (both input & rainfall) & returning them
    """
    # searching for raw data pickles
    preloaded_input_pickles = utils.find('*.pkl', model.raw_input_dir)
    if preloaded_input_pickles:
        print('Preloaded raw INPUT data pickles found...')
        ds_CHOSEN_VARS_renamed = utils.open_pickle(utils.find('*.pkl', model.raw_input_dir)[0])
    else: 
        print('Creating pickles of raw input data...')
        ds_CHOSEN_VARS_renamed = save_preloaded_raw_input_data(model)
    
    preloaded_input_pickles = utils.find('*.pkl', model.raw_rf_dir)
    if preloaded_input_pickles:
        print('Preloaded raw rainfall data pickles found...')
        ds_RAINFALL = utils.open_pickle(utils.find('*.pkl', model.raw_rf_dir)[0])
    else: 
        print('Creating pickles of raw rainfall data...')
        ds_RAINFALL = save_preloaded_raw_rf_data(model)

    print("Proceeding to do preliminary data cleaning...")
    ds_sliced = ds_CHOSEN_VARS_renamed.sel(
        level=slice(np.min(model.unique_pressure_lvls),np.max(model.unique_pressure_lvls)), 
        lat=slice(model.LAT_N,model.LAT_S), lon=slice(model.LON_W,model.LON_E),
        time=slice('1999', '2019'))
    ds_sliced_rhum = ds_sliced.rhum
    ds_sliced_rhum_no925 = ds_sliced_rhum.drop_sel({"level":925})
    ds_sliced_uwnd_only = ds_sliced.uwnd
    ds_sliced_vwnd_only = ds_sliced.vwnd
    ds_combined_sliced = xr.merge([ds_sliced_rhum_no925, ds_sliced_uwnd_only, ds_sliced_vwnd_only], compat='override')

    rf_ds_sliced = ds_RAINFALL.sel(lat=slice(model.LAT_S, model.LAT_N), lon=slice(model.LON_W,model.LON_E))
    print('Pickling domain- & feature-constrained input & RF datasets...')
    if model.period == "NE_mon":
        input_ds = ds_combined_sliced.sel(time=is_NE_mon(ds_combined_sliced['time.month']))
        rf_ds = rf_ds_sliced.sel(time=is_NE_mon(rf_ds_sliced['time.month']))
        input_ds_serialized_path = utils.to_pickle('raw_input_ds_NE_mon_serialized', input_ds, dest)
        rf_ds_serialized_path = utils.to_pickle('raw_rf_ds_NE_mon_serialized', rf_ds, dest)
        return input_ds_serialized_path, rf_ds_serialized_path
    elif model.period == "SW_mon":
        input_ds = ds_combined_sliced.sel(time=is_SW_mon(ds_combined_sliced['time.month']))
        rf_ds = rf_ds_sliced.sel(time=is_SW_mon(rf_ds_sliced['time.month']))
        input_ds_serialized_path = utils.to_pickle('raw_input_ds_SW_mon_serialized', input_ds, dest)
        rf_ds_serialized_path = utils.to_pickle('raw_rf_ds_SW_mon_serialized', rf_ds, dest)
        return input_ds_serialized_path, rf_ds_serialized_path
    elif model.period == "inter_mon":
        input_ds = ds_combined_sliced.sel(time=is_inter_mon(ds_combined_sliced['time.month']))
        rf_ds = rf_ds_sliced.sel(time=is_inter_mon(rf_ds_sliced['time.month']))
        input_ds_serialized_path = utils.to_pickle('raw_input_ds_inter_mon_serialized', input_ds, dest)
        rf_ds_serialized_path = utils.to_pickle('raw_rf_ds_inter_mon_serialized', rf_ds, dest)
        return input_ds_serialized_path, rf_ds_serialized_path
Example #21
def gfid_to_path_using_pgfid(brick, changelog_data, args):
    """
    For all the pgfids collected, Converts to Path and
    does readdir on those directories and looks up inodegfid
    table for matching inode number.
    """
    populate_pgfid_and_inodegfid(brick, changelog_data)

    # If no GFIDs needs conversion to Path
    if not changelog_data.inodegfid_exists({"converted": 0}):
        return

    def inode_filter(path):
        # Looks in inodegfid table, if exists returns
        # inode number else None
        try:
            st = os.lstat(path)
        except (OSError, IOError):
            st = None

        if st and changelog_data.inodegfid_exists({"inode": st.st_ino}):
            return st.st_ino

        return None

    # Length of brick path, to remove from output path
    brick_path_len = len(brick)

    def output_callback(path, inode):
        # For each path found, encodes it and updates path1
        # Also updates converted flag in inodegfid table as 1
        path = path.strip()
        path = path[brick_path_len + 1:]

        path = output_path_prepare(path, args)

        changelog_data.append_path1(path, inode)
        changelog_data.inodegfid_update({"converted": 1}, {"inode": inode})

    ignore_dirs = [
        os.path.join(brick, dirname)
        for dirname in conf.get_opt("brick_ignore_dirs").split(",")
    ]

    for row in changelog_data.pgfid_get():
        try:
            path = symlink_gfid_to_path(brick, row[0])
            find(os.path.join(brick, path),
                 callback_func=output_callback,
                 filter_func=inode_filter,
                 ignore_dirs=ignore_dirs,
                 subdirs_crawl=False)
        except (IOError, OSError) as e:
            logger.warn("Error converting to path: %s" % e)
            continue
Example #22
    def train_SOM(self, alpha=None):
        d_hp_dir_path = str(utils.models_dir / self.dir_hp_str)
        self.d_hp_dir_path = d_hp_dir_path
        os.makedirs(d_hp_dir_path, exist_ok=True)
        if not utils.find(f'*extent_{self.dir_str}.png', self.d_hp_dir_path):
            visualization.get_domain_geometry(self, self.d_hp_dir_path)
            
        models_dir_path = str(utils.models_dir / self.dir_hp_str / self.period) + f'_{self.month_names_joined}'
        os.makedirs(models_dir_path, exist_ok=True)
        self.models_dir_path = models_dir_path
        # utils.update_cfgfile('Paths', 'models_dir_path', self.models_dir_path)

        if alpha:
            destination = self.alpha_model_dir
            arr_path = self.alpha_standardized_stacked_arr_path
            prefix = f'alpha_{alpha}_'
            prompt = f'< alpha-{alpha} >'
        else:
            destination = self.models_dir_path
            arr_path = self.standardized_stacked_arr_path
            prefix = ''
            prompt = ''

        print(f'Destination: "{destination}", arr_path: "{arr_path}", prefix: "{prefix}"')

        if utils.find(f'*{prefix}som_model.pkl', destination):
            print(f'{utils.time_now()} - SOM model trained before, skipping...')
            self.som_model_path = utils.find(f'*{prefix}som_model.pkl', destination)[0]
        else:
            print(f'{utils.time_now()} - {prompt} No SOM model trained for {self.domain}, {self.period}, for {self.hyperparameters}, doing so now...')

            standardized_stacked_arr = utils.open_pickle(arr_path)

            sominitstarttime = timer(); print(f'{utils.time_now()} - Initializing MiniSom... ')
            som = MiniSom(self.gridsize, self.gridsize, # square
                        standardized_stacked_arr.shape[1],
                        sigma=self.sigma, learning_rate=self.learning_rate,
                        neighborhood_function='gaussian', random_seed=self.random_seed)
            """
            Note: initializing PCA for weights is faster (~1/2 hour), but for serialized arrays > 300mb, 
            chances are this will kill the RAM and halt the entire process. 
            """
##            try:
##                som.pca_weights_init(standardized_stacked_arr)
##            except MemoryError as e:
##                print(f'Memory error has occured: \n{e}')
            print(f"Initialization took {utils.time_since(sominitstarttime)}.\n")

            trainingstarttime = timer(); print(f"{utils.time_now()} - Beginning training.")
            getattr(som, self.training_mode)(standardized_stacked_arr, self.iterations, verbose=True)
            q_error = np.round(som.quantization_error(standardized_stacked_arr), 2)
            print(f"Training complete. Q error is {q_error}, time taken for training is {utils.time_since(trainingstarttime)}s\n")

            self.som_model_path = utils.to_pickle(f'{self.RUN_datetime}_{prefix}som_model', som, destination)
Example #23
def gfid_to_path_using_pgfid(brick, changelog_data, args):
    """
    For all the pgfids collected, Converts to Path and
    does readdir on those directories and looks up inodegfid
    table for matching inode number.
    """
    populate_pgfid_and_inodegfid(brick, changelog_data)

    # If no GFIDs needs conversion to Path
    if not changelog_data.inodegfid_exists({"converted": 0}):
        return

    def inode_filter(path):
        # Looks in inodegfid table, if exists returns
        # inode number else None
        try:
            st = os.lstat(path)
        except (OSError, IOError):
            st = None

        if st and changelog_data.inodegfid_exists({"inode": st.st_ino}):
            return st.st_ino

        return None

    # Length of brick path, to remove from output path
    brick_path_len = len(brick)

    def output_callback(path, inode):
        # For each path found, encodes it and updates path1
        # Also updates converted flag in inodegfid table as 1
        path = path.strip()
        path = path[brick_path_len+1:]

        path = output_path_prepare(path, args)

        changelog_data.append_path1(path, inode)
        changelog_data.inodegfid_update({"converted": 1}, {"inode": inode})

    ignore_dirs = [os.path.join(brick, dirname)
                   for dirname in
                   conf.get_opt("brick_ignore_dirs").split(",")]

    for row in changelog_data.pgfid_get():
        try:
            path = symlink_gfid_to_path(brick, row[0])
            find(os.path.join(brick, path),
                 callback_func=output_callback,
                 filter_func=inode_filter,
                 ignore_dirs=ignore_dirs,
                 subdirs_crawl=False)
        except (IOError, OSError) as e:
            logger.warn("Error converting to path: %s" % e)
            continue
Example #24
def parse_logical_interfaces(data, interfaces):
    # make interface map for quick lookups
    if_map = { i.name: i for i in interfaces }
    for ls in find('collection.junos:logical-systems', data, default=[]):
        for i in find('interfaces.interface', ls, default=[]):
            iface = parse_interface(i, logical_system=ls['name'])
            if iface.name in if_map:
                if_map[iface.name].unitdict += iface.unitdict
            else:
                interfaces.append(iface)
    return interfaces
Example #25
    def test_simple_nested(self):
        data = {
            'test': {
                'hest': {
                    'test': 'best'
                }
            }
        }

        self.assertEqual(find('test.hest.test', data), 'best')
        self.assertIn('test', find('test.hest', data))
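This test fixes the semantics used by the junos parsing examples as well: find(path, data) walks nested dicts along a dot-separated key path. A sketch that satisfies the assertions, with a default parameter matching the find(..., default=[]) calls above:

def find(path, data, default=None):
    # walk nested dicts by a dot-separated key path, returning
    # `default` when any segment is missing
    current = data
    for key in path.split('.'):
        try:
            current = current[key]
        except (KeyError, TypeError):
            return default
    return current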
Example #26
def main():
  aunts = parse(sys.stdin.readlines())
  
  similar = lambda ref, x: all(x.get(k, v) == v for k, v in ref.items())
  best_aunt = find(partial(similar, REF_AUNT), aunts)
  print('part 1:', best_aunt['id'])

  key2cmp = {'cats': ge, 'trees': ge, 'pomeranians': le, 'goldfish': le}
  compare = lambda key, fst, snd: key2cmp.get(key, eq)(fst, snd)
  similar = lambda ref, x: all(compare(k, x.get(k, v), v) for k, v in ref.items())
  best_aunt = find(partial(similar, REF_AUNT), aunts)
  print('part 2:', best_aunt['id'])
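In this example find(predicate, iterable) returns the first element satisfying the predicate (the same shape as the utils.find(lambda e: ..., ...) calls in the event-handler examples). A one-line sketch:

def find(predicate, iterable):
    # first element satisfying `predicate`, or None when none does
    return next((item for item in iterable if predicate(item)), None)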
Example #27
def parse_router(data, chassis_data=None):
    router_data = data['tailf-ncs:device']

    router = Router()
    name = hostname_clean(router_data['address'])
    router.name = name
    router.version = find('config.junos:configuration.version', router_data)
    if chassis_data:
        chassis = find('junos-rpc:output.chassis-inventory.chassis', chassis_data, default={})
        if chassis:
            router.model = chassis['description']
        router.hardware = parse_chassis(chassis)
    return router
Example #28
def main():
    aunts = parse(sys.stdin.readlines())

    similar = lambda ref, x: all(x.get(k, v) == v for k, v in ref.items())
    best_aunt = find(partial(similar, REF_AUNT), aunts)
    print('part 1:', best_aunt['id'])

    key2cmp = {'cats': ge, 'trees': ge, 'pomeranians': le, 'goldfish': le}
    compare = lambda key, fst, snd: key2cmp.get(key, eq)(fst, snd)
    similar = lambda ref, x: all(
        compare(k, x.get(k, v), v) for k, v in ref.items())
    best_aunt = find(partial(similar, REF_AUNT), aunts)
    print('part 2:', best_aunt['id'])
Example #29
def parse_interface(item, logical_system=None):
    iface = Interface()
    iface.name = item['name']
    iface.description = item.get('description')
    iface.vlantagging = 'vlan-tagging' in item or 'flexible-vlan-tagging' in item
    iface.unitdict = [parse_unit(u, logical_system) for u in item.get('unit', [])]
    iface.bundle = find_first('bundle', item) or None
    iface.tunneldict = [
        {
            'source': find('tunnel.source', u),
            'destination': find('tunnel.destination', u)
        } for u in item.get('unit', []) if 'tunnel' in u
    ]
    return iface
Example #30
    def h1(self):
        total_cost1 = 0
        total_cost2 = 0

        for row in range(self.max_row + 1):
            for col in range(self.max_col + 1):
                if self.state[row][col] != Node.goal1[row][col]:
                    goal_r, goal_c = find(Node.goal1, self.state[row][col])
                    total_cost1 += getLowestCost(row, col, goal_r, goal_c)
                if self.state[row][col] != Node.goal2[row][col]:
                    goal_r, goal_c = find(Node.goal2, self.state[row][col])
                    total_cost2 += getLowestCost(row, col, goal_r, goal_c)

        return min(total_cost1, total_cost2)
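Here find(grid, value) returns the (row, col) coordinates of a tile value inside a goal board. A straightforward sketch, assuming each value occurs at most once:

def find(grid, value):
    # (row, col) of `value` in a 2D list, or None if absent
    for r, row in enumerate(grid):
        for c, cell in enumerate(row):
            if cell == value:
                return r, c
    return None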
Example #31
def findType(location,
             bounds,
             data,
             allData,
             placeType,
             writeYmls=False,
             useCached=False):
    print('findType ' + str(placeType) + ' cached=' + str(useCached))
    typeData = {}
    allTypeData = []
    if (useCached):
        try:
            stream = open(criterionName + '.' + placeType + '.all.yml', 'r')
            cachedData = load(stream, Loader=Loader)
            stream.close()
        except Exception as e:
            print(str(e))
            raise Exception('ERROR: Failed to load yaml file ' + placeType +
                            '.all.yml')

        allTypeData = cachedData
        for p in cachedData:
            gRec = {
                'name': p['name'],
                'vicinity': p['vicinity'],
                'location': p['geometry']['location']
            }
            typeData[p['place_id']] = gRec
    else:
        utils.find(location,
                   bounds,
                   typeData,
                   allTypeData,
                   placeType=placeType,
                   doSplit=True)
    data.update(typeData)
    allData += allTypeData

    if (writeYmls):
        with open(criterionName + '.' + placeType + '.yml', 'w') as yaml_file:
            dump(typeData, yaml_file, default_flow_style=False, Dumper=Dumper)

        if (not useCached):
            with open(criterionName + '.' + placeType + '.all.yml',
                      'w') as yaml_file:
                dump(allTypeData,
                     yaml_file,
                     default_flow_style=False,
                     Dumper=Dumper)
Example #32
def test_button_add_to_cart(browser):
    browser.get(_locators.main_page_link)
    utils.find_xpath(browser, _locators.catalog_link).click()

    WebDriverWait(browser, 10).until(
        EC.presence_of_element_located(
            (By.CSS_SELECTOR, _locators.book_locator)))

    utils.find(browser, _locators.book_locator).click()

    WebDriverWait(browser, 10).until(
        EC.presence_of_element_located(
            (By.XPATH, _locators.button_add_to_cart)))
    button_to_add = browser.find_element_by_xpath(_locators.button_add_to_cart)
    assert button_to_add is not None, "There is no button(("
Example #33
        def get_arch_srcs(prebuilt, variant):
            """Returns build rule for arch specific srcs.

            e.g.,
                arch: {
                    arm: {
                        srcs: ["..."]
                    },
                    arm64: {
                        srcs: ["..."]
                    },
                }

            Args:
              prebuilt: string, name of prebuilt object
              variant: string, VNDK snapshot variant (e.g. 'arm64')
            """
            arch_srcs = '{ind}arch: {{\n'.format(ind=self.INDENT)
            variant_path = os.path.join(self._install_dir, variant)
            src_paths = utils.find(variant_path, [prebuilt])
            for src in sorted(src_paths):
                arch_srcs += (
                    '{ind}{ind}{arch}: {{\n'
                    '{ind}{ind}{ind}srcs: ["{src}"],\n'
                    '{ind}{ind}}},\n'.format(
                        ind=self.INDENT,
                        arch=utils.arch_from_path(os.path.join(variant, src)),
                        src=src))
            arch_srcs += '{ind}}},\n'.format(ind=self.INDENT)
            return arch_srcs
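The VNDK snapshot examples call utils.find(root, [name, ...]) and get back paths, relative to root, of files whose basename is in the list. A sketch consistent with that usage (the AOSP helper may differ in detail):

import os

def find(path, names):
    # relative paths (under `path`) of files whose basename is in `names`
    found = []
    for root, _, files in os.walk(path):
        for file_name in sorted(files):
            if file_name in names:
                found.append(os.path.relpath(os.path.join(root, file_name), path))
    return found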
Example #34
def main():
    with open(find('setup_agriculture.json', '/')) as f:
        setup = json.load(f)

    local_storage: str = setup.get('local_storage')

    logging.basicConfig(
        filename=os.path.join(local_storage, 'log.log'),
        level=logging.WARNING,
        format='%(asctime)s %(levelname)s %(name)s %(message)s')

    temperature_port: int = setup['temperature'].get('temperature_port')
    period: int = setup['temperature'].get('period')
    wait: float = setup['temperature'].get('wait')

    temperature_sensor = Temperature(temperature_port)

    filename = os.path.join(local_storage,
                            'temperature_' + str(date.today()) + '.txt')

    if not os.path.exists(filename):
        with open(filename, 'w+') as f:
            f.write('Timestamp, Temperature\n')

    while exit_on_time(setup['temperature'].get('exit_time')):
        measurement = catch_measurement(sensor=temperature_sensor,
                                        period=period,
                                        wait=wait)
        save_measurement(measurement=measurement, path=filename)

    quit()
Example #35
def get_or_create_test_host(c):
    instances = all_hosts(c)
    test_host = find(instances, {'tags': {'name': 'test_host', 'type': 'skipper'}})
    if not test_host:
        test_host = create_host(c)
        test_host.add_tag('name', 'test_host')
    return test_host
Example #36
def write_split_csv(save_dir, csv_dir, train_percent=0.8, test_percent=0.1):
    os.makedirs(save_dir, exist_ok=True)

    slides = find('*.csv', csv_dir)
    random.shuffle(slides)

    test_num = max(1, int(test_percent * len(slides)))  # ensure at least one test slide

    test_slides = slides[:test_num]
    train_valid_slides = slides[test_num:]

    test_set = pd.DataFrame()
    for s in test_slides:
        test_set = test_set.append(pd.read_csv(s))

    train_valid_set = pd.DataFrame()
    for s in train_valid_slides:
        train_valid_set = train_valid_set.append(pd.read_csv(s))

    train_valid_shuffled = train_valid_set.sample(frac=1)

    # Calculate index for split 80:20 ratio
    train_valid_index = round(len(train_valid_shuffled) * train_percent)

    # Split into training and test sets
    train_set = train_valid_shuffled[:train_valid_index].reset_index(drop=True)
    valid_set = train_valid_shuffled[train_valid_index:].reset_index(drop=True)

    train_set.to_csv(os.path.join(save_dir, 'train-compress.csv'), index=False)
    test_set.to_csv(os.path.join(save_dir, 'test-compress.csv'), index=False)
    valid_set.to_csv(os.path.join(save_dir, 'valid-compress.csv'), index=False)
Example #37
    def request_process_info(self, regex=None):
        # info_list = [download.info_model() for download in self.processes]
        if regex:
            if find(regex, self.processes[-1].file_name):
                self.downloadInfo_returned.emit(self.processes[-1].info_model())
        else:
            self.downloadInfo_returned.emit(self.processes[-1].info_model())
Example #38
        def get_arch_srcs(prebuilt, arch):
            """Returns build rule for arch specific srcs.

            e.g.,
                arch: {
                    arm: {
                        srcs: ["..."]
                    },
                    arm64: {
                        srcs: ["..."]
                    },
                }

            Args:
              prebuilt: string, name of prebuilt object
              arch: string, VNDK snapshot arch (e.g. 'arm64')
            """
            arch_srcs = '{ind}arch: {{\n'.format(ind=self.INDENT)
            src_paths = utils.find(src_root, [prebuilt])
            # filter out paths under 'binder32' subdirectory
            src_paths = filter(lambda src: not src.startswith(utils.BINDER32),
                               src_paths)

            for src in sorted(src_paths):
                arch_srcs += ('{ind}{ind}{arch}: {{\n'
                              '{ind}{ind}{ind}srcs: ["{src}"],\n'
                              '{ind}{ind}}},\n'.format(
                                  ind=self.INDENT,
                                  arch=utils.prebuilt_arch_from_path(
                                      os.path.join(arch, src)),
                                  src=src))
            arch_srcs += '{ind}}},\n'.format(ind=self.INDENT)
            return arch_srcs
Example #40
File: cme.py Project: poneill/amic
def rate_matrix(q,koffs,verbose=False):
    """Generate the stochastic rate matrix for the givens system."""
    # Chromosome states can be represented by binary numerals; order the
    # states this way.
    G = len(koffs)
    states = enumerate_states(G,q)
    num_states = len(states)
    assert len(states) == sum(choose(G,i) for i in range(q+1))
    R = np.zeros((num_states,num_states))
    for i,state_i in enumerate(states):
        for j,state_j in enumerate(states):
            if verbose:
                print "considering:",i,state_i,"->",j,state_j
            dist = hamming(state_i,state_j)
            if dist != 1:
                # deal with diagonal elements later...
                if verbose:
                    print "distance is:",dist,"continuing..."
                continue
            if sum(state_j) == sum(state_i) + 1:
                R[i][j] = q - sum(state_i)
                if verbose:
                    print i,state_i,"->",j,state_j, "is an on-reaction, rate:",R[i][j]
            elif sum(state_j) == sum(state_i) - 1:
                diff_idx,diff_site = find(lambda (idx,(si,sj)):si != sj,enumerate(zip(state_i,state_j)))
                R[i][j] = koffs[diff_idx]
                if verbose:
                    print i,state_i,"->",j,state_j, "is an off-reaction (at site",diff_idx,")  rate:",R[i][j]
    # deal with diagonal elements
    for i in range(num_states):
        R[i][i] = -sum(R[i])
    print "finished rate matrix"
    return R
Example #41
def dl_fna(species_name):
    """Dl fna if necessary, return filename"""
    accession = dl_gbk(species_name)
    print "accession:",accession
    fna_name = accession + ".fna"
    print "fna_name:",fna_name
    target_path = os.path.join("data",species_name,fna_name)
    if os.path.isfile(target_path):
        print "found fna:",target_path
        return target_path
    print "didn't find fna for:",species_name,"downloading"
    host.chdir('/genomes/Bacteria/')
    dir_list = host.listdir(host.curdir)
    sorted_dir_list = sorted(dir_list,key=lambda fname:levenshtein(species_name,fname))
    for dir_name in sorted_dir_list:
        print "trying:",dir_name
        try:
            host.chdir('/genomes/Bacteria/' + dir_name + '/')
            sub_dir_list = host.listdir(host.curdir)
            if find(lambda name:name.startswith(accession),sub_dir_list):
                host.download(fna_name,target_path)
                return target_path
        except:
            continue
    print "Couldn't find fna for:",species_name
    return None
Example #42
    def get(self, event_code, talk_code):
        logging.info('Event Code: %s Talk Code: %s ' % (event_code, talk_code))
        talk_code = talk_code.upper()
        event_code = event_code.lower()

        event = utils.find(lambda e: event_code == e.code, Events().get_all())
        talk = Talks().get_talk_for_event(event_code, talk_code)

        if((talk is None) or (event is None)):
            self.response.write('Resource Not Found')
            self.response.set_status(404)
            return

        talk = talk[0]

        logging.info('Talk Details: ' + talk.code)

        feedbacks = get_talk_feedbacks(talk_code)

        template_values = {}
        template_values['feedbacks'] = feedbacks
        template_values['talk'] = talk
        template_values['header'] = event.header
        template_values['feedbacksCount'] = len(feedbacks)
        template_values['averageRating'] = 0 if len(feedbacks) == 0 else  sum(
            [int(ast.literal_eval(feedback.content)['ratingC']) for feedback in feedbacks]) / len(feedbacks)

        template = jinja_environment.get_template(html_path + 'view.html')
        self.response.out.write(template.render(template_values))
Example #43
def get_or_create_security_group(c):
    groups = c.get_all_security_groups()
    group = find(groups, {'name': 'skipper'})
    if not group:
        print("Creating new security group.")
        group = c.create_security_group("skipper", "Skipper security group")
    else:
        print("Found existing security group.")
    return group
Example #44
def brickfind_crawl(brick, args):
    if brick.endswith("/"):
        brick = brick[0:len(brick)-1]

    working_dir = os.path.dirname(args.outfile)
    mkdirp(working_dir, exit_on_err=True, logger=logger)
    create_file(args.outfile, exit_on_err=True, logger=logger)

    with open(args.outfile, "a+") as fout:
        brick_path_len = len(brick)

        def mtime_filter(path):
            try:
                st = os.lstat(path)
            except (OSError, IOError) as e:
                if e.errno == ENOENT:
                    st = None
                else:
                    raise

            if st and (st.st_mtime > args.start or st.st_ctime > args.start):
                return True

            return False

        def output_callback(path):
            path = path.strip()
            path = path[brick_path_len+1:]
            output_write(fout, path, args.output_prefix)

        ignore_dirs = [os.path.join(brick, dirname)
                       for dirname in
                       conf.get_opt("brick_ignore_dirs").split(",")]

        if args.full:
            find(brick, callback_func=output_callback,
                 ignore_dirs=ignore_dirs)
        else:
            find(brick, callback_func=output_callback,
                 filter_func=mtime_filter,
                 ignore_dirs=ignore_dirs)

        fout.flush()
        os.fsync(fout.fileno())
Example #45
def search_files(request):
    from utils import find
    query = request.GET.get('query',None)
    results=[]
    if query:
        shares = Share.user_queryset(request.user)
        for s in shares:
            r=find(s,query,prepend_share_id=False)
            results.append({'share':s,'results':r})
    return render(request, 'search/search_files.html', {'query':query,'results':results})
Example #46
File: core.py Project: mattorb/Ale
def getCommandInstance(command):
    try:
        module = importCommand(command)
        commandInstances = [commandClass() for commandClass in Command.__subclasses__()]
        commandToExec = utils.find(lambda recipes_installedCommand: recipes_installedCommand.name == command,
                                   commandInstances)
    except ImportError, e:
        logging.error('Unknown command: %s.' % command)
        print 'Search available commands with "ale search", install new command with "ale install <command>"'
        return
Example #47
    def __init__(self, filename, rootname, glade_dir="."):
        """Initialize a new instance.
        'filename' is the name of the .glade file containing the UI hierarchy.
        'rootname' is the name of the topmost widget to be loaded.
        'glade_dir' is the name of the directory, relative to the Python
        path, in which to search for 'filename'."""
        if glade_dir:
            filename = os.path.join(glade_dir, filename)
        self._glade_path_name = utils.find(filename)

        gtk.glade.XML.__init__(self, self._glade_path_name, rootname)
        self.root = self.get_widget(rootname)
Example #48
def patch():
  os.chdir(utils.mojo_root_dir)

  utils.filter_file("build/landmines.py", lambda line: not "gyp" in line)
  utils.commit("filter gyp out of build/landmines.py")

  utils.filter_file("gpu/BUILD.gn", lambda line: not "//gpu/ipc" in line)
  utils.commit("filter gpu/ipc out of gpu/BUILD.gn")

  for p in utils.find(["*.patch"], os.path.dirname(os.path.realpath(__file__))):
    print "applying patch %s" % os.path.basename(p)
    utils.system(["git", "apply", p])
    utils.commit("applied patch %s" % os.path.basename(p))
Example #49
    def _parse_lib_list(self, txt_filename):
        """Returns a map of VNDK library lists per VNDK snapshot arch.

        Args:
          txt_filename: string, name of snapshot config file

        Returns:
          dict, e.g. {'arm64': ['libfoo.so', 'libbar.so', ...], ...}
        """
        lib_map = dict()
        for txt_path in utils.find(self._install_dir, [txt_filename]):
            arch = utils.snapshot_arch_from_path(txt_path)
            abs_path_of_txt = os.path.join(self._install_dir, txt_path)
            with open(abs_path_of_txt, 'r') as f:
                lib_map[arch] = f.read().strip().split('\n')
        return lib_map
Example #50
def search_ok(evt):
	text = appenv.search_dialog.FindWindowByName("what").Value
	if text == "":
		# "Cannot search, no search term was specified." / "Cannot continue"
		wx.MessageBox(u"Nelze vyhledávat, nebylo specifikováno hledané.", u"Nelze pokračovat", wx.ICON_ERROR, appenv.search_dialog)
	else:
		if appenv.search_dialog.FindWindowByName("re").Value:
			pos = utils.find_by_regex(text, appenv.policko.Value, appenv.search_dialog.FindWindowByName("case").Value)
		else:
			pos = utils.find(text, appenv.policko.Value, appenv.search_dialog.FindWindowByName("case").Value)
		if pos >= 0:
			appenv.policko.InsertionPoint = pos
			appenv.search_dialog.Hide()
		elif pos == -1:
			# "The searched text was not found." / "Not found"
			wx.MessageBox(u"Hledaný text nebyl nalezen.", u"Nenalezeno", wx.ICON_ERROR, appenv.search_dialog)
		elif pos == -2:
			# "The entered regular expression is not syntactically valid." / "Invalid regular expression"
			wx.MessageBox(u"Vložený regulární výraz není syntakticky správný.", u"Neplatný regulární výraz", wx.ICON_ERROR, appenv.search_dialog)
Example #51
0
    def get(self, event_code):
        event_code = event_code.lower()
        event = utils.find(lambda e: event_code == e.code, Events().get_all())
        if event is None:
            self.response.write('Resource Not Found')
            self.response.set_status(404)
            return

        template_values = {}

        feedbacks = get_all_feedbacks()
        template_values['feedbacks'] = feedbacks
        template_values['feedbacksCount'] = len(feedbacks)

        template = jinja_environment.get_template(html_path + 'summary.html')
        self.response.out.write(template.render(template_values))
Example #52
0
    def _parse_module_paths(self):
        """Parses the module_path.txt files into a dictionary,

        Returns:
          module_paths: dict, e.g. {libfoo.so: some/path/here}
        """
        module_paths = dict()
        for txt_path in utils.find(self._install_dir, [self.MODULE_PATHS_TXT]):
            file_path = os.path.join(self._install_dir, txt_path)
            with open(file_path, 'r') as f:
                for line in f.read().strip().split('\n'):
                    paths = line.split(' ')
                    if len(paths) > 1:
                        if paths[0] not in module_paths:
                            module_paths[paths[0]] = paths[1]
        return module_paths
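
Assuming each line of module_paths.txt pairs a library name with its source path, the split above behaves like this minimal sketch (the line content is illustrative):

line = 'libfoo.so frameworks/native/libs/foo'
paths = line.split(' ')
# paths[0] -> 'libfoo.so' (dict key), paths[1] -> 'frameworks/native/libs/foo' (dict value)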
Example #53
0
        def get_notice_file(prebuilt):
            """Returns build rule for notice file (attribute 'notice').

            Args:
              prebuilt: string, name of prebuilt object
            """
            notice = ''
            notice_file_name = '{}.txt'.format(prebuilt)
            notices_dir = os.path.join(self._install_dir,
                                       utils.NOTICE_FILES_DIR_PATH)
            notice_files = utils.find(notices_dir, [notice_file_name])
            if notice_files:
                notice = '{ind}notice: "{notice_file_path}",\n'.format(
                    ind=self.INDENT,
                    notice_file_path=os.path.join(
                        '..', utils.NOTICE_FILES_DIR_PATH, notice_files[0]))
            return notice
Example #54
0
    def get(self, event_code):
        logging.info('Requested Url:' + self.request.url)

        event_code = event_code.lower()
        event = utils.find(lambda e: event_code == e.code, Events().get_all())

        if event is None:
            self.response.write('Resource Not Found')
            self.set_status(404)
            return

        template_values = {'header': event.header, 'categories': Events().get_categories_for_event(event_code),
                           'action': event_code}

        logging.info('Html Path: ' + html_path + 'home.html')

        template = jinja_environment.get_template(html_path + 'home.html')
        self.response.out.write(template.render(template_values))
Example #55
0
    def getPts3D(self, idx = None, filter=True):
        """Transform from depth image to 3D point cloud.
        
        Usage: pts3d = d_img.getPts3D(idx)
        
        Input:
            idx - Either 1d or 2d list of indexes to keep. If 2d
            indexes, we assume it is a list of [x y] pixel positions. Given that
            python uses [row col], the indexed array is pts3D[idx[1,:],
            idx[0,:]].

        Output:
            pts - np.ndarray - 3-by-N ndarray of 3D points

        """
        # We need to distinguish between 1d indexes and 2d indexes
        # 1d-indexes
        if idx is None:
            p3 = self.getXYZ().reshape(self.width*self.height, \
                                       3).T.view(np.ndarray)
        elif idx.size == 0:
            p3 = np.zeros((3,0))

        elif len(idx.shape) == 1:
            pts3D = self.getXYZ().reshape(self.width*self.height, \
                                          3).view(np.ndarray)
            p3 = pts3D[:, idx]

        elif len(idx.shape) == 2:
            # Is it a list of indexes, or a True/False boolean array?
            if idx.shape == self.shape:
                idx = utils.C_ind2sub(utils.find(idx), self.shape)
                idx = idx[::-1,:] # We need [x y], we have [r c]

            idx = idx.astype(int) # Ensure it's integers
            pts3D = self.getXYZ().view(np.ndarray)
            p3 = pts3D[idx[1,:], idx[0,:], :].T

        if filter:
            # Keep only points with strictly positive depth (drop Z <= 0)
            valid_pts = p3[2,:] > 0
            p3 = p3[:, valid_pts]

        return p3
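
A short usage sketch of the three index forms accepted above (the depth-image instance d_img is assumed):

import numpy as np

pts_all  = d_img.getPts3D()                      # every pixel, as a 3-by-N array
pts_flat = d_img.getPts3D(np.array([0, 5, 9]))   # 1d flat indexes into the image
pix = np.array([[10, 20, 30],                    # x (column) coordinates
                [ 4,  4,  4]])                   # y (row) coordinates
pts_pix  = d_img.getPts3D(pix)                   # 2d [x; y] pixel positions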
Example #56
0
    def getValidPtsInView(self, view, output_type="bool"):
        """ Compute indexes for the valid 2D points in view 'view'

        Usage:  idxValidPts = model.getValidPtsInView(view)

        Input:
            view - Index of the view for which we want the valid point indexes.
            output_type{'bool'} - 'bool' (bool array of valid/invalid pts) or 
                                  'idx' (indexes of the valid points).

        Output:
            idxValidPts - valid point indexes for view 'view', i.e. those
                s.t. self.pts2D[:, idxValidPts, view] != 0 
        """

        idxBoolPts = np.logical_and(self.pts2D[0, :, view] > 0,
                                    self.pts2D[1, :, view] > 0)
        if output_type == "bool":
            return idxBoolPts
        elif output_type == "idx":
            return utils.find(idxBoolPts)
Example #57
0
def patch(dest_dir, relative_patches_dir=os.curdir):
    """Applies the *.patch files in |relative_patches_dir|.

    Args:
      dest_dir: The directory in which the patches are applied.
      relative_patches_dir: A directory path relative to the directory of
          this file. Defaults to that directory itself.

    Raises:
      subprocess.CalledProcessError if the patch couldn't be applied.
    """
    patches_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), relative_patches_dir)
    assert os.path.isdir(patches_dir)

    os.chdir(dest_dir)
    for p in utils.find(["*.patch"], patches_dir):
        print "applying patch %s" % os.path.basename(p)
        try:
            utils.system(["git", "apply", p])
            utils.commit("applied patch %s" % os.path.basename(p))
        except subprocess.CalledProcessError:
            print "ERROR: patch %s failed to apply" % os.path.basename(p)
            raise
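
An illustrative call of the helper above (the checkout path is assumed); with the default second argument, the *.patch files that sit next to this script are applied:

patch('/path/to/checkout')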
Example #58
0
    def post(self, event_code):
        logging.info('Post Action:' + event_code)

        event_code = event_code.lower()
        event = utils.find(lambda e: event_code == e.code, Events().get_all())

        talk_code = self.request.get('talk_code') or 'CONF'
        talk = Talks().get_talk_for_event(event_code, talk_code)

        if event is None or talk is None:
            self.response.write('Resource Not Found')
            self.response.set_status(404)
            return

        talk = talk[0]

        template_values = {}
        comment = template_values['comment'] = cgi.escape(self.request.get('comment'))
        ratingA = template_values['ratingA'] = cgi.escape(self.request.get('ratingA'))
        ratingB = template_values['ratingB'] = cgi.escape(self.request.get('ratingB'))
        ratingC = template_values['ratingC'] = cgi.escape(self.request.get('ratingC'))
        provider = template_values['provider'] = cgi.escape(self.request.get('provider')) or 'Anonymous'
        template_values['talk_code'] = talk_code
        template_values['header'] = event.header
        template_values['title'] = talk.title
        template_values['presenters'] = talk.presenters

        logging.info('Talk Code: %s Posted Content: %s ' % (talk_code, comment))

        feedbackContent = {'comment': comment,
                           'ratingA': ratingA,
                           'ratingB': ratingB,
                           'ratingC': ratingC}

        save_feedback(json.dumps(feedbackContent), event_code, talk_code, provider)

        template = jinja_environment.get_template(html_path + 'message.html')
        self.response.out.write(template.render(template_values))
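
For reference, the feedbackContent mapping above reaches save_feedback as JSON; an illustrative serialization (all values assumed):

import json
payload = json.dumps({'comment': 'Great talk!', 'ratingA': '5',
                      'ratingB': '4', 'ratingC': '3'})
# e.g. '{"comment": "Great talk!", "ratingA": "5", "ratingB": "4", "ratingC": "3"}'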