Esempio n. 1
0
def main():
    """Build a coordinator-based simulation network and run it.

    Creates ``max_nodes - 1`` regular nodes plus one coordinator, wires
    them into the shared Network, initializes the node protocol, then
    runs the simulation until ``maxSimTime``.
    """
    initialize()
    env = Environment()
    net = Network()
    nl = []
    # an instance that contains network stats
    stats = NetStats()

    # regular nodes; one slot is reserved for the coordinator below
    for i in range(max_nodes - 1):
        n = NodeClass(i, 0.2, 'coordbased', Sources(), net)
        nl.append(n)
    # declare coordinator and add it to the node list
    c = CoordClass('coord', 0.3, 'coordbased', Sources(), net)
    nl.append(c)
    init_net_list(nl, net)
    init_node_protocol(nl)
    c.node.protocol.initCoord(nl)

    # parenthesized print keeps this runnable under Python 2 and 3
    print("coord has been started")
    activate(c, c.run())
    print("coord ok, now initializing nodes")
    for ncls in nl:
        activate(ncls, ncls.run())
    simulate(until=maxSimTime)
    # NOTE: removed a leftover pdb.set_trace() debug breakpoint that
    # would halt every run at the end of the simulation.
Esempio n. 2
0
class Bot:
    """Chat-bot facade over a Sources/Stories backend."""

    def __init__(self, config):
        # config must expose get_url(); sources/stories are built lazily
        # by load()
        self.url = config.get_url()
        self.sources = None
        self.stories = None

    def load(self):
        """Build the source list and story collection, then load stories."""
        self.sources = Sources(self.url)
        self.stories = Stories(self.sources)
        return self.stories.load()

    def start(self, url):
        """Return the greeting message for *url*."""
        message = 'Бот для сайта {0}'.format(url)
        return message

    def help(self):
        """Return the /help text listing available story sources."""
        # only the descriptions (values) are shown; keys are ignored
        descriptions = '\n\t'.join(
            '{0}'.format(desc)
            for desc in self.stories.get_description().values())
        message = "/get - читать истории из: \n\t{0}\n"\
        "/random - случайные истории\n"\
        "/stop - прервать диалог с ботом".format(descriptions)
        return message

    def _collect(self, num, site_names, random=False):
        # Shared implementation for get()/random(): fetch stories and
        # flatten them to plain text messages. The `random` kwarg is
        # only forwarded when requested, preserving the original calls.
        if site_names is None:
            site_names = list(self.stories.get_names().keys())
        sites = list(self.stories.get_names().values())
        kwargs = dict(num=num, site_names=site_names, sites=sites)
        if random:
            kwargs['random'] = True
        stories = self.stories.get(**kwargs)
        return [s.get().get('story') for s in stories]

    def random(self, num=None, site_names=None):
        """Return up to *num* random story texts from *site_names*."""
        return self._collect(num, site_names, random=True)

    def get(self, num=None, site_names=None):
        """Return up to *num* story texts from *site_names*."""
        return self._collect(num, site_names)

    def get_sources_sites(self):
        """Return the distinct site identifiers known to the sources."""
        sites = set()
        for sites_list in self.sources.get():
            for site in sites_list:
                sites.add(site.get('site'))
        return list(sites)

    def get_sources_names(self, site):
        """Return distinct (name, desc) pairs registered for *site*."""
        names = set()
        for sites_list in self.sources.get():
            for s in sites_list:
                if s.get('site') == site:
                    names.add((s.get('name'), s.get('desc')))
        return list(names)
Esempio n. 3
0
 def setUp(self):
     """Create a two-node network plus a coordinator for the tests."""
     self.net = Network()
     self.nc1 = NodeClass('simple_node_1', 0.1, 'coordbased',
                          Sources(), self.net)
     self.nc2 = NodeClass('simple_node_2', 0.1, 'coordbased',
                          Sources(), self.net)
     self.cc = CoordClass('coord', 0.1, 'coordbased', Sources(), self.net)
     self.li = [self.nc1, self.nc2, self.cc]
     # every member (both nodes and the coordinator) sees the full list
     for member in self.li:
         member.node.protocol.initProtocol(self.li)
     self.net.NodeList = self.li
Esempio n. 4
0
 def new_sources(self, domain, api_key, name=""):
     """Try a jigsaw lookup first; queue the full search if it finds nothing."""
     # TODO - First search jigsaw
     # If that fails search everything else
     email_pattern = Sources()._jigsaw_search(domain)
     if email_pattern == {}:
         # nothing found - run the wider search asynchronously
         q.enqueue(EmailGuess.search_sources, domain, api_key, name)
     # else: a pattern was found; webhook notification is still TODO
Esempio n. 5
0
    def search_sources(self, domain, api_key, name=""):
        """Resolve an e-mail pattern for *domain*, trying Toofr first.

        On a Toofr hit the pattern is persisted and pushed through the
        webhook, then returned. Otherwise every other searcher is queued
        asynchronously, each job tagged with the domain/api_key pair.
        """
        pattern = Toofr().get(domain)
        if pattern:
            ptn = {
                "domain": domain,
                "company_email_pattern": [{
                    "source": "toofr",
                    "pattern": pattern
                }]
            }
            self._find_if_object_exists('EmailPattern', 'domain', domain, ptn)
            Webhook()._update_company_email_pattern(ptn)
            return pattern

        # syncronous jigsaw search
        # job_5 = q.enqueue(Sources()._jigsaw_search, domain)
        jobs = [
            q.enqueue(Sources()._whois_search, domain),
            q.enqueue(Sources()._google_span_search, domain),
            q.enqueue(Sources()._press_search, domain, api_key),
            q.enqueue(Sources()._zoominfo_search, domain),
        ]
        if name != "":
            # name-based searchers only apply when a person was supplied
            jobs.append(q.enqueue(Sources()._mx_server_check, name, domain))
            jobs.append(q.enqueue(Sources()._linkedin_login_search, name, domain))

        tag = "{0}_{1}".format(domain, api_key)
        for job in jobs:
            RQueue()._meta(job, tag)
Esempio n. 6
0
def main():
    """Set up the coordinator-based simulation environment and run it."""
    initialize()
    env = Environment()
    net = Network()
    stats = NetStats()

    # regular nodes first; the coordinator is appended last
    nl = [NodeClass(i, 0.2, 'coordbased', Sources(), net)
          for i in range(max_nodes - 1)]
    # declare coordinator
    c = CoordClass('coord', 0.3, 'coordbased', Sources(), net)
    nl.append(c)

    init_env(nl, env)
    init_net_list(nl, net)
    init_threading(nl, env)
    init_node_protocol(nl)
    c.node.protocol.initCoord(nl)

    activate(c, c.run())
    for member in nl:
        activate(member, member.run())
    simulate(until=maxSimTime)
Esempio n. 7
0
def scan_tree(
    dirnode,
    source_patterns=DEFAULT_SOURCE_PATTERNS,
    include_patterns=DEFAULT_INCLUDE_PATTERNS,
    assembly_patterns=DEFAULT_ASSEMBLY_PATTERNS,
    recursive=True):
    """
    Recursively glob source, assembly and include files under *dirnode*.
    :param dirnode: A root directory node - root of search tree (Dir)
    :param source_patterns: A list of source file name patterns to search (list of str)
    :param include_patterns: A list of include file name patterns to search (list of str)
    :param assembly_patterns: A list of assembly file name patterns to search (list of str)
    :param recursive: Flag to determine if file/directory search operation should be recursive (bool)
    :return: A sources object (Sources)

    Example usage:
        sources = scan_tree(Dir("RTOS"))
        sources.source_filenodes  # A list of all source file nodes
        sources.include_dirnodes  # A list of all include directory nodes
    """
    dirnode = Dir(dirnode)
    sources = Sources()
    for dirpath, dirnames, filenames in os.walk(os.path.relpath(dirnode.abspath)):
        # A directory that carries its own SConscript manages its subtree
        # itself; merge its result and do not descend further.
        if "SConscript" in filenames:
            subtree = SConscript(Dir(dirpath).File("SConscript"))
            if isinstance(subtree, Sources):
                sources += subtree
                del dirnames[:]  # end recursion for this directory tree
                continue

        for pattern in source_patterns:
            sources.source_filenodes.extend(Glob(os.path.join(dirpath, pattern)))
            if Dir(dirpath) not in sources.source_dirnodes:
                sources.source_dirnodes.append(Dir(dirpath))

        for pattern in assembly_patterns:
            sources.assembly_filenodes.extend(Glob(os.path.join(dirpath, pattern)))

        for pattern in include_patterns:
            sources.include_filenodes.extend(Glob(os.path.join(dirpath, pattern)))
            # only record the directory when a header actually lives here
            has_match = len(fnmatch.filter(filenames, pattern)) > 0
            if has_match and Dir(dirpath) not in sources.include_dirnodes:
                sources.include_dirnodes.append(Dir(dirpath))

        if not recursive:
            break

    return sources
Esempio n. 8
0
def main():
    """Demo driver: print top headlines, sources and a search query via newsapi."""
    import os

    newsapi_obj = NewsApi()
    # SECURITY: the API key used to be hard-coded here. Prefer the
    # environment; fall back to the original value so existing setups
    # keep working unchanged.
    newsapi_obj._key = os.environ.get('NEWSAPI_KEY',
                                      '0eacfb62485f4a4cacf8b62680452532')

    # To fetch top headlines from newsapi
    # possible display_items - ['name', 'author', 'title', 'description', 'publishedAt']
    TopHeadlines().show_top_headlines(
        display_items=['name', 'author', 'title', 'description', 'publishedAt'])

    # To fetch sources from newsapi
    # possible display_items - ['name', 'description', 'url', 'category', 'language', 'country']
    Sources().show_sources(
        display_items=['name', 'description', 'url', 'category', 'language', 'country'])

    # To fetch articles based on the given search item
    # (keyword name 'kw_to_serch_in_both' is misspelled upstream - kept for compatibility)
    Everything().show_everything(
        kw_to_search_in_title='airtel',
        kw_to_serch_in_both=False,
        display_items=['name', 'author', 'title', 'description', 'publishedAt'])
Esempio n. 9
0
def main():
    """Run the decentralized-protocol simulation."""
    initialize()
    env = Environment()
    net = Network()

    # NOTE(review): the delay is passed as the string '0.2' here, unlike
    # the float 0.2 used by the coord-based variants - confirm intended.
    nl = [NodeClass(i, '0.2', 'decentralized', Sources(), net)
          for i in range(max_nodes)]

    # populate network with nodes
    init_net_list(nl, net)
    # init protocol
    init_protocol_requirements(nl)
    init_protocol_phase(nl, net)
    print_results(nl)

    for member in nl:
        activate(member, member.run())
    activate(net, net.run())
    simulate(until=maxSimTime)
Esempio n. 10
0
#obsplanner.init_pos(coord)


# set up instrument:
instrument = InstrumentSimple(100., 0., name='WALOP')
obsplanner.set_instrument(instrument)

# structured row layout shared by every source list CSV
dtype = [('name', 'S30'), ('ra', float), ('dec', float), ('expt', float),
         ('expn', int)]


def _load_source_table(path):
    """Load one source-list CSV as a structured array (name/ra/dec/expt/expn)."""
    return np.loadtxt(path, dtype=dtype, skiprows=1, delimiter=',',
                      usecols=range(5))


# add sources:
#sources = _load_source_table('unittests/sourcelists/sources_fewer.csv')
sources = _load_source_table('tests_old/sourcelists/targets.csv')
targets = Sources(
        sources['name'], sources['ra'], sources['dec'], sources['expt'],
        sources['expn'])
obsplanner.set_sources(targets)

# add calibrator sources:
sources = _load_source_table('tests_old/sourcelists/calibrators.csv')
calibrators = CalSources(
        sources['name'], sources['ra'], sources['dec'], sources['expt'],
        sources['expn'])
obsplanner.set_calibrators(calibrators)

# add limits:
Esempio n. 11
0
 def __init__(self):
     """Construct and initialize the photo-store sources backend."""
     store = Sources(True)
     self.photostore_sources = store
     store.create()
Esempio n. 12
0
class SourcesTest(object):
    """Smoke-test driver for the photo-store Sources backend."""

    def __init__(self):
        self.photostore_sources = Sources(True)
        self.photostore_sources.create()

    def test_sources(self):
        """Insert five records for user '1', then print a full and a filtered read."""
        self.photostore_sources.put('1', 'foo', 'Foo', 'fa-user')
        self.photostore_sources.put('1', 'bar', 'Bar', 'fa-user')
        self.photostore_sources.put('1', 'goo', 'Goo', 'fa-user')
        self.photostore_sources.put('1', 'gopa', 'Gopa Kumar', 'fa-user')
        self.photostore_sources.put('1', 'blar', 'Blar', 'fa-user')
        data = self.photostore_sources.get('1')
        for d in data:
            # parenthesized print keeps this runnable on Python 2 and 3;
            # an unused `curtime` local was also removed
            print('GET: data source %s, description %s, icon, %s act %s\n' %
                  (d['g_uid'],
                   d['SourceDescription'],
                   d['SourceIcon'], d['SourceAccount']))
        data = self.photostore_sources.get('1', 'foo')
        for d in data:
            print('QUERY: data source %s, description %s, icon %s, act %s\n' %
                  (d['g_uid'],
                   d['SourceDescription'],
                   d['SourceIcon'], d['SourceAccount']))
Esempio n. 13
0
def get_user_sources(username):
    """Return all stored sources belonging to *username*."""
    store = Sources()
    store.create()
    return store.get(username)
Esempio n. 14
0
def jigsaw_search():
    """Queue an asynchronous jigsaw search for the requested company."""
    company = request.args['company_name']
    q.enqueue(Sources()._jigsaw_search, company)
    return {'started': True}
Esempio n. 15
0
def mx_search():
    """Queue an asynchronous MX-server check for the given name/domain pair."""
    args = request.args
    q.enqueue(Sources()._mx_server_check, args['name'], args['domain'])
    return {'started': True}
Esempio n. 16
0
class Utilities:
    """Helpers for enumerating domains owned by an organization via ASN
    data plus the WHOXY and WhoisXML reverse-whois APIs.

    The two API back ends used to be implemented twice (once for company
    name, once for e-mail); the shared logic now lives in private
    ``_whoxyReverseSearch`` / ``_whoisXmlReverseSearch`` helpers.
    """
    masterDomainList = []
    email = ""
    orgName = ""
    keys = Sources()  # API credentials (whoxyKey / whoisXmlKey)

    def getOrgASN(self, partialOrgName):
        """Return {index: org name} for asnlist.txt entries matching *partialOrgName*."""
        possibleMatches = {}
        index = 0
        # `with` guarantees the file is closed (the old code leaked it)
        with open("asnlist.txt", "r") as asn_file:
            for line in asn_file:
                try:
                    name = re.match(r"\d+,(.+), \w\w", line).group(1)
                    if re.search(partialOrgName, name, re.IGNORECASE):
                        possibleMatches[index] = name
                        index += 1
                except (AttributeError, re.error):
                    # line not in the expected CSV shape (match is None),
                    # or the user-supplied pattern is invalid - skip it
                    pass
        return possibleMatches

    def _whoxyReverseSearch(self, search, verbose):
        """Shared WHOXY reverse-whois pager; *search* is the query fragment."""
        total = []
        url = ("https://api.whoxy.com/?key=" + self.keys.whoxyKey +
               "&reverse=whois" + search)
        try:
            response = requests.get(url + "&mode=mini")
            data = json.loads(response.text)
            totalPages = data["total_pages"]
            for page in range(1, totalPages + 1):
                response = requests.get(url + "&mode=mini&page=" + str(page))
                data = json.loads(response.text)
                for domain in data["search_result"]:
                    total.append(domain["domain_name"])
                    if verbose:
                        print(domain["domain_name"])
        except KeyError:
            # the API signals "no credits" by omitting the expected keys
            print("ERROR: Skipping WHOXY, no API credits remaining.")
        return total

    def _whoisXmlReverseSearch(self, term, verbose):
        """Shared WhoisXML reverse-whois query for a single include *term*."""
        total = []
        baseUrl = "https://reverse-whois-api.whoisxmlapi.com/api/v2"
        params = {
            "apiKey": self.keys.whoisXmlKey,
            "searchType": "current",
            "mode": "purchase",
            "basicSearchTerms": {
                "include": [term, "US"],
                "exclude": ["Europe", "EU"]
            }
        }
        response = json.loads(
            requests.post(baseUrl, data=json.dumps(params)).text)
        try:
            for domain in response["domainsList"]:
                total.append(domain)
                if verbose:
                    print(domain)
        except KeyError:
            print("ERROR: Skipping whoisXML, no API credits remaining.")
        return total

    def getWhoxy(self, orgName, verbose):
        """Reverse-whois WHOXY search by company name."""
        return self._whoxyReverseSearch(
            "&company=" + orgName.replace(" ", "+"), verbose)

    def getWhoisXML(self, orgName, verbose):
        """Reverse-whois WhoisXML search by company name."""
        return self._whoisXmlReverseSearch(orgName, verbose)

    def orgNameFromWHOIS(self, domainName):
        """Return the registrant company name for *domainName* ('' if unknown)."""
        response = requests.get("http://api.whoxy.com/?key=" +
                                self.keys.whoxyKey + "&whois=" +
                                str(domainName))
        data = json.loads(response.text)
        try:
            return data["registrant_contact"]["company_name"]
        except Exception:
            # missing keys or a malformed response both mean "not found"
            print("No organization found from domain name.\n")
            return ""

    def getWhoxyEmail(self, emailAddress, verbose):
        """Reverse-whois WHOXY search by registrant e-mail address."""
        return self._whoxyReverseSearch(
            "&email=" + emailAddress.replace(" ", "+"), verbose)

    def getWhoisXMLEmail(self, email, verbose):
        """Reverse-whois WhoisXML search by the e-mail's domain part."""
        return self._whoisXmlReverseSearch("@" + email.split("@")[1], verbose)

    def getOrgNameDomains(self, orgName, verbose):
        """Union of WHOXY and WhoisXML results for a company name."""
        whoxyDomains = set(self.getWhoxy(orgName, verbose))
        whoisXMLDomains = set(self.getWhoisXML(orgName, verbose))
        self.masterDomainList = whoxyDomains.union(whoisXMLDomains)
        return self.masterDomainList

    def getEmailDomains(self, emailAddress, verbose):
        """Union of WHOXY and WhoisXML results for an e-mail address."""
        whoxyDomains = set(self.getWhoxyEmail(emailAddress, verbose))
        whoisXMLDomains = set(self.getWhoisXMLEmail(emailAddress, verbose))
        self.masterDomainList = whoxyDomains.union(whoisXMLDomains)
        return self.masterDomainList

    def writeToFile(self, domainList, outputFileName):
        """Write one domain per line to *outputFileName*."""
        with open(outputFileName, "w") as outputFile:
            for domain in domainList:
                outputFile.write(domain + "\n")
Esempio n. 17
0
 def load(self):
     """Build the source list and story collection, then load the stories."""
     src = Sources(self.url)
     self.sources = src
     self.stories = Stories(src)
     return self.stories.load()
Esempio n. 18
0
 def crawl(self):
     """Fetch links from every Sources subclass and return them deduplicated."""
     self.loop.run_until_complete(self.get_links(Sources.__subclasses__()))
     unique_links = {link for batch in self.links for link in batch}
     return list(unique_links)
Esempio n. 19
0
class RecommendationPage:
    """View helper that maps article sources to bias metadata for display."""

    def __init__(self, app):
        self.app = app
        self.sources = Sources()

    def resolve_source_name(self, url):
        """Map an article URL to a human-readable source name."""
        netloc = urlparse(url).netloc
        return self.sources.url_names.get(netloc, 'Unknown source')

    def resolve_source_id(self, name):
        """Delegate source-name -> source-id resolution to the registry."""
        return self.sources.resolve_source_id(name)

    def resolve_bias_display(self, source_code):
        """Return the display label for a source's bias rating."""
        bias = self.sources.sources_bias_map[source_code]
        return self.sources.bias_display_names[bias]

    def build_recommendations_list(self, df, subset, top_n):
        """Return the first *top_n* rows of *df* as plain dicts.

        NOTE(review): *subset* is accepted but unused - kept for
        interface compatibility with existing callers.
        """
        top_rows = df.head(top_n)
        return [row.to_dict() for _, row in top_rows.iterrows()]

    def append_bias(self, rec_df):
        """Attach publication, bias code/label and icon URL columns to *rec_df*."""
        bias_map = self.sources.sources_bias_map
        rec_df['publication'] = rec_df['source'].map(lambda src: src['name'])
        rec_df['bias_code'] = rec_df['source'].map(
            lambda src: bias_map.get(src['id']))
        rec_df['icon_url'] = rec_df['source'].map(
            lambda src: self.resolve_img_url(src['id']))
        rec_df['bias_label'] = rec_df['bias_code'].map(
            self.sources.bias_display_names.get)
        return rec_df

    def resolve_bias_code(self, source_code):
        """Return the raw bias code for *source_code*."""
        return self.sources.sources_bias_map[source_code]

    def resolve_valid_biases(self, input_bias):
        """Return the bias codes worth recommending for *input_bias*.

        Left-of-center sources get right-leaning picks plus center (and
        vice versa); centrist sources get the two extremes.
        """
        biases = list(self.sources.bias_sources_map.keys())
        left = biases[:3]
        right = biases[4:]
        extreme = biases[0:1] + biases[-1:]

        if input_bias in left:
            valid = right + [biases[3]]
        elif input_bias in right:
            valid = left + [biases[3]]
        else:
            # source is relatively unbiased - hit em with the crazy
            valid = extreme

        if input_bias in extreme:
            valid.append(biases[3])  # append center

        return valid

    def resolve_img_url(self, source_id):
        """Return the static URL of the source's jpg icon."""
        filename = 'imgs/{}.jpg'.format(source_id)
        return url_for('static', filename=filename)
        # TODO support a png fallback when no jpg exists for the source

    def get_valid_sources(self, valid_biases):
        """Flatten the source-id lists for every bias in *valid_biases*."""
        return flatten(
            [self.sources.bias_source_id_map[b] for b in valid_biases])
Esempio n. 20
0
 def __init__(self, app):
     """Keep a reference to the app and build the shared sources registry."""
     self.app = app
     registry = Sources()
     self.sources = registry
Esempio n. 21
0
def add_media_process(title, imdb):
    """Search all enabled torrent trackers for *title*/*imdb*, report
    progress through a file-backed dialog, trigger a Kodi library update
    for the created .strm path, and finally write the result count to a
    '<imdb>.ended' marker file.

    NOTE(review): string .encode('utf-8') written to text-mode files
    suggests this runs under Python 2 / Kodi's embedded interpreter -
    confirm before porting.
    """
    count = 0

    from player import getSetting, load_settings
    import anidub, hdclub, nnmclub, rutor, soap4me, bluebird, kinohd

    settings = load_settings()

    # per-tracker feature flags from addon settings; hdclub is disabled
    # unconditionally
    anidub_enable = getSetting('anidub_enable') == 'true'
    hdclub_enable = False
    bluebird_enable = getSetting('bluebird_enable') == 'true'
    nnmclub_enable = getSetting('nnmclub_enable') == 'true'
    rutor_enable = getSetting('rutor_enable') == 'true'
    soap4me_enable = getSetting('soap4me_enable') == 'true'
    kinohd_enable = getSetting('kinohd_enable') == 'true'

    class RemoteDialogProgress:
        # Progress "dialog" that another process can observe: each update
        # rewrites '<imdb>.progress' with the percent and message lines.
        progress_file_path = filesystem.join(addon_data_path(),
                                             '.'.join([imdb, 'progress']))

        def update(self, percent, *args, **kwargs):
            with filesystem.fopen(self.progress_file_path,
                                  'w') as progress_file:
                progress_file.write(str(percent) + '\n')
                progress_file.write('\n'.join(args).encode('utf-8'))

        def close(self):
            # best-effort cleanup; the file may already be gone
            try:
                filesystem.remove(self.progress_file_path)
            except:
                pass

    settings.progress_dialog = RemoteDialogProgress()

    # p collects the .strm paths produced by the search generators
    p = []

    from log import dump_context
    #try:
    if True:
        # anidub handles its own id scheme ('sm...'); imdb ids start 'tt'
        if anidub_enable and imdb.startswith('sm'):
            with dump_context('anidub.search_generate'):
                c = anidub.search_generate(title, settings, p)
                count += c

        if imdb.startswith('tt'):
            #if hdclub_enable:
            #	c = hdclub.search_generate(title, imdb, settings, p)
            #	count += c
            if bluebird_enable:
                with dump_context('bluebird.search_generate'):
                    c = bluebird.search_generate(title, imdb, settings, p)
                    count += c
            if rutor_enable:
                with dump_context('rutor.search_generate'):
                    c = rutor.search_generate(title, imdb, settings, p)
                    count += c
            if kinohd_enable:
                with dump_context('kinohd.search_generate'):
                    c = kinohd.search_generate(title, imdb, settings, p)
                    count += c

            if nnmclub_enable:
                with dump_context('nnmclub.search_generate'):
                    c = nnmclub.search_generate(title, imdb, settings, p)
                    count += c
            #if soap4me_enable:
            #	count += soap4me.search_generate(title, imdb, settings)
    #except BaseException as e:
    #	log.print_tb(e)

    # remember the first produced path for the caller
    if p:
        path = filesystem.join(addon_data_path(), imdb + '.strm_path')
        with filesystem.fopen(path, 'w') as f:
            f.write(p[0].encode('utf-8'))

    settings.progress_dialog.close()

    if count:
        import xbmc
        # only kick a library scan if one is not already running
        if not xbmc.getCondVisibility('Library.IsScanningVideo'):
            if p and p[0]:
                path = p[0]

                # a direct .strm file means a movie; a directory means a show
                if path.endswith('.strm'):
                    type = 'movies'
                else:
                    type = 'tvshows'

                base_path = filesystem.dirname(p[0])

                # match the produced path against Kodi's configured video
                # sources so the scan can be limited to the right source
                from sources import Sources
                srcs = Sources()
                for src in srcs.get('video', normalize=False):
                    src_path_basename = filesystem.basename(
                        src.path.rstrip('\\/'))
                    if src_path_basename == base_path:  #base_path.lower().replace('\\', '/') in src.path.lower().replace('\\', '/'):
                        path_update = src.path
                        if type == 'tvshows':
                            # smb:// paths need manual joining; local paths
                            # can use the normal join helper
                            if src.path.startswith('smb://'):
                                path_update = src.path
                                path_update = path_update.strip(
                                    '\\/') + '/' + filesystem.basename(path)
                            else:
                                path_update = filesystem.join(
                                    src.path, filesystem.basename(path))
                        log.debug(path_update)
                        xbmc.executebuiltin('UpdateLibrary("video","%s")' %
                                            path_update.encode('utf-8'))

                #xbmc.executebuiltin('UpdateLibrary("video")')
            else:
                # no concrete path - fall back to a full library scan
                xbmc.executebuiltin('UpdateLibrary("video")')

            # wait for the scan we just started to finish
            xbmc.sleep(250)
            while xbmc.getCondVisibility('Library.IsScanningVideo'):
                xbmc.sleep(100)

    # signal completion (and the result count) to the watching process
    path = filesystem.join(addon_data_path(), imdb + '.ended')
    with filesystem.fopen(path, 'w') as f:
        f.write(str(count))
0
def scan_tree(dirnode,
              source_patterns=DEFAULT_SOURCE_PATTERNS,
              include_patterns=DEFAULT_INCLUDE_PATTERNS,
              assembly_patterns=DEFAULT_ASSEMBLY_PATTERNS,
              linker_patterns=DEFAULT_LINKER_PATTERNS,
              unit_test_patterns=DEFAULT_UNIT_TEST_PATTERNS,
              subsidiary_scons_filename="subsidiary_scons",
              ignore_dirnames=DEFAULT_IGNORE_DIRNAMES,
              recursive=True):
    """
    Recursively search/glob source files, include files, etc.
    :param dirnode: A root directory node - root of search tree (Dir)
    :param source_patterns: A list of source file name patterns to search (list of str)
    :param include_patterns: A list of include file name patterns to search (list of str)
    :param assembly_patterns: A list of assembly file name patterns to search (list of str)
    :param linker_patterns: A list of linker script file name patterns to search (list of str)
    :param unit_test_patterns: A list of unit test file name patterns to search (list of str)
    :param subsidiary_scons_filename: Subsidiary SCons file name (str)
    :param ignore_dirnames: List of directory names to ignore (list of str)
    :param recursive: Flag to determine if file/directory search operation should be recursive (bool)
    :return: A sources object (Sources)

    Example usage:
        sources = scan_tree(Dir("RTOS"))
        sources.source_filenodes  # A list of all source file nodes
        sources.include_dirnodes  # A list of all include directory nodes
    """
    dirnode = Dir(dirnode)
    ignore_dirnames = ignore_dirnames or []
    sources = Sources()

    # unit test sources are "<unit test prefix><source pattern>" combos
    unit_test_source_patterns = []
    for unit_test_pattern in unit_test_patterns:
        for source_pattern in source_patterns:
            unit_test_source_patterns.append("{}{}".format(
                unit_test_pattern, source_pattern))

    root_dirpath = os.path.relpath(dirnode.abspath)
    for dirpath, dirnames, filenames in os.walk(root_dirpath):
        if os.path.basename(dirpath) in ignore_dirnames:
            continue

        # headers in the override directory replace real headers in tests
        if os.path.basename(dirpath) == DEFAULT_UNIT_TEST_HEADER_OVERRIDE:
            for include_pattern in include_patterns:
                matching_include_filenodes = Glob(
                    os.path.join(dirpath, include_pattern))
                sources.unit_test_header_filenodes.extend(
                    matching_include_filenodes)
            continue

        # Do not invoke subsidiary scons in root directory to avoid infinite recursion
        if (dirpath != root_dirpath) and (subsidiary_scons_filename
                                          in filenames):
            subsidary_sources = SConscript(
                Dir(dirpath).File(subsidiary_scons_filename))
            if isinstance(subsidary_sources, Sources):
                sources += subsidary_sources
                dirnames[:] = []  # End recursion for this directory tree
                continue

        for source_pattern in source_patterns:
            matching_source_filenodes = Glob(
                os.path.join(dirpath, source_pattern))
            for filenode in matching_source_filenodes:
                # BUGFIX: the old code appended the node once per
                # NON-matching unit-test pattern, duplicating every
                # regular source file whenever more than one unit-test
                # pattern is configured. A source file belongs here only
                # if it matches none of the unit-test patterns.
                is_unit_test = any(
                    fnmatch.fnmatch(filenode.name, unit_test_pattern)
                    for unit_test_pattern in unit_test_patterns)
                if not is_unit_test:
                    sources.source_filenodes.append(filenode)
                    if Dir(dirpath) not in sources.source_dirnodes:
                        sources.source_dirnodes.append(Dir(dirpath))

        for include_pattern in include_patterns:
            matching_include_filenodes = Glob(
                os.path.join(dirpath, include_pattern))
            sources.include_filenodes.extend(matching_include_filenodes)
            # record the directory only if a header actually lives here
            if (len(fnmatch.filter(filenames, include_pattern)) >
                    0) and (Dir(dirpath) not in sources.include_dirnodes):
                sources.include_dirnodes.append(Dir(dirpath))

        for assembly_pattern in assembly_patterns:
            matching_assembly_filenodes = Glob(
                os.path.join(dirpath, assembly_pattern))
            sources.assembly_filenodes.extend(matching_assembly_filenodes)

        for linker_pattern in linker_patterns:
            matching_linker_filenodes = Glob(
                os.path.join(dirpath, linker_pattern))
            sources.linker_filenodes.extend(matching_linker_filenodes)

        for unit_test_source_pattern in unit_test_source_patterns:
            matching_unit_test_source_filenodes = Glob(
                os.path.join(dirpath, unit_test_source_pattern))
            sources.unit_test_filenodes.extend(
                matching_unit_test_source_filenodes)

        if not recursive:
            break

    return sources