Example #1
	def generate_music_nabresponse(self):
		dstring = []
		if(self.args.has_key('artist')):
			dstring.append(SearchModule.sanitize_strings(self.args['artist']))
		if(self.args.has_key('album')):
			dstring.append(SearchModule.sanitize_strings(self.args['album']))
		if(self.args.has_key('track')):
			dstring.append(SearchModule.sanitize_strings(self.args['track']))
		if(self.args.has_key('year')):
			dstring.append(SearchModule.sanitize_strings(self.args['year']))

		music_search_str = ''
		for i in xrange(len(dstring)):
			if(len(dstring[i]) and i<len(dstring)-1):
				music_search_str = music_search_str + dstring[i]

		print music_search_str
		#~ print movie_search_str
		self.searchstring = music_search_str
		self.typesearch = 0
		#~ compile results
		#~ results = SearchModule.performSearch(movie_search_str, self.cfg )
		#~ flatten and summarize them
		#~ cleaned_results = megasearch.summary_results(results,movie_search_str)
		#~ render XML
		#~ return self.cleanUpResultsXML(cleaned_results)
		return 'm'
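Note that the loop above (also in Example #3) only appends an element when i < len(dstring) - 1, so the last field is always dropped and no separator is inserted. A minimal standalone sketch of the same assembly, with a stand-in sanitize_strings (the real one lives in SearchModule; this stub is an assumption for illustration):

def sanitize_strings(s):
    # stand-in for SearchModule.sanitize_strings (assumed behavior)
    return s.strip().lower().replace(' ', '.')

def build_music_query(args):
    dstring = [sanitize_strings(args[k])
               for k in ('artist', 'album', 'track', 'year') if k in args]
    music_search_str = ''
    for i in xrange(len(dstring)):
        # same guard as above: the final element is skipped
        if len(dstring[i]) and i < len(dstring) - 1:
            music_search_str += dstring[i]
    return music_search_str

print build_music_query({'artist': 'Pink Floyd', 'album': 'Animals'})  # -> 'pink.floyd'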
Example #2
	def dosearch(self, args):
		#~ restore originals
		self.cfg = copy.deepcopy(self.cfg_cpy)
		
		if('q' not in args):
			self.results = []
			return self.results
		nuqry = args['q'] + ' ' + self.cgen['searchaddontxt']
		self.logic_items = self.logic_expr.findall(nuqry)
		self.qry_nologic = self.logic_expr.sub(" ",nuqry)
		if('selcat' in args):
			if(args['selcat'] != ""):
				self.qry_nologic += " " + args['selcat']

		#~ speed class
		speed_class_sel = 1	
		if('tm' in args):
			speed_class_sel = int(args['tm'])
		
		#~ speed class deepsearch
		self.ds.set_timeout_speedclass(speed_class_sel)
		#~ speed class Nabbased	
		for conf in self.cfg :
			if ( (conf['speed_class'] <=  speed_class_sel) and (conf['valid'])):
				conf['timeout']  = self.cgen['timeout_class'][ speed_class_sel ]
				#~ print conf['type'] + " " + str(conf['timeout'] ) + ' ' + str(speed_class_sel )
			else:
				conf['valid']  = 0
		 
					
		if( len(args['q']) == 0 ):
			if('selcat' in args):
				if(len(args['selcat'])==0):
					self.results = []
					return self.results
			else:
				self.results = []
				return self.results
		if(self.qry_nologic.replace(" ", "") == ""):
			self.results = []
			return self.results
						
		self.cleancache()
		#~ cache hit, no server report
		self.returncode_fine['code'] = 2
		self.resultsraw = self.chkforcache(self.wrp.chash64_encode(SearchModule.sanitize_strings(self.qry_nologic)), speed_class_sel)
		if( self.resultsraw is None):
			self.resultsraw = SearchModule.performSearch(self.qry_nologic, self.cfg, self.ds )
			self.prepareretcode()
			
		if( self.cgen['smartsearch'] == 1):
			#~ smartsearch
			self.results = summary_results(self.resultsraw, self.qry_nologic, self.logic_items)
		else:
			#~ no cleaning just flatten in one array
			self.results = []
			for provid in xrange(len(self.resultsraw)):
				for z in xrange(len(self.resultsraw[provid])):
					if (self.resultsraw[provid][z]['title'] != None):
						self.results.append(self.resultsraw[provid][z])
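self.logic_expr, defined in Examples #10 and #12 as re.compile("(?:^|\s)([-+])(\w+)"), is what splits the raw query into +/- operator items and a plain search string. A self-contained check of that split:

import re

logic_expr = re.compile(r"(?:^|\s)([-+])(\w+)")

nuqry = "ubuntu -beta +x64"
logic_items = logic_expr.findall(nuqry)   # [('-', 'beta'), ('+', 'x64')]
qry_nologic = logic_expr.sub(" ", nuqry)  # "ubuntu  "
print logic_items
print qry_nologic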
Example #3
    def generate_music_nabresponse(self):
        dstring = []
        if self.args.has_key("artist"):
            dstring.append(SearchModule.sanitize_strings(self.args["artist"]))
        if self.args.has_key("album"):
            dstring.append(SearchModule.sanitize_strings(self.args["album"]))
        if self.args.has_key("track"):
            dstring.append(SearchModule.sanitize_strings(self.args["track"]))
        if self.args.has_key("year"):
            dstring.append(SearchModule.sanitize_strings(self.args["year"]))

        music_search_str = ""
        for i in xrange(len(dstring)):
            if len(dstring[i]) and i < len(dstring) - 1:
                music_search_str = music_search_str + dstring[i]

        print music_search_str
        # ~ print movie_search_str
        self.searchstring = music_search_str
        self.typesearch = 0
        # ~ compile results
        # ~ results = SearchModule.performSearch(movie_search_str, self.cfg )
        # ~ flatten and summarize them
        # ~ cleaned_results = megasearch.summary_results(results,movie_search_str)
        # ~ render XML
        # ~ return self.cleanUpResultsXML(cleaned_results)
        return "m"
Example #4
	def generate_tvserie_nabresponse(self,tvrage_show ):
		#~ compile string
		season_num = self.args.get('season',-1, type=int)
		relaxed_seasonmatch = 0
		serie_search_str = SearchModule.sanitize_strings(tvrage_show['showtitle'])
		if(self.args.has_key('ep')):
			ep_num = self.args.get('ep',-1, type=int)
			serie_search_str = serie_search_str + '.s%02d' % season_num + 'e%02d' % ep_num
		else:
			serie_search_str = serie_search_str + '.s%02d' % season_num
			relaxed_seasonmatch = 1

		self.typesearch = 1
		self.searchstring = serie_search_str
		#~ compile results
		results = SearchModule.performSearch(serie_search_str, self.cfg , self.cfg_ds )

		cleaned_results = []
		if(relaxed_seasonmatch):
			#~ no cleaning just flatten in one array
			for provid in xrange(len(results)):
				if(results[provid] is not None):
					for z in xrange(len(results[provid])):
						cleaned_results.append(results[provid][z])
		else:
			#~ flatten and summarize them
			cleaned_results = megasearch.summary_results(results,serie_search_str)

		#~ render XML
		return self.cleanUpResultsXML(cleaned_results)
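The season/episode suffix is plain %-formatting; a quick standalone check of the pattern (hypothetical show title, assumed to be already sanitized into dotted form):

serie_search_str = 'some.show'
season_num, ep_num = 5, 9
print serie_search_str + '.s%02d' % season_num + 'e%02d' % ep_num  # some.show.s05e09
print serie_search_str + '.s%02d' % season_num                     # some.show.s05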
Example #5
	def generate_tsearch_nabresponse(self):

		if(self.args.has_key('q')):
			freesearch_str = SearchModule.sanitize_strings(self.args['q'])
			self.searchstring = freesearch_str
			self.typesearch = 2
			#~ compile results
			results = SearchModule.performSearch(freesearch_str, self.cfg, self.cfg_ds )
			#~ flatten and summarize them
			cleaned_results = megasearch.summary_results(results, freesearch_str)
			#~ render XML
			return self.cleanUpResultsXML(cleaned_results)
Example #6
	def __init__(self):
		import urlparse

		cfgsets = config_settings.CfgSettings()
		
		self.cgen = cfgsets.cgen
		self.logsdir = SearchModule.resource_path('logs/nzbmegasearch.log')
		self.scriptsdir = SearchModule.resource_path('get_stats.sh')
		self.cfg_urlidx = []
		self.excludeurls= ['http://ftdworld.net', 'https://nzbx.co']
		if(cfgsets.cfg is not None):
			self.config = cfgsets.cfg
			for i in xrange(len(self.config)):
				if(self.config[i]['builtin'] == 0):
					self.cfg_urlidx.append(i)
Example #7
def search():
    data = request.query.data
    str_lim = request.query.limit
    columns = request.query.getall("selected[]")
    logging.debug("search(). columns=" + str(columns))
    if(str_lim == ''):
        limit = 0
    else:
        limit = int(str_lim)
    callback_name = cgi.escape(request.query.callback)
    logging.debug("search(). callback=" + str(callback_name))
    res = SearchModule.search_by_id(data, limit, columns, True)
    add_list_to_process_queue(res[0:10])

    # to only show a few columns (ugly stuff)
    if(len(columns) == 0):
        show = ["sha1", "description", "size"]
    else:
        show = ["sha1"]
        for col in columns:
            dic = tree_menu.ids[int(col)]
            path = str(dic["path"]).split('.')[-1]
            show.append(path)

    responsex = {}
    responsex["normal"] = res
    responsex["show"] = show

    return jsonp(clean_tree(responsex), callback_name)
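jsonp() itself is not shown in these snippets; a minimal sketch of such a helper, an assumption for illustration only (the codebase's real jsonp may also set the JavaScript content type on the response):

import json

def jsonp(payload, callback_name):
    # wrap a JSON payload in the requested callback for <script>-tag use
    return "%s(%s)" % (callback_name, json.dumps(payload))

print jsonp({"show": ["sha1"]}, "cb")  # cb({"show": ["sha1"]})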
Example #8
def search():
    data = request.query.data
    str_lim = request.query.limit
    columns = request.query.getall("selected[]")
    #print(request.query.keys())
    print(columns)
    if (str_lim == ''):
        limit = 0
    else:
        limit = int(str_lim)
    callback_name = cgi.escape(request.query.callback)
    print "callback=" + str(callback_name)
    res = SearchModule.search_by_id(data, limit, columns)
    add_list_to_process_queue(res[0:10])

    # to only show a few columns (ugly hack)
    if (len(columns) == 0):
        show = ["sha1", "description", "size"]
    else:
        show = ["sha1"]
        for col in columns:
            dic = tree_menu.ids[int(col)]
            path = str(dic["path"]).split('.')[-1]
            show.append(path)

    responsex = {}
    responsex["normal"] = res
    responsex["show"] = show

    return jsonp(responsex, callback_name)
Example #9
def api_batch_process_file():
    logging.debug("api_batch_process_file(): Running Batch process")
    file_hashes = request.forms.get('file_hash')
    # transform file_hashes into a list of hashes
    if file_hashes is None:
        # note: the original passed a set literal here; jsonize needs a dict
        return jsonize({"message": "Error: file_hash parameter is missing."})
    not_found = []
    added_to_queue = 0
    downloaded_from_vt = 0
    for hash_id in file_hashes.split("\n"):
        hash_id = clean_hash(hash_id)
        if hash_id is None:
            continue
        data = "1=" + str(hash_id)
        res = SearchModule.search_by_id(data, 1, [], True)
        if (len(res) == 0):
            not_found.append(hash_id)
            continue
        else:
            sha1 = res[0]["sha1"]

        added_to_queue += 1
        logging.debug(str(hash_id) + " added to queue")
        add_hash_to_process_queue(sha1)

    responsex = str(added_to_queue) + " files added to the process queue.\n"
    if (downloaded_from_vt > 0):
        responsex += str(downloaded_from_vt) + " new hashes.\n"
    if (len(not_found) != 0):
        responsex += str(len(not_found)) + " hashes not found.\n"
        responsex += "Not Found:\n"
        for aux in not_found:
            responsex = responsex + str(aux) + "\n"

    return jsonize({"message": responsex})
Example #10
    def __init__(self, conf, cgen):
        self.trendsdir = SearchModule.resource_path('logs/')
        self.timeout = cgen['default_timeout']
        self.movie_trend = []
        self.movie_trend_ts = 0
        self.show_trend = []
        self.show_trend_ts = 0
        self.sugg_info = []
        self.active_trend = 1
        self.trends_refreshrate = cgen['trends_refreshrate']
        self.detached_trendpolling = cgen['large_server']
        self.best_k = cgen['trends_qty']
        self.cgen = cgen
        self.logic_expr = re.compile("(?:^|\s)([-+])(\w+)")
        self.predb_info = []
        self.tvrage_rqheaders = {
            'Connection': 'keep-alive;',
            'Cache-Control': 'max-age=0',
            'Accept':
            'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'User-Agent':
            'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.52 Safari/537.17',
            'Referer': 'http://services.tvrage.com/info.php?page=main',
            'Accept-Encoding': 'gzip,deflate,sdch',
            'Accept-Language': 'en-US,en;q=0.8',
            'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3'
        }

        if (int(cgen['general_trend']) == 0):
            self.active_trend = 0
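The tvrage_rqheaders dict is shaped to be passed straight to an HTTP client. A hedged usage sketch, assuming the requests library is available (the codebase itself may use urllib2, as Example #6 imports urlparse):

import requests

headers = {
    'Referer': 'http://services.tvrage.com/info.php?page=main',
    'Accept-Language': 'en-US,en;q=0.8',
}
resp = requests.get('http://services.tvrage.com/info.php?page=main',
                    headers=headers, timeout=10)
print resp.status_code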
Example #12
	def __init__(self, conf, cgen):
		self.trendsdir = SearchModule.resource_path('logs/')
		self.timeout = cgen['default_timeout']
		self.movie_trend = []
		self.movie_trend_ts = 0
		self.show_trend = []
		self.show_trend_ts = 0
		self.sugg_info = []
		self.active_trend = 1
		self.trends_refreshrate = cgen['trends_refreshrate']
		self.detached_trendpolling = cgen['large_server']
		self.best_k	= cgen['trends_qty']
		self.cgen	= cgen
		self.logic_expr = re.compile("(?:^|\s)([-+])(\w+)")
		self.predb_info = []
		self.tvrage_rqheaders = {
						'Connection': 'keep-alive;' ,
						'Cache-Control': 'max-age=0',
						'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
						'User-Agent': 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.52 Safari/537.17',
						'Referer': 'http://services.tvrage.com/info.php?page=main',
						'Accept-Encoding': 'gzip,deflate,sdch',
						'Accept-Language': 'en-US,en;q=0.8',
						'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3'
						 }

		if(int(cgen['general_trend']) == 0):
			self.active_trend = 0
Example #14
	def filter_obsolete_providers(self):
		
		#~ prevents obsolete modules from appearing in the search routine
		#~ this is an additional safety measure
		if 'loadedModules' not in globals():
			SearchModule.loadSearchModules()
		
		saved_cfg = []
		for index in xrange(len(self.cfg)):
			index_found = False
			for module in SearchModule.loadedModules:
				if( module.typesrch == self.cfg[index]['type']):
					index_found = True
			if(index_found is True):
				saved_cfg.append(self.cfg[index])
		self.cfg = copy.deepcopy(saved_cfg)
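The nested loop above is a membership test in disguise; with a set of loaded module types it collapses to one comprehension (a sketch, not the codebase's style; the types listed are assumptions for the demo):

loaded_types = set(['NAB', 'OMG', 'DSN'])
cfg = [{'type': 'NAB'}, {'type': 'OLD'}, {'type': 'OMG'}]
saved_cfg = [c for c in cfg if c['type'] in loaded_types]
print [c['type'] for c in saved_cfg]  # ['NAB', 'OMG']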
Example #16
def get_result_from_av():
    hash_id = request.query.file_hash
    if len(hash_id) == 0:
        response.status = 400  # was 'response.code', which bottle ignores
        return jsonize({'error': 4, 'error_message': 'file_hash parameter is missing.'})
    hash_id = clean_hash(hash_id)
    if not valid_hash(hash_id):
        return jsonize({'error': 5, 'error_message': 'Invalid hash format.'})
    if (len(hash_id) != 40):
        data = "1=" + str(hash_id)
        res = SearchModule.search_by_id(data, 1, [], True)
        if (len(res) == 0):
            response.status = 400
            return jsonize({'error': 6, 'error_message': 'File not found'})
        else:
            sha1 = res[0]["sha1"]
    else:
        sha1 = hash_id
    if (vt_key()):
        av_result = get_av_result(sha1)
    else:
        return jsonize({'error': 7, "error_message": "Error: VirusTotal API key missing from secrets.py file"})
    if (av_result is None):
        return jsonize({"error": 8, "error_message": "Cannot get analysis (hash not found in VT? out of credits?)"})
    return jsonize({"message": "AV scans downloaded."})
Example #17
    def ask(self, arguments):
        self.args = arguments
        self.search_str = SearchModule.sanitize_strings(
            self.args['q']).replace(".", " ")
        movieinfo = self.imdb_titlemovieinfo()
        sugg_info_raw = self.movie_bestmatch(movieinfo)
        self.sugg_info = self.prepareforquery(sugg_info_raw)
Example #18
    def dosearch(self, args):
        #~ restore originals
        self.cfg = copy.deepcopy(self.cfg_cpy)

        if ('q' not in args):
            self.results = []
            return self.results
        nuqry = args['q'] + ' ' + self.cgen['searchaddontxt']
        self.logic_items = self.logic_expr.findall(nuqry)
        self.qry_nologic = self.logic_expr.sub(" ", nuqry)
        if ('selcat' in args):
            if (args['selcat'] != ""):
                self.qry_nologic += " " + args['selcat']

        #~ speed class
        speed_class_sel = 1
        if ('tm' in args):
            speed_class_sel = int(args['tm'])

        #~ speed class deepsearch
        self.ds.set_timeout_speedclass(speed_class_sel)
        #~ speed class Nabbased
        for conf in self.cfg:
            if ((conf['speed_class'] <= speed_class_sel) and (conf['valid'])):
                conf['timeout'] = self.cgen['timeout_class'][speed_class_sel]
                #~ print conf['type'] + " " + str(conf['timeout'] ) + ' ' + str(speed_class_sel )
            else:
                conf['valid'] = 0

        if (len(args['q']) == 0):
            if ('selcat' in args):
                if (len(args['selcat']) == 0):
                    self.results = []
                    return self.results
            else:
                self.results = []
                return self.results
        if (self.qry_nologic.replace(" ", "") == ""):
            self.results = []
            return self.results

        self.cleancache()
        self.resultsraw = self.chkforcache(
            self.wrp.chash64_encode(self.qry_nologic), speed_class_sel)
        if (self.resultsraw is None):
            self.resultsraw = SearchModule.performSearch(
                self.qry_nologic, self.cfg, self.ds)

        if (self.cgen['smartsearch'] == 1):
            #~ smartsearch
            self.results = summary_results(self.resultsraw, self.qry_nologic,
                                           self.logic_items)
        else:
            #~ no cleaning just flatten in one array
            self.results = []
            for provid in xrange(len(self.resultsraw)):
                for z in xrange(len(self.resultsraw[provid])):
                    if (self.resultsraw[provid][z]['title'] != None):
                        self.results.append(self.resultsraw[provid][z])
Example #19
def html_builtin_output(cffile, genopt):
	count = 0
	
	if 'SearchModule.loadedModules' not in globals():
		SearchModule.loadSearchModules()
	
	cffileb = []		
	for module in SearchModule.loadedModules:
		if(module.builtin):
			option='checked=yes'
			flogin=0
			login_name =  ''
			login_pwd = ''
			if(module.active == 0):
				option=''
			for i in xrange(len(cffile)):
				if(cffile[i]['type'] == module.typesrch):
					if(cffile[i]['valid'] == '0'):
						option=''
					else: 	
						option='checked=yes'
					
					login_name=cffile[i]['login']
					login_pwd=cffile[i]['pwd']
					
			if(module.login == 1):
				flogin = 1
			
			tmpcfg= {'stchk' : option,
					'humanname' : module.name,
					'idx' : count,
					'type' : module.typesrch,
					'flogin': flogin,
					'loginname': login_name,
					'loginpwd': login_pwd,
					}
			cffileb.append(tmpcfg)
			count = count + 1

	count = 0
	for i in xrange(len(cffile)):
		if(cffile[i]['builtin'] == 0):
			cffile[i]['idx'] =  count
			count = count + 1

	return render_template('config.html', cfg=cffile, cnt=count,  genopt = genopt, cnt_max=MAX_PROVIDER_NUMBER, cfg_bi=cffileb)
Example #20
	def generate_tvserie_nabresponse(self,tvrage_show ):
		#~ compile string
		season_num = self.args.get('season',-1, type=int)
		serie_search_str = SearchModule.sanitize_strings(tvrage_show['showtitle'])
		if(self.args.has_key('ep')):
			ep_num = self.args.get('ep',-1, type=int)			
			serie_search_str = serie_search_str + '.s%02d' % season_num + 'e%02d' % ep_num
		else:	
			serie_search_str = serie_search_str + '.s%02d' % season_num 
		self.typesearch = 1
		self.searchstring = serie_search_str
		#~ compile results				
		results = SearchModule.performSearch(serie_search_str, self.cfg )		
		#~ flatten and summarize them
		cleaned_results = megasearch.summary_results(results,serie_search_str)
		#~ render XML
		return self.cleanUpResultsXML(cleaned_results)
Example #22
    def filter_obsolete_providers(self):

        #~ prevents obsolete modules from appearing in the search routine
        #~ this is an additional safety measure
        if 'loadedModules' not in globals():
            SearchModule.loadSearchModules()

        saved_cfg = []
        if (self.cfg is not None):
            for index in xrange(len(self.cfg)):
                index_found = False
                for module in SearchModule.loadedModules:
                    if (module.typesrch == self.cfg[index]['type']):
                        index_found = True
                if (index_found is True):
                    saved_cfg.append(self.cfg[index])
            self.cfg = copy.deepcopy(saved_cfg)
Example #23
def api_batch_process_debug_file():
    yield "<html><body><pre>"
    yield "Running Batch process\n"
    file_hashes = request.forms.get('file_hash')
    #print(dir(request.forms))
    #print(request.forms.keys())
    # transform file_hashes into a list of hashes
    not_found = []
    added_to_queue = 0
    downloaded_from_vt = 0
    for hash_id in file_hashes.split("\n"):
        hash_id = clean_hash(hash_id)
        if hash_id is None:
            continue
        data = "1=" + hash_id
        res = SearchModule.search_by_id(data, 1, [], False)
        if (len(res) == 0):
            print "downloading " + str(hash_id) + " from vt"
            sha1 = SearchModule.add_file_from_vt(hash_id)
            if (sha1 == None):
                not_found.append(hash_id)
                continue
            else:
                downloaded_from_vt += 1
        else:
            sha1 = res[0]["sha1"]

        added_to_queue += 1
        add_hash_to_process_queue(sha1)
        if (env['auto_get_av_result']):
            get_av_result(sha1)
        yield str(sha1) + "\n"

    responsex = str(added_to_queue) + " files added to the process queue.\n"
    if (downloaded_from_vt > 0):
        responsex += str(downloaded_from_vt) + " new hashes.\n"
    if (len(not_found) != 0):
        responsex += str(len(not_found)) + " hashes not found.\n"
        responsex += "Not Found:\n"
        for aux in not_found:
            responsex = responsex + str(aux) + "\n"
    yield responsex
    yield "END"
Example #24
def api_batch_process_debug_file():
    yield "<html><body><pre>"
    yield "Running Batch process\n"
    file_hashes = request.forms.get('file_hash')
    #print(dir(request.forms))
    #print(request.forms.keys())
    # transform file_hashes into a list of hashes
    not_found = []
    added_to_queue = 0
    downloaded_from_vt = 0
    for hash_id in file_hashes.split("\n"):
        hash_id = clean_hash(hash_id)
        if hash_id is None:
            continue
        data = "1=" + hash_id
        res = SearchModule.search_by_id(data, 1, [], False)
        if (len(res) == 0):
            print "downloading " + str(hash_id) + " from vt"
            sha1 = SearchModule.add_file_from_vt(hash_id)
            if (sha1 == None):
                not_found.append(hash_id)
                continue
            else:
                downloaded_from_vt += 1
        else:
            sha1 = res[0]["sha1"]

        added_to_queue += 1
        add_hash_to_process_queue(sha1)
        # this yield was tab-indented amid spaces in the original, which is
        # rejected under python -tt and Python 3
        yield str(sha1) + "\n"

    responsex = str(added_to_queue) + " files added to the process queue.\n"
    if (downloaded_from_vt > 0):
        responsex += str(downloaded_from_vt) + " new hashes.\n"
    if (len(not_found) != 0):
        responsex += str(len(not_found)) + " hashes not found.\n"
        responsex += "Not Found:\n"
        for aux in not_found:
            responsex = responsex + str(aux) + "\n"
    yield responsex
    yield "END"
Example #25
	def prepareforquery_show(self, sugg_info_raw, lastepisode, sugg_info):	
		
		for i in xrange(len(lastepisode)):
			si = {'searchstr': SearchModule.sanitize_strings(sugg_info_raw['title']) 
								+ '.S%02d' % int(lastepisode[i]['season']) 
								+  'E%02d' %  int(lastepisode[i]['ep']),
				  'prettytxt': sugg_info_raw['title'] +  ' S%02d ' %  int(lastepisode[i]['season']) 
								+ 'E%02d' %  int(lastepisode[i]['ep']),
				  'imdb_url': sugg_info_raw['tvdb_url']}
			sugg_info.append(si)
		
		return sugg_info 
Example #27
def get_package_file():
    tmp_folder="/tmp/mass_download"
    subprocess.call(["mkdir","-p",tmp_folder]) 
    hashes = request.forms.dict.get("file_hash[]")
    if hashes is None:
        hashes = request.forms.get("file_hash").split("\n")
    if hashes is not None:
        if len(hashes) == 1:
            random_id=hashes[0]
        else:
            random_id = id_generator()
    else:
        return jsonize({'message':'Error. no file selected'})
    folder_path=os.path.join(tmp_folder,random_id)
    subprocess.call(["mkdir","-p",folder_path]) 
    zip_name=os.path.join(tmp_folder,random_id+".zip")
    
    pc=PackageController()
    
    for file_hash in hashes:
        file_hash = clean_hash(file_hash.replace('\r',''))
        
        data="1="+file_hash
        res=SearchModule.search_by_id(data,1)
        if(len(res)==0):
            pass
        else:    
            file_hash=res[0]["sha1"]
                
        res=pc.searchFile(file_hash)
        if res != 1 and res is not None:
            res=pc.getFile(file_hash) 
            file_name=os.path.join(folder_path,str(file_hash)+".codex")
            fd=open(file_name,"wb")
            fd.write(res)
            fd.close()
        elif res == 1:
            fd=open(os.path.join(folder_path,'readme.txt'),'a+')
            fd.write(str(file_hash)+" is not available to download.\n")
            fd.close()
        elif res is None:
            fd=open(os.path.join(folder_path,'readme.txt'),'a+')
            fd.write(str(file_hash)+" not found.")
            fd.close()
        else:
            print "Unknown res:"+str(res)
    
    subprocess.call(["zip","-P","codex","-jr", zip_name,folder_path])
    resp =  static_file(str(random_id)+".zip",root=tmp_folder,download=True)
    resp.set_cookie('fileDownload','true');
    # http://johnculviner.com/jquery-file-download-plugin-for-ajax-like-feature-rich-file-downloads/
    return resp
Example #28
def get_package_file():
    tmp_folder = "/tmp/mass_download"
    subprocess.call(["mkdir", "-p", tmp_folder])
    hashes = request.forms.dict.get("file_hash[]")
    if hashes is None:
        hashes = request.forms.get("file_hash").split("\n")
    if hashes is not None:
        if len(hashes) == 1:
            random_id = hashes[0]
        else:
            random_id = id_generator()
    else:
        return jsonize({'message': 'Error. no file selected'})
    folder_path = os.path.join(tmp_folder, random_id)
    subprocess.call(["mkdir", "-p", folder_path])
    zip_name = os.path.join(tmp_folder, random_id + ".zip")

    pc = PackageController()

    for file_hash in hashes:
        file_hash = clean_hash(file_hash.replace('\r', ''))

        data = "1=" + file_hash
        res = SearchModule.search_by_id(data, 1)
        if (len(res) == 0):
            pass
        else:
            file_hash = res[0]["sha1"]

        res = pc.searchFile(file_hash)
        if res != 1 and res is not None:
            res = pc.getFile(file_hash)
            file_name = os.path.join(folder_path, str(file_hash) + ".codex")
            fd = open(file_name, "wb")
            fd.write(res)
            fd.close()
        elif res == 1:
            fd = open(os.path.join(folder_path, 'readme.txt'), 'a+')
            fd.write(str(file_hash) + " is not available to download.\n")
            fd.close()
        elif res is None:
            fd = open(os.path.join(folder_path, 'readme.txt'), 'a+')
            fd.write(str(file_hash) + " not found.")
            fd.close()
        else:
            print "Unknown res:" + str(res)

    subprocess.call(["zip", "-P", "codex", "-jr", zip_name, folder_path])
    resp = static_file(str(random_id) + ".zip", root=tmp_folder, download=True)
    resp.set_cookie('fileDownload', 'true')
    # http://johnculviner.com/jquery-file-download-plugin-for-ajax-like-feature-rich-file-downloads/
    return resp
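The snippet shells out to zip -P because Python's zipfile module cannot write password-protected archives. For unencrypted bundles, a pure-Python alternative sketch (not what this codebase does):

import os
import zipfile

def zip_folder(folder_path, zip_name):
    # bundle every file in folder_path flat into zip_name, like zip -j
    with zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED) as zf:
        for name in os.listdir(folder_path):
            zf.write(os.path.join(folder_path, name), arcname=name)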
Example #29
	def generate_movie_nabresponse(self,imdb_show ):

		movie_search_str = imdb_show['movietitle'].lower().replace("'", "").replace("-", " ").replace(":", " ")
		movie_search_str = " ".join(movie_search_str.split()).replace(" ", ".") + '.' +imdb_show['year']

		#~ print movie_search_str
		self.searchstring = movie_search_str
		self.typesearch = 0
		#~ compile results
		results = SearchModule.performSearch(movie_search_str, self.cfg , self.cfg_ds )
		#~ flatten and summarize them
		cleaned_results = megasearch.summary_results(results,movie_search_str)
		#~ render XML
		return self.cleanUpResultsXML(cleaned_results)
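The title normalization chain above can be exercised on its own; a standalone check with a hypothetical title:

title, year = "The King's Speech - Part: One", '2010'
s = title.lower().replace("'", "").replace("-", " ").replace(":", " ")
s = " ".join(s.split()).replace(" ", ".") + '.' + year
print s  # the.kings.speech.part.one.2010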
Example #31
def get_result_from_av():
    hash_id = request.query.file_hash
    if len(hash_id) == 0:
        response.status = 400
        return jsonize({
            'error': 4,
            'error_message': 'file_hash parameter is missing.'
        })
    hash_id = clean_hash(hash_id)
    if not valid_hash(hash_id):
        return jsonize({'error': 5, 'error_message': 'Invalid hash format.'})
    if (len(hash_id) != 40):
        data = "1=" + str(hash_id)
        res = SearchModule.search_by_id(data, 1, [], True)
        if (len(res) == 0):
            response.status = 400
            return jsonize({'error': 6, 'error_message': 'File not found'})
        else:
            sha1 = res[0]["sha1"]
    else:
        sha1 = hash_id
    key_manager = KeyManager()

    if (key_manager.check_keys_in_secrets()):
        av_result = get_av_result(sha1, 'high')
    else:
        return jsonize({
            'error': 7,
            "error_message": "Error: VirusTotal API key missing from secrets.py file"
        })
    if (av_result.get('status') == "added"):
        return jsonize({"message": "AV scans downloaded."})
    elif (av_result.get('status') == "already_had_it"):
        return jsonize({"message": "File already have AV scans."})
    elif (av_result.get('status') == "not_found"):
        return jsonize({"error": 10, "error_message": "Not found on VT."})
    elif (av_result.get('status') == "no_key_available"):
        return jsonize({
            "error": 11,
            "error_message": "No key available right now. Please try again later."
        })
    else:
        logging.error("av_result for hash=" + str(sha1))
        logging.error("av_result=" + str(av_result))
        return jsonize({"error": 9, "error_message": "Cannot get analysis."})
Example #32
    def generate_tvserie_nabresponse_broadcast(self):

        addparams = dict(age="1500", t="tvsearch", cat="5040,5030")

        rawResults = SearchModule.performSearch("", self.cfg, self.cfg_ds, addparams)
        # ~ rawResults = SearchModule.performSearch('', self.cfg, None, addparams)
        results = []
        # ~ no cleaning just flatten in one array
        for provid in xrange(len(rawResults)):
            for z in xrange(len(rawResults[provid])):
                results.append(rawResults[provid][z])

        self.searchstring = ""
        self.typesearch = 1
        return self.cleanUpResultsXML(results)
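The "flatten in one array" loop above recurs throughout these snippets; it is equivalent to a single nested comprehension, checked standalone:

rawResults = [[{'title': 'a'}, {'title': 'b'}], [{'title': 'c'}]]
results = [item for provid in rawResults for item in provid]
print len(results)  # 3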
Example #33
	def prepareforquery(self, sugg_info_raw):	
		
		sugg_info = []
		for i in xrange(len(sugg_info_raw)):
			shorinfo = sugg_info_raw[i]['title']
			if (len(shorinfo) > MAX_CHAR_LEN):
				shorinfo = shorinfo[0:MAX_CHAR_LEN-2] + '..'
			si = {'searchstr': SearchModule.sanitize_strings(sugg_info_raw[i]['title']) +  '.' + sugg_info_raw[i]['year'] ,
				  'prettytxt': shorinfo + '('+ sugg_info_raw[i]['year'] + ')',
				  'imdb_url': sugg_info_raw[i]['imdb_url']}
			
			sugg_info.append(si)	  			
			#~ print si
			#~ print 'dcdddddddddddddddd'

		return sugg_info
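The title shortening in prepareforquery depends on the module constant MAX_CHAR_LEN; a standalone check of the truncation (the value 12 is an assumption for the demo):

MAX_CHAR_LEN = 12
shorinfo = 'A Rather Long Movie Title'
if len(shorinfo) > MAX_CHAR_LEN:
    shorinfo = shorinfo[0:MAX_CHAR_LEN - 2] + '..'
print shorinfo  # 'A Rather L..'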
Example #35
    def generate_tvserie_nabresponse_broadcast(self):

        addparams = dict(age='1500', t='tvsearch', cat='5040,5030')

        rawResults = SearchModule.performSearch('', self.cfg, self.cfg_ds,
                                                addparams)
        #~ rawResults = SearchModule.performSearch('', self.cfg, None, addparams)
        results = []
        #~ no cleaning just flatten in one array
        for provid in xrange(len(rawResults)):
            for z in xrange(len(rawResults[provid])):
                results.append(rawResults[provid][z])

        self.searchstring = ''
        self.typesearch = 1
        return self.cleanUpResultsXML(results)
Example #36
	def generate_tvserie_nabresponse_broadcast(self):
		
		addparams = dict(
						age= '1500',
						t='tvsearch',
						cat='5040,5030')
		
		rawResults = SearchModule.performSearch('', self.cfg, None, addparams)
		results = []
		#~ no cleaning just flatten in one array
		for provid in xrange(len(rawResults)):
			for z in xrange(len(rawResults[provid])):
				results.append(rawResults[provid][z])

		self.searchstring = ''
		return self.cleanUpResultsXML(results)
Example #37
def api_batch_process_file():
    print("Running Batch process")
    file_hashes = request.forms.get('file_hash')
    #print(dir(request.forms))
    #print(request.forms.keys())
    # transform file_hashes into a list of hashes
    not_found = []
    added_to_queue = 0
    downloaded_from_vt = 0
    for hash_id in file_hashes.split("\n"):
        hash_id = clean_hash(hash_id)
        if hash_id is None:
            continue
        data = "1=" + str(hash_id)
        res = SearchModule.search_by_id(data, 1, [], True)
        if (len(res) == 0):
            not_found.append(hash_id)
            continue
            """
            print "downloading "+str(hash_id)+" from vt"
            sha1=SearchModule.add_file_from_vt(hash_id)
            if(sha1==None):
                print "not found on vt: "+str(hash_id)
                not_found.append(hash_id)
                continue
            else:
                downloaded_from_vt+=1
            """
        else:
            sha1 = res[0]["sha1"]

        added_to_queue += 1
        print str(hash_id) + " added to queue"
        add_hash_to_process_queue(sha1)
        if (env['auto_get_av_result']):
            get_av_result(sha1)

    responsex = str(added_to_queue) + " files added to the process queue.\n"
    if (downloaded_from_vt > 0):
        responsex += str(downloaded_from_vt) + " new hashes.\n"
    if (len(not_found) != 0):
        responsex += str(len(not_found)) + " hashes not found.\n"
        responsex += "Not Found:\n"
        for aux in not_found:
            responsex = responsex + str(aux) + "\n"

    return jsonize({"message": responsex})
Example #38
    def dosearch(self, args):
        #~ restore originals
        self.cfg = copy.deepcopy(self.cfg_cpy)

        if ('q' not in args):
            self.results = []
            return self.results

        self.logic_items = self.logic_expr.findall(args['q'])
        self.qry_nologic = self.logic_expr.sub(" ", args['q'])
        if ('selcat' in args):
            self.qry_nologic += " " + args['selcat']

        #~ speed class
        speed_class_sel = 1
        if ('tm' in args):
            speed_class_sel = int(args['tm'])

        #~ speed class deepsearch
        self.ds.set_timeout_speedclass(speed_class_sel)
        #~ speed class Nabbased
        for conf in self.cfg:
            if ((conf['speed_class'] <= speed_class_sel) and (conf['valid'])):
                conf['timeout'] = self.cgen['timeout_class'][speed_class_sel]
                #~ print conf['type'] + " " + str(conf['timeout'] ) + ' ' + str(speed_class_sel )
            else:
                conf['valid'] = 0

        if (len(args['q']) == 0):
            if ('selcat' in args):
                if (len(args['selcat']) == 0):
                    self.results = []
                    return self.results
            else:
                self.results = []
                return self.results
        if (self.qry_nologic.replace(" ", "") == ""):
            self.results = []
            return self.results

        self.logic_items = self.logic_expr.findall(args['q'])
        results = SearchModule.performSearch(self.qry_nologic, self.cfg,
                                             self.ds)
        self.results = summary_results(results, self.qry_nologic,
                                       self.logic_items)
Example #39
def api_batch_process_file():
    print("Running Batch process")
    file_hashes = request.forms.get('file_hash')
    #print(dir(request.forms))
    #print(request.forms.keys())
    # transform file_hashes into a list of hashes
    not_found = []
    added_to_queue = 0
    downloaded_from_vt = 0
    for hash_id in file_hashes.split("\n"):
        hash_id = clean_hash(hash_id)
        if hash_id is None:
            continue
        data = "1=" + str(hash_id)
        res = SearchModule.search_by_id(data, 1, [], False)
        if (len(res) == 0):
            not_found.append(hash_id)
            continue
            """
            print "downloading "+str(hash_id)+" from vt"
            sha1=SearchModule.add_file_from_vt(hash_id)
            if(sha1==None):
                print "not found on vt: "+str(hash_id)
                not_found.append(hash_id)
                continue
            else:
                downloaded_from_vt+=1
            """
        else:
            sha1 = res[0]["sha1"]

        added_to_queue += 1
        print str(hash_id) + " added to queue"
        add_hash_to_process_queue(sha1)

    responsex = str(added_to_queue) + " files added to the process queue.\n"
    if (downloaded_from_vt > 0):
        responsex += str(downloaded_from_vt) + " new hashes.\n"
    if (len(not_found) != 0):
        responsex += str(len(not_found)) + " hashes not found.\n"
        responsex += "Not Found:\n"
        for aux in not_found:
            responsex = responsex + str(aux) + "\n"

    return jsonize({"message": responsex})
Example #40
    def dosearch(self, args):
        # ~ restore originals
        self.cfg = copy.deepcopy(self.cfg_cpy)

        if "q" not in args:
            self.results = []
            return self.results

        self.logic_items = self.logic_expr.findall(args["q"])
        self.qry_nologic = self.logic_expr.sub(" ", args["q"])
        if "selcat" in args:
            self.qry_nologic += " " + args["selcat"]

            # ~ speed class
        speed_class_sel = 1
        if "tm" in args:
            speed_class_sel = int(args["tm"])

            # ~ speed class deepsearch
        self.ds.set_timeout_speedclass(speed_class_sel)
        # ~ speed class Nabbased
        for conf in self.cfg:
            if (conf["speed_class"] <= speed_class_sel) and (conf["valid"]):
                conf["timeout"] = self.cgen["timeout_class"][speed_class_sel]
                # ~ print conf['type'] + " " + str(conf['timeout'] ) + ' ' + str(speed_class_sel )
            else:
                conf["valid"] = 0

        if len(args["q"]) == 0:
            if "selcat" in args:
                if len(args["selcat"]) == 0:
                    self.results = []
                    return self.results
            else:
                self.results = []
                return self.results
        if self.qry_nologic.replace(" ", "") == "":
            self.results = []
            return self.results

        self.logic_items = self.logic_expr.findall(args["q"])
        results = SearchModule.performSearch(self.qry_nologic, self.cfg, self.ds)
        self.results = summary_results(results, self.qry_nologic, self.logic_items)
Example #41
    def dosearch_rss(self, arguments, hname):
        self.args = arguments
        self.rqurl = hname.scheme + "://" + hname.netloc
        addparams = dict(age="1500", limit="20000", t="search", cat="1000,2000,3000,4000,5000,6000,7000")

        if "cat" in self.args:
            addparams["cat"] = self.args["cat"]

        rawResults = SearchModule.performSearch("", self.cfg, self.cfg_ds, addparams)
        results = []
        # ~ no cleaning just flatten in one array
        for provid in xrange(len(rawResults)):
            if rawResults[provid] is not None:
                for z in xrange(len(rawResults[provid])):
                    results.append(rawResults[provid][z])

        self.searchstring = ""
        self.typesearch = 3

        return self.cleanUpResultsXML(results)
Example #42
def get_result_from_av():
    hash_id = request.query.file_hash
    if len(hash_id) == 0:
        response.status = 400
        return jsonize({'error': 4, 'error_message': 'file_hash parameter is missing.'})
    hash_id = clean_hash(hash_id)
    if not valid_hash(hash_id):
        return jsonize({'error': 5, 'error_message': 'Invalid hash format.'})
    if(len(hash_id) != 40):
        data = "1=" + str(hash_id)
        res = SearchModule.search_by_id(data, 1, [], True)
        if(len(res) == 0):
            response.status = 400
            return jsonize({'error': 6, 'error_message': 'File not found'})
        else:
            sha1 = res[0]["sha1"]
    else:
        sha1 = hash_id
    key_manager = KeyManager()

    if(key_manager.check_keys_in_secrets()):
        av_result = get_av_result(sha1, 'high')
    else:
        return jsonize({'error': 7, "error_message": "Error: VirusTotal API key missing from secrets.py file"})
    if(av_result.get('status') == "added"):
        return jsonize({"message": "AV scans downloaded."})
    elif(av_result.get('status') == "already_had_it"):
        return jsonize({"message": "File already have AV scans."})
    elif(av_result.get('status') == "not_found"):
        return jsonize({"error": 10, "error_message": "Not found on VT."})
    elif(av_result.get('status') == "no_key_available"):
        return jsonize({"error": 11, "error_message": "No key available right now. Please try again later."})
    else:
        logging.error("av_result for hash=" + str(sha1))
        logging.error("av_result=" + str(av_result))
        return jsonize({"error": 9, "error_message": "Cannot get analysis."})
Example #43
	def dosearch_rss(self, arguments, hname):
		self.args = arguments
		self.rqurl = hname.scheme+'://'+hname.netloc			
		addparams = dict(
						age= '1500',
						limit='20000',
						t='search',
						cat='1000,2000,3000,4000,5000,6000,7000')
		
		if('cat' in self.args):
			addparams['cat'] = self.args['cat']
			
		rawResults = SearchModule.performSearch('', self.cfg, self.cfg_ds, addparams)
		results = []
		#~ no cleaning just flatten in one array
		for provid in xrange(len(rawResults)):
			if(rawResults[provid] is not None):
				for z in xrange(len(rawResults[provid])):
					results.append(rawResults[provid][z])

		self.searchstring = ''
		self.typesearch = 3

		return self.cleanUpResultsXML(results)
Example #45
    def cleanUpResults(self, params):
        sugg_list = params['sugg']
        results = self.results
        svalid = self.svalid
        args = params['args']
        ver_notify = params['ver']
        niceResults = []
        existduplicates = 0

        #~ tries to match predb entries
        self.matchpredb(results, params['predb'])

        #~ avoids GMT problems
        for i in xrange(len(results)):
            totdays = int(
                (time.time() - results[i]['posting_date_timestamp']) /
                (3600 * 24))
            if (totdays == 0):
                totdays = float(
                    (time.time() - results[i]['posting_date_timestamp']) /
                    (3600))
                if (totdays < 0):
                    totdays = -totdays
                totdays = totdays / 100
            results[i]['posting_date_timestamp_refined'] = float(totdays)

        #~ sorting
        if 'order' not in args:
            results = sorted(results,
                             key=itemgetter('posting_date_timestamp_refined'),
                             reverse=False)
        else:
            if (args['order'] == 't'):
                results = sorted(results, key=itemgetter('title'))
            if (args['order'] == 's'):
                results = sorted(results, key=itemgetter('size'), reverse=True)
            if (args['order'] == 'p'):
                results = sorted(results, key=itemgetter('providertitle'))
            if (args['order'] == 'd'):
                results = sorted(
                    results,
                    key=itemgetter('posting_date_timestamp_refined'),
                    reverse=False)
            if (args['order'] == 'x'):
                results = sorted(results,
                                 key=itemgetter('predb'),
                                 reverse=True)
            if (args['order'] == 'c'):
                results = sorted(results,
                                 key=itemgetter('categ'),
                                 reverse=True)

        #~ do nice
        for i in xrange(len(results)):
            if (results[i]['ignore'] == 2):
                continue

            if (results[i]['ignore'] == 1):
                existduplicates = 1

            # Convert sizes to the smallest SI unit (note that these are powers of 10, not powers of 2, i.e. OS X file sizes rather than Windows/Linux file sizes)
            szf = float(results[i]['size'] / 1000000.0)
            mgsz = ' MB '
            if (szf > 1000.0):
                szf = szf / 1000
                mgsz = ' GB '
            fsze1 = str(round(szf, 1)) + mgsz

            if (results[i]['size'] == -1):
                fsze1 = 'N/A'
            totdays = results[i]['posting_date_timestamp_refined']
            if (totdays < 1):
                totdays = str(int(totdays * 100)) + "h"
            else:
                totdays = str(int(totdays)) + "d"

            category_str = ''
            keynum = len(results[i]['categ'])
            keycount = 0
            for key in results[i]['categ'].keys():
                category_str = category_str + key
                keycount = keycount + 1
                if (keycount < keynum):
                    category_str = category_str + ' - '
            if (results[i]['url'] is None):
                results[i]['url'] = ""

            qryforwarp = self.wrp.chash64_encode(results[i]['url'])
            if ('req_pwd' in results[i]):
                qryforwarp += '&m=' + results[i]['req_pwd']
            niceResults.append({
                'id': i,
                'url': results[i]['url'],
                'url_encr': 'warp?x=' + qryforwarp,
                'title': results[i]['title'],
                'filesize': fsze1,
                'cat': category_str.upper(),
                'age': totdays,
                'details': results[i]['release_comments'],
                'details_deref': 'http://www.derefer.me/?' + results[i]['release_comments'],
                'providerurl': results[i]['provider'],
                'providertitle': results[i]['providertitle'],
                'ignore': results[i]['ignore'],
                'predb': results[i]['predb'],
                'predb_lnk': results[i]['predb_lnk']
            })
        send2nzbget_exist = None
        if ('nzbget_url' in self.cgen):
            if (len(self.cgen['nzbget_url'])):
                send2nzbget_exist = self.sckname

        send2sab_exist = None
        if ('sabnzbd_url' in self.cgen):
            if (len(self.cgen['sabnzbd_url'])):
                send2sab_exist = self.sckname
        speed_class_sel = 1
        if ('tm' in args):
            speed_class_sel = int(args['tm'])

        #~ save for caching
        if (self.resultsraw is not None):
            if (self.cgen['cache_active'] == 1 and len(self.resultsraw) > 0):
                if (len(self.collect_info) < self.cgen['max_cache_qty']):
                    if (self.chkforcache(
                            self.wrp.chash64_encode(
                                SearchModule.sanitize_strings(
                                    self.qry_nologic)), speed_class_sel) is
                            None):
                        collect_all = {}
                        collect_all['searchstr'] = self.wrp.chash64_encode(
                            SearchModule.sanitize_strings(self.qry_nologic))
                        collect_all['tstamp'] = time.time()
                        collect_all['resultsraw'] = self.resultsraw
                        collect_all['speedclass'] = speed_class_sel
                        self.collect_info.append(collect_all)
                        #~ print 'Result added to the cache list'
        #~ ~ ~ ~ ~ ~ ~ ~ ~
        scat = ''
        if ('selcat' in params['args']):
            scat = params['args']['selcat']

        return render_template('main_page.html',
                               results=niceResults,
                               exist=existduplicates,
                               vr=ver_notify,
                               args=args,
                               nc=svalid,
                               sugg=sugg_list,
                               speed_class_sel=speed_class_sel,
                               send2sab_exist=send2sab_exist,
                               send2nzbget_exist=send2nzbget_exist,
                               cgen=self.cgen,
                               trend_show=params['trend_show'],
                               trend_movie=params['trend_movie'],
                               debug_flag=params['debugflag'],
                               sstring=params['args']['q'],
                               scat=scat,
                               selectable_opt=params['selectable_opt'],
                               search_opt=params['search_opt'],
                               sid=params['sid'],
                               servercode_return=self.returncode_fine,
                               large_server=self.cgen['large_server'],
                               motd=params['motd'])
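The size pretty-printer in the loop above uses powers of 10 (1 MB = 10^6 bytes, OS X-style, as its comment notes); extracted as a standalone helper for a quick check:

def pretty_size(size_bytes):
    if size_bytes == -1:
        return 'N/A'
    szf = float(size_bytes / 1000000.0)
    mgsz = ' MB '
    if szf > 1000.0:
        szf = szf / 1000
        mgsz = ' GB '
    return str(round(szf, 1)) + mgsz

print pretty_size(1234567890)  # '1.2 GB '
print pretty_size(-1)          # 'N/A'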
Example #46
import threading
from SuggestionModule import SuggestionResponses
import config_settings
import SearchModule
import logging
import logging.handlers
import time

DEBUGFLAG = True

motd = '\n\n~*~ ~*~ NZBMegasearcH detached trend ~*~ ~*~'
print motd

#~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
cfgsets = config_settings.CfgSettings()
logsdir = SearchModule.resource_path('logs/')
logging.basicConfig(
    filename=logsdir + 'nzbmegasearch_detachedtrend.log',
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
log = logging.getLogger(__name__)
handler = logging.handlers.RotatingFileHandler(
    logsdir + 'nzbmegasearch_detachedtrend.log',
    maxBytes=cfgsets.cgen['log_size'],
    backupCount=cfgsets.cgen['log_backupcount'])
log.addHandler(handler)
log.info(motd)
cfgsets.cgen['trends'] = 1
cfgsets.cgen['search_suggestions'] = 1
cfgsets.cgen['large_server'] = False
sugg = SuggestionResponses(cfgsets.cfg, cfgsets.cgen)
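A minimal standalone version of the same rotating-log setup (file name and limits here are assumptions for the demo; the snippet above takes them from cfgsets.cgen):

import logging
import logging.handlers

log = logging.getLogger('demo')
handler = logging.handlers.RotatingFileHandler('demo.log',
                                               maxBytes=1024 * 1024,
                                               backupCount=3)
handler.setFormatter(logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
log.addHandler(handler)
log.setLevel(logging.INFO)
log.info('detached trend poller started')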
Example #47
def api_batch_process_debug_file():
    yield "<html><body><pre>"
    yield "Running Batch process\n"
    file_hashes = request.forms.get('file_hash')
    if file_hashes is None:
        response.status = 422
        logging.debug("api_batch_process_debug_file(): file_hash is missing")
        yield "file_hash parameter is missing"

    # transform file_hashes into a list of hashes.
    not_found = []
    added_to_queue = 0
    downloaded_from_vt = 0
    for hash_id in file_hashes.split("\n"):
        hash_id = clean_hash(hash_id)
        if hash_id is None:
            continue
        data = "1=" + hash_id
        if (len(hash_id) == 40 or len(hash_id) == 32):
            pc = PackageController()
            res = pc.getFile(hash_id)
            if res is not None and len(
                    SearchModule.search_by_id(data, 1, [], False)) == 0:
                logging.debug("Processing right now: " + str(hash_id))
                process_file(hash_id)
                if (env['auto_get_av_result']):
                    add_task_to_download_av_result(hash_id)
                    continue
        res = SearchModule.search_by_id(data, 1, [], False)
        if (len(res) == 0):
            logging.debug("process_debug(): metadata of " + str(hash_id) +
                          " was not found. We will look in Pc. hash length: " +
                          str(len(hash_id)))
            if (len(hash_id) == 40 or len(hash_id) == 32):
                pc = PackageController()
                res = pc.getFile(hash_id)
                if res is not None:
                    logging.debug("process_debug(): hash was found (" +
                                  str(hash_id) + ")")
                else:
                    logging.debug("process_debug(): hash was not found(" +
                                  str(hash_id) + ")")
            logging.debug("process_debug():")
            logging.debug("process_debug(): going to search " + str(hash_id) +
                          " in vt")
            add_response = SearchModule.add_file_from_vt(hash_id)
            sha1 = add_response.get('hash')
            if (sha1 is None):
                logging.debug("process_debug(): sha1 is None: " + str(hash_id))
                not_found.append(hash_id)
                continue
            else:
                downloaded_from_vt += 1
        else:
            sha1 = res[0]["sha1"]

        added_to_queue += 1
        add_hash_to_process_queue(sha1)
        if (env['auto_get_av_result']):
            add_task_to_download_av_result(sha1)
        yield str(sha1) + "\n"

    responsex = str(added_to_queue) + " files added to the process queue.\n"
    if (downloaded_from_vt > 0):
        responsex += str(downloaded_from_vt) + " new hashes.\n"
    if (len(not_found) != 0):
        responsex += str(len(not_found)) + " hashes not found.\n"
        responsex += "Not Found:\n"
        for aux in not_found:
            responsex = responsex + str(aux) + "\n"
    yield responsex
    yield "END"
Beispiel #48
0
    def check(self, args):
        ret = 0

        if (('hostname' in args) and ('type' in args)):

            # Perform the search using every module
            global globalResults
            if not hasattr(SearchModule, 'loadedModules'):
                SearchModule.loadSearchModules()

            # ~ specials
            if (args['type'] == 'OMG'):
                ret = 1
                cfg_tmp = {
                    'valid': 1,
                    'type': 'OMG',
                    'speed_class': 2,
                    'extra_class': 0,
                    'login': args['user'],
                    'pwd': args['pwd'],
                    'timeout': self.cgen['timeout_class'][2],
                    'builtin': 1
                }
                for module in SearchModule.loadedModules:
                    if (module.typesrch == 'OMG'):
                        module.search('Ubuntu', cfg_tmp)
                print cfg_tmp['retcode']
                if (cfg_tmp['retcode'][0] != 200):
                    ret = 0

            # ~ server based API
            if (args['type'] == 'NAB'):
                ret = 1
                cfg_tmp = {
                    'url': args['hostname'],
                    'type': 'NAB',
                    'api': args['api'],
                    'speed_class': 2,
                    'extra_class': 0,
                    'valid': 1,
                    'timeout': self.cgen['timeout_class'][2],
                    'builtin': 0
                }
                for module in SearchModule.loadedModules:
                    if (module.typesrch == 'NAB'):
                        module.search('Ubuntu', cfg_tmp)
                print cfg_tmp['retcode']
                if (cfg_tmp['retcode'][0] != 200):
                    ret = 0

            # ~ server based WEB
            if (args['type'] == 'DSN' or args['type'] == 'DS_GNG'):

                cfg_deep_tmp = [{
                    'url': args['hostname'],
                    'user': args['user'],
                    'pwd': args['pwd'],
                    'type': args['type'],
                    'speed_class': 2,
                    'extra_class': 0,
                    'valid': 1,
                }]
                ds_tmp = DeepsearchModule.DeepSearch(cfg_deep_tmp, self.cgen)
                ret_bool = ds_tmp.ds[0].search('Ubuntu')
                if (ret_bool):
                    ret = 1
                else:
                    ret = 0

        return ret
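A usage sketch for check(): Beispiel #56 below instantiates the enclosing class as miscdefs.ChkServer(cfgsets.cgen), so testing an indexer presumably looks like this (hostname and API key are placeholders):

testserver = miscdefs.ChkServer(cfgsets.cgen)
ok = testserver.check({'hostname': 'https://indexer.example',
                       'type': 'NAB',
                       'api': 'YOUR_API_KEY'})
print 'reachable' if ok else 'unreachable'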
Beispiel #49
0
def get_sample_count():
    count = SearchModule.count_documents()
    res = {"count": count}
    return jsonize(res)
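get_sample_count() relies on helpers defined elsewhere in the codebase; a minimal stand-in for jsonize, assuming it simply serializes the result dict to JSON (the real helper may differ):

import json

def jsonize(data):
    # assumption: plain JSON serialization
    return json.dumps(data)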
Beispiel #50
0
    def cleanUpResults(self, params):
        sugg_list = params['sugg']
        results = self.results
        svalid = self.svalid
        args = params['args']
        ver_notify = params['ver']
        niceResults = []
        existduplicates = 0

        # ~ tries to match predb entries
        self.matchpredb(results, params['predb'])

        # ~ avoids GMT problems
        for i in xrange(len(results)):
            totdays = int((time.time() - results[i]['posting_date_timestamp']) / (3600 * 24))
            if (totdays == 0):
                totdays = float((time.time() - results[i]['posting_date_timestamp']) / (3600))
                if (totdays < 0):
                    totdays = -totdays
                totdays = totdays / 100
            results[i]['posting_date_timestamp_refined'] = float(totdays)

        # ~ sorting
        if 'order' not in args:
            results = sorted(results, key=itemgetter('posting_date_timestamp_refined'), reverse=False)
        else:
            if (args['order'] == 't'):
                results = sorted(results, key=itemgetter('title'))
            if (args['order'] == 's'):
                results = sorted(results, key=itemgetter('size'), reverse=True)
            if (args['order'] == 'p'):
                results = sorted(results, key=itemgetter('providertitle'))
            if (args['order'] == 'd'):
                results = sorted(results, key=itemgetter('posting_date_timestamp_refined'), reverse=False)
            if (args['order'] == 'x'):
                results = sorted(results, key=itemgetter('predb'), reverse=True)
            if (args['order'] == 'c'):
                results = sorted(results, key=itemgetter('categ'), reverse=True)


        # ~ do nice
        for i in xrange(len(results)):
            if (results[i]['posting_date_timestamp_refined'] > self.cgen['daysretention']):
                continue

            if (results[i]['ignore'] == 2):
                continue

            if (results[i]['ignore'] == 1):
                existduplicates = 1

            # Convert sizes to the nearest SI unit (powers of 10, i.e. OS X-style file sizes, not the powers of 2 used on Windows/Linux)
            szf = float(results[i]['size'] / 1000000.0)
            mgsz = ' MB '
            if (szf > 1000.0):
                szf = szf / 1000
                mgsz = ' GB '
            fsze1 = str(round(szf, 1)) + mgsz

            if (results[i]['size'] == -1):
                fsze1 = 'N/A'
            totdays = results[i]['posting_date_timestamp_refined']
            if (totdays < 1):
                totdays = str(int(totdays * 100)) + "h"
            else:
                totdays = str(int(totdays)) + "d"

            category_str = ''
            keynum = len(results[i]['categ'])
            keycount = 0
            for key in results[i]['categ'].keys():
                category_str = category_str + key
                keycount = keycount + 1
                if (keycount < keynum):
                    category_str = category_str + ' - '
            if (results[i]['url'] is None):
                results[i]['url'] = ""

            qryforwarp = self.wrp.chash64_encode(results[i]['url'])
            if ('req_pwd' in results[i]):
                qryforwarp += '&m=' + results[i]['req_pwd']
            niceResults.append({
                'id': i,
                'url': results[i]['url'],
                'url_encr': 'warp?x=' + qryforwarp,
                'title': results[i]['title'],
                'filesize': fsze1,
                'cat': category_str.upper(),
                'age': totdays,
                'details': results[i]['release_comments'],
                'details_deref': 'http://www.derefer.me/?' + results[i]['release_comments'],
                'providerurl': results[i]['provider'],
                'providertitle': results[i]['providertitle'],
                'ignore': results[i]['ignore'],
                'predb': results[i]['predb'],
                'predb_lnk': results[i]['predb_lnk']
            })
        send2nzbget_exist = None
        if ('nzbget_url' in self.cgen):
            if (len(self.cgen['nzbget_url'])):
                send2nzbget_exist = self.sckname

        send2sab_exist = None
        if ('sabnzbd_url' in self.cgen):
            if (len(self.cgen['sabnzbd_url'])):
                send2sab_exist = self.sckname
        speed_class_sel = 1
        if ('tm' in args):
            speed_class_sel = int(args['tm'])

        # ~ save for caching
        if (self.resultsraw is not None):
            if (self.cgen['cache_active'] == 1 and len(self.resultsraw) > 0):
                if (len(self.collect_info) < self.cgen['max_cache_qty']):
                    if (self.chkforcache(self.wrp.chash64_encode(SearchModule.sanitize_strings(self.qry_nologic)),
                                         speed_class_sel) is None):
                        collect_all = {}
                        collect_all['searchstr'] = self.wrp.chash64_encode(
                            SearchModule.sanitize_strings(self.qry_nologic))
                        collect_all['tstamp'] = time.time()
                        collect_all['resultsraw'] = self.resultsraw
                        collect_all['speedclass'] = speed_class_sel
                        self.collect_info.append(collect_all)
                    # ~ print 'Result added to the cache list'
        # ~ ~ ~ ~ ~ ~ ~ ~ ~
        scat = ''
        if ('selcat' in params['args']):
            scat = params['args']['selcat']

        return render_template('main_page.html', results=niceResults, exist=existduplicates,
                               vr=ver_notify, args=args, nc=svalid, sugg=sugg_list,
                               speed_class_sel=speed_class_sel,
                               send2sab_exist=send2sab_exist,
                               send2nzbget_exist=send2nzbget_exist,
                               cgen=self.cgen,
                               trend_show=params['trend_show'],
                               trend_movie=params['trend_movie'],
                               debug_flag=params['debugflag'],
                               sstring=params['args']['q'],
                               scat=scat,
                               selectable_opt=params['selectable_opt'],
                               search_opt=params['search_opt'],
                               sid=params['sid'],
                               servercode_return=self.returncode_fine,
                               large_server=self.cgen['large_server'],
                               motd=params['motd'])
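For reference, the size formatting used above as a standalone helper (same constants as the snippet: SI units, powers of 10):

def fmt_size(nbytes):
    szf = float(nbytes / 1000000.0)
    unit = ' MB '
    if szf > 1000.0:
        szf = szf / 1000
        unit = ' GB '
    return str(round(szf, 1)) + unit

print fmt_size(1234567890)  # '1.2 GB '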
Beispiel #51
0
def summary_results(rawResults, strsearch, logic_items=[]):

    results = []
    titles = []
    sptitle_collection = []

    #~ all in one array
    for provid in xrange(len(rawResults)):
        for z in xrange(len(rawResults[provid])):
            rawResults[provid][z]['title'] = SearchModule.sanitize_html(
                rawResults[provid][z]['title'])
            title = SearchModule.sanitize_strings(
                rawResults[provid][z]['title'])
            titles.append(title)
            sptitle_collection.append(Set(title.split(".")))
            results.append(rawResults[provid][z])

    strsearch1 = SearchModule.sanitize_strings(strsearch)
    strsearch1_collection = Set(strsearch1.split("."))

    rcount = [0] * 3
    for z in xrange(len(results)):
        findone = 0
        results[z]['ignore'] = 0
        intrs = strsearch1_collection.intersection(sptitle_collection[z])
        if (len(intrs) == len(strsearch1_collection)):
            findone = 1
        else:
            results[z]['ignore'] = 2

        #~ print strsearch1_collection
        #~ print intrs
        #~ print findone
        #~ print '------------------'

        if (findone and results[z]['ignore'] == 0):
            #~ print titles[z]
            for v in xrange(z + 1, len(results)):
                if (titles[z] == titles[v]):
                    sz1 = float(results[z]['size'])
                    sz2 = float(results[v]['size'])
                    if (abs(sz1 - sz2) < 5000000):
                        results[z]['ignore'] = 1
        #~ stats
        rcount[results[z]['ignore']] += 1

    #~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
    #~ logic params
    exclude_coll = Set([])
    include_coll = Set([])
    #~ print '*'+logic_items[0][1]+'*'
    for i in xrange(len(logic_items)):
        if (logic_items[i][0] == '-'):
            exclude_coll.add(logic_items[i][1])
        if (logic_items[i][0] == '+'):
            include_coll.add(logic_items[i][1])
    if (len(include_coll)):
        for z in xrange(len(results)):
            if (results[z]['ignore'] < 2):
                intrs_i = include_coll.intersection(sptitle_collection[z])
                if (len(intrs_i) == 0):
                    results[z]['ignore'] = 2
    if (len(exclude_coll)):
        for z in xrange(len(results)):
            if (results[z]['ignore'] < 2):
                intrs_e = exclude_coll.intersection(sptitle_collection[z])
                if (len(intrs_e) > 0):
                    results[z]['ignore'] = 2
    #~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~

    mssg = '[' + strsearch1 + ']' + ' [' + strsearch + '] ' + str(
        rcount[0]) + ' ' + str(rcount[1]) + ' ' + str(rcount[2])
    print mssg
    log.info(mssg)

    return results
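The duplicate rule above treats two results with identical sanitized titles as the same release when their sizes differ by less than 5,000,000 bytes (~5 MB). The size window in isolation:

def same_release(sz1, sz2, window=5000000):
    return abs(float(sz1) - float(sz2)) < window

print same_release(700000000, 701000000)  # True: within ~5 MB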
Beispiel #52
0
    def html_editpage(self):

        count = 0
        if not hasattr(SearchModule, 'loadedModules'):
            SearchModule.loadSearchModules()

        dsearchsupport = DeepsearchModule.supportedengines()

        cffileb = []
        cffile = copy.deepcopy(self.cfg)
        cdsfile = self.cfg_deep
        genopt = self.cgen

        if (cffile is None):
            cffile = []

        if (cdsfile is None):
            cdsfile = []

        cdomainname = megasearch.getdomainext()

        for module in SearchModule.loadedModules:
            if (module.builtin):
                option = 'checked=yes'
                flogin = 0
                flogin_caption_user = '******'
                flogin_caption_pwd = 'pwd'
                login_name = ''
                login_pwd = ''
                speed_cl = 1
                if (module.active == 0):
                    option = ''
                for i in xrange(len(cffile)):
                    if (cffile[i]['type'] == module.typesrch):
                        if (cffile[i]['valid'] == 0):
                            option = ''
                        else:
                            option = 'checked=yes'

                        login_name = cffile[i]['login']
                        login_pwd = cffile[i]['pwd']
                        speed_cl = cffile[i]['speed_class']

                if (module.login == 1):
                    flogin = 1
                    #~ if('caption_login_user' in module):
                    if (hasattr(module, 'caption_login_user')):
                        flogin_caption_user = module.caption_login_user
                        flogin_caption_pwd = module.caption_login_pwd

                tmpcfg = {
                    'stchk': option,
                    'humanname': module.name,
                    'url': '',
                    'idx': count,
                    'speed_class': speed_cl,
                    'type': module.typesrch,
                    'flogin': flogin,
                    'flogin_caption_user': flogin_caption_user,
                    'flogin_caption_pwd': flogin_caption_pwd,
                    'loginname': login_name,
                    'loginpwd': login_pwd,
                }
                cffileb.append(tmpcfg)
                count = count + 1

        #~ scrapers with web login
        for dsearchmodule in dsearchsupport:
            if (dsearchmodule['opts']['builtin']):
                option = 'checked=yes'
                flogin = 0
                login_name = ''
                login_pwd = ''
                speed_cl = dsearchmodule['opts']['speed_cl']
                if (dsearchmodule['opts']['active'] == 0):
                    option = ''

                for i in xrange(len(cdsfile)):
                    if (cdsfile[i]['type'] == dsearchmodule['opts']['typesrch']):
                        if (cdsfile[i]['valid'] == 0):
                            option = ''
                        else:
                            option = 'checked=yes'
                        #~ speed_cl = cdsfile[i]['speed_cl']
                        login_name = cdsfile[i]['user']
                        login_pwd = cdsfile[i]['pwd']
                if (dsearchmodule['opts']['login'] == 1):
                    flogin = 1
                tmpcfg = {
                    'stchk': option,
                    'humanname': dsearchmodule['name'],
                    'url': dsearchmodule['opts']['url'],
                    'idx': count,
                    'flogin_caption_user': '******',
                    'flogin_caption_pwd': 'pwd',
                    'speed_class': speed_cl,
                    'type': dsearchmodule['opts']['typesrch'],
                    'flogin': flogin,
                    'loginname': login_name,
                    'loginpwd': login_pwd,
                }
                cffileb.append(tmpcfg)
                count = count + 1

        count = 0
        for i in xrange(len(cffile)):
            if (cffile[i]['builtin'] == 0):
                cffile[i]['idx'] = count
                cffile[i]['valid_verbose'] = ''
                if (cffile[i]['valid'] == 1):
                    cffile[i]['valid_verbose'] = 'checked=yes'
                count = count + 1
                sel_speedopt_tmp = copy.deepcopy(self.selectable_speedopt)
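                # speed_class is stored 1-based, so subtract 1 to index the 0-based option table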
                sel_speedopt_tmp[cffile[i]['speed_class'] - 1][2] = 'selected'
                cffile[i]['selspeed_sel'] = sel_speedopt_tmp

        sel_speedopt_basic = copy.deepcopy(self.selectable_speedopt)
        sel_speedopt_basic[0][2] = 'selected'

        count_ds = 0
        cdsfile_toshow1 = []
        for i in xrange(len(cdsfile)):
            if (cdsfile[i]['type'] == 'DSN'):
                cdsfile_toshow = copy.deepcopy(cdsfile[i])
                cdsfile_toshow['idx'] = count_ds
                cdsfile_toshow['valid_verbose'] = ''
                if (cdsfile_toshow['valid'] == 1):
                    cdsfile_toshow['valid_verbose'] = 'checked=yes'
                count_ds = count_ds + 1
                sel_speedopt_tmp = copy.deepcopy(self.selectable_speedopt)
                sel_speedopt_tmp[cdsfile[i]['speed_class'] - 1][2] = 'selected'
                cdsfile_toshow['selspeed_sel'] = sel_speedopt_tmp
                cdsfile_toshow1.append(cdsfile_toshow)

        possibleopt = megasearch.listpossiblesearchoptions()
        for slctg in possibleopt:
            if (slctg[0] == genopt['search_default']):
                slctg[2] = 'selected'

        tnarray = []
        for ntn in xrange(1, 50):
            if (genopt['trends_qty'] == ntn):
                tnarray.append([ntn, ntn, 'selected'])
            else:
                tnarray.append([ntn, ntn, ''])

        genopt['general_https_verbose'] = ''
        genopt['general_trend_verbose'] = ''
        genopt['general_suggestion_verbose'] = ''
        genopt['smartsearch_verbose'] = ''
        genopt['max_cache_verbose'] = ''
        genopt['predb_active_verbose'] = ''
        if (genopt['predb_active'] == 1):
            genopt['predb_active_verbose'] = 'checked=yes'
        if (genopt['general_https'] == 1):
            genopt['general_https_verbose'] = 'checked=yes'
        if (genopt['general_suggestion'] == 1):
            genopt['general_suggestion_verbose'] = 'checked=yes'
        if (genopt['general_trend'] == 1):
            genopt['general_trend_verbose'] = 'checked=yes'
        if (genopt['smartsearch'] == 1):
            genopt['smartsearch_verbose'] = 'checked=yes'
        if (genopt['cache_active'] == 1):
            genopt['cache_active_verbose'] = 'checked=yes'
        genopt['general_ipaddress_verbose'] = 'AUTO'
        if (genopt['general_ipaddress'] != ''):
            genopt['general_ipaddress_verbose'] = genopt['general_ipaddress']

        openshift_install = False
        if (len(self.dirconf)):
            openshift_install = True
        return render_template('config.html',
                               cfg=cffile,
                               cfg_dp=cdsfile_toshow1,
                               cnt=count,
                               cnt_ds=count_ds,
                               genopt=genopt,
                               selectable_opt=possibleopt,
                               sel_speedopt_basic=sel_speedopt_basic,
                               openshift_install=openshift_install,
                               tnarray=tnarray,
                               cdomainname=cdomainname,
                               cnt_max=MAX_PROVIDER_NUMBER,
                               cfg_bi=cffileb)
Beispiel #53
0
import threading
from SuggestionModule import SuggestionResponses
import config_settings
import SearchModule
import logging
import logging.handlers
import time

DEBUGFLAG = True

motd = '\n\n~*~ ~*~ NZBMegasearcH detached trend ~*~ ~*~'
print motd

#~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ 
cfgsets = config_settings.CfgSettings()	
logsdir = SearchModule.resource_path('logs/')
logging.basicConfig(filename=logsdir+'nzbmegasearch_detachedtrend.log',level=logging.INFO,format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
log = logging.getLogger(__name__)
handler = logging.handlers.RotatingFileHandler(logsdir+'nzbmegasearch_detachedtrend.log', maxBytes=cfgsets.cgen['log_size'], backupCount=cfgsets.cgen['log_backupcount'])
log.addHandler(handler)
log.info(motd)
cfgsets.cgen['trends'] = 1
cfgsets.cgen['search_suggestions'] = 1
cfgsets.cgen['large_server'] = False
sugg = SuggestionResponses(cfgsets.cfg, cfgsets.cgen)

#~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ 

def pollsuggestions():
	sugg.asktrend_allparallel()
	sugg.asktrend_saveondisk()
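The threading and time imports above suggest pollsuggestions() is driven on a timer; the snippet ends before that code, so this is only a sketch of one plausible driver (the interval and thread setup are assumptions):

def poll_loop(interval_sec=3600):
    while True:
        pollsuggestions()
        time.sleep(interval_sec)

poller = threading.Thread(target=poll_loop)
poller.daemon = True
poller.start()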
Beispiel #54
0
    def html_editpage(self):

        count = 0
        if not hasattr(SearchModule, 'loadedModules'):
            SearchModule.loadSearchModules()

        dsearchsupport = DeepsearchModule.supportedengines()

        cffileb = []
        cffile = copy.deepcopy(self.cfg)
        cdsfile = self.cfg_deep
        genopt = self.cgen

        if (cffile is None):
            cffile = []

        if (cdsfile is None):
            cdsfile = []

        cdomainname = megasearch.getdomainext()

        for module in SearchModule.loadedModules:
            if (module.builtin):
                option = 'checked=yes'
                flogin = 0
                flogin_caption_user = '******'
                flogin_caption_pwd = 'pwd'
                login_name = ''
                login_pwd = ''
                speed_cl = 1
                extra_cl = 0
                if (module.active == 0):
                    option = ''
                for i in xrange(len(cffile)):
                    if (cffile[i]['type'] == module.typesrch):
                        if (cffile[i]['valid'] == 0):
                            option = ''
                        else:
                            option = 'checked=yes'

                        login_name = cffile[i]['login']
                        login_pwd = cffile[i]['pwd']
                        speed_cl = cffile[i]['speed_class']
                        extra_cl = cffile[i]['extra_class']

                if (module.login == 1):
                    flogin = 1
                    # ~ if('caption_login_user' in module):
                    if (hasattr(module, 'caption_login_user')):
                        flogin_caption_user = module.caption_login_user
                        flogin_caption_pwd = module.caption_login_pwd

                tmpcfg = {'stchk': option,
                          'humanname': module.name,
                          'url': '',
                          'idx': count,
                          'speed_class': speed_cl,
                          'extra_class': extra_cl,
                          'type': module.typesrch,
                          'flogin': flogin,
                          'flogin_caption_user': flogin_caption_user,
                          'flogin_caption_pwd': flogin_caption_pwd,
                          'loginname': login_name,
                          'loginpwd': login_pwd,
                          }
                cffileb.append(tmpcfg)
                count = count + 1

        # ~ scrapers with web login
        for dsearchmodule in dsearchsupport:
            if (dsearchmodule['opts']['builtin']):
                option = 'checked=yes'
                flogin = 0
                login_name = ''
                login_pwd = ''
                speed_cl = dsearchmodule['opts']['speed_cl']
                extra_cl = dsearchmodule['opts']['extra_cl']
                if (dsearchmodule['opts']['active'] == 0):
                    option = ''

                for i in xrange(len(cdsfile)):
                    if (cdsfile[i]['type'] == dsearchmodule['opts']['typesrch']):
                        if (cdsfile[i]['valid'] == 0):
                            option = ''
                        else:
                            option = 'checked=yes'
                        # ~ speed_cl = cdsfile[i]['speed_cl']
                        login_name = cdsfile[i]['user']
                        login_pwd = cdsfile[i]['pwd']
                if (dsearchmodule['opts']['login'] == 1):
                    flogin = 1
                tmpcfg = {'stchk': option,
                          'humanname': dsearchmodule['name'],
                          'url': dsearchmodule['opts']['url'],
                          'idx': count,
                          'flogin_caption_user': '******',
                          'flogin_caption_pwd': 'pwd',
                          'speed_class': speed_cl,
                          'extra_class': extra_cl,
                          'type': dsearchmodule['opts']['typesrch'],
                          'flogin': flogin,
                          'loginname': login_name,
                          'loginpwd': login_pwd,
                          }
                cffileb.append(tmpcfg)
                count = count + 1

        count = 0
        for i in xrange(len(cffile)):
            if (cffile[i]['builtin'] == 0):
                cffile[i]['idx'] = count
                cffile[i]['valid_verbose'] = ''
                if (cffile[i]['valid'] == 1):
                    cffile[i]['valid_verbose'] = 'checked=yes'
                count = count + 1
                sel_speedopt_tmp = copy.deepcopy(self.selectable_speedopt)
                sel_speedopt_tmp[cffile[i]['speed_class'] - 1][2] = 'selected'
                sel_extraopt_tmp = copy.deepcopy(self.selectable_extraopt)
                sel_extraopt_tmp[cffile[i]['extra_class']][2] = 'selected'
                cffile[i]['selspeed_sel'] = sel_speedopt_tmp
                cffile[i]['selextra_sel'] = sel_extraopt_tmp

        sel_speedopt_basic = copy.deepcopy(self.selectable_speedopt)
        sel_speedopt_basic[0][2] = 'selected'
        sel_extraopt_basic = copy.deepcopy(self.selectable_extraopt)
        sel_extraopt_basic[0][2] = 'selected'

        count_ds = 0
        cdsfile_toshow1 = []
        for i in xrange(len(cdsfile)):
            if (cdsfile[i]['type'] == 'DSN'):
                cdsfile_toshow = copy.deepcopy(cdsfile[i])
                cdsfile_toshow['idx'] = count_ds
                cdsfile_toshow['valid_verbose'] = ''
                if (cdsfile_toshow['valid'] == 1):
                    cdsfile_toshow['valid_verbose'] = 'checked=yes'
                count_ds = count_ds + 1
                sel_speedopt_tmp = copy.deepcopy(self.selectable_speedopt)
                sel_speedopt_tmp[cdsfile[i]['speed_class'] - 1][2] = 'selected'

                sel_extraopt_tmp = copy.deepcopy(self.selectable_extraopt)
                sel_extraopt_tmp[cdsfile[i]['extra_class']][2] = 'selected'

                cdsfile_toshow['selspeed_sel'] = sel_speedopt_tmp
                cdsfile_toshow['selextra_sel'] = sel_extraopt_tmp
                cdsfile_toshow1.append(cdsfile_toshow)

        possibleopt = megasearch.listpossiblesearchoptions()
        for slctg in possibleopt:
            if (slctg[0] == genopt['search_default']):
                slctg[2] = 'selected'

        tnarray = []
        for ntn in xrange(1, 50):
            if (genopt['trends_qty'] == ntn):
                tnarray.append([ntn, ntn, 'selected'])
            else:
                tnarray.append([ntn, ntn, ''])

        genopt['general_https_verbose'] = ''
        genopt['general_trend_verbose'] = ''
        genopt['general_suggestion_verbose'] = ''
        genopt['smartsearch_verbose'] = ''
        genopt['max_cache_verbose'] = ''
        genopt['predb_active_verbose'] = ''
        genopt['general_restrictopt1_verbose'] = ''
        genopt['general_dereferer_verbose'] = ''
        if (genopt['predb_active'] == 1):
            genopt['predb_active_verbose'] = 'checked=yes'
        if (genopt['general_https'] == 1):
            genopt['general_https_verbose'] = 'checked=yes'
        if (genopt['general_restrictopt1'] == 1):
            genopt['general_restrictopt1_verbose'] = 'checked=yes'
        if (genopt['general_dereferer'] == 1):
            genopt['general_dereferer_verbose'] = 'checked=yes'
        if (genopt['general_suggestion'] == 1):
            genopt['general_suggestion_verbose'] = 'checked=yes'
        if (genopt['general_trend'] == 1):
            genopt['general_trend_verbose'] = 'checked=yes'
        if (genopt['smartsearch'] == 1):
            genopt['smartsearch_verbose'] = 'checked=yes'
        if (genopt['use_warp'] == 1):
            genopt['use_warp_verbose'] = 'checked=yes'
        if (genopt['cache_active'] == 1):
            genopt['cache_active_verbose'] = 'checked=yes'
        genopt['general_ipaddress_verbose'] = 'AUTO'
        if (genopt['general_ipaddress'] != ''):
            genopt['general_ipaddress_verbose'] = genopt['general_ipaddress']

        openshift_install = False
        if (len(self.dirconf_oshift)):
            openshift_install = True
        return render_template('config.html', cfg=cffile, cfg_dp=cdsfile_toshow1, cnt=count, cnt_ds=count_ds,
                               genopt=genopt,
                               selectable_opt=possibleopt,
                               sel_speedopt_basic=sel_speedopt_basic,
                               sel_extraopt_basic=sel_extraopt_basic,
                               openshift_install=openshift_install,
                               tnarray=tnarray,
                               cdomainname=cdomainname,
                               cnt_max=MAX_PROVIDER_NUMBER, cfg_bi=cffileb)
Beispiel #55
0
def summary_results(rawResults, strsearch, logic_items=[], results_stats={}):

    results = []
    titles = []
    sptitle_collection = []

    #~ stats for each provider
    for provid in xrange(len(rawResults)):
        if (len(rawResults[provid])):
            results_stats[str(rawResults[provid][0]['providertitle'])] = [
                len(rawResults[provid]), 0
            ]

    #~ all in one array
    for provid in xrange(len(rawResults)):
        for z in xrange(len(rawResults[provid])):
            if (rawResults[provid][z]['title'] is not None):
                rawResults[provid][z]['title'] = SearchModule.sanitize_html(
                    rawResults[provid][z]['title'])
                rawResults[provid][z]['provid'] = provid
                title = SearchModule.sanitize_strings(
                    rawResults[provid][z]['title'])
                titles.append(title)
                sptitle_collection.append(Set(title.split(".")))
                results.append(rawResults[provid][z])

    strsearch1 = SearchModule.sanitize_strings(strsearch)
    strsearch1_collection = Set(strsearch1.split("."))

    rcount = [0] * 3
    for z in xrange(len(results)):
        findone = 0
        results[z]['ignore'] = 0
        intrs = strsearch1_collection.intersection(sptitle_collection[z])
        if (len(intrs) == len(strsearch1_collection)):
            findone = 1
        else:
            results[z]['ignore'] = 2
            #~ relax the search ~ 0.45
            unmatched_terms_search = strsearch1_collection.difference(intrs)
            unmatched_count = 0
            for mst in unmatched_terms_search:
                my_list = [
                    i for i in sptitle_collection[z] if i.find(mst) == 0
                ]
                if (len(my_list)):
                    unmatched_count = unmatched_count + 1
                if (unmatched_count == len(unmatched_terms_search)):
                    findone = 1
                    results[z]['ignore'] = 0
                #~ print unmatched_terms_search
                #~ print unmatched_count
                #~ print unmatched_terms_search

        #~ print strsearch1_collection
        #~ print intrs
        #~ print findone
        #~ print '------------------'

        if (findone and results[z]['ignore'] == 0):
            #~ print titles[z]
            for v in xrange(z + 1, len(results)):
                if (titles[z] == titles[v]):
                    sz1 = float(results[z]['size'])
                    sz2 = float(results[v]['size'])
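                    # sizes within 5,000,000 bytes (~5 MB) count as the same release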
                    if (abs(sz1 - sz2) < 5000000):
                        results[z]['ignore'] = 1
        #~ stats
        rcount[results[z]['ignore']] += 1

    #~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
    #~ logic params
    exclude_coll = Set([])
    include_coll = Set([])
    #~ print '*'+logic_items[0][1]+'*'
    for i in xrange(len(logic_items)):
        if (logic_items[i][0] == '-'):
            exclude_coll.add(logic_items[i][1])
        if (logic_items[i][0] == '+'):
            include_coll.add(logic_items[i][1])
    if (len(include_coll)):
        for z in xrange(len(results)):
            if (results[z]['ignore'] < 2):
                intrs_i = include_coll.intersection(sptitle_collection[z])
                if (len(intrs_i) == 0):
                    results[z]['ignore'] = 2
    if (len(exclude_coll)):
        for z in xrange(len(results)):
            if (results[z]['ignore'] < 2):
                intrs_e = exclude_coll.intersection(sptitle_collection[z])
                if (len(intrs_e) > 0):
                    results[z]['ignore'] = 2
    #~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~

    mssg = 'Overall search stats: [' + strsearch1 + ']' + ' [' + strsearch + '] ' + str(
        rcount[0]) + ' ' + str(rcount[1]) + ' ' + str(rcount[2])
    log.info(mssg)

    for z in xrange(len(results)):
        if (results[z]['ignore'] != 2):
            results_stats[str(
                results[z]['providertitle'])][1] = results_stats[str(
                    results[z]['providertitle'])][1] + 1
    return results
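The relaxation step above accepts a result when every unmatched query token is a prefix of some title token (i.find(mst) == 0). The prefix test in isolation (Set from the Python 2 sets module, as used above):

from sets import Set

title_tokens = Set('ubuntu.12.04.desktop.amd64'.split('.'))
print [t for t in title_tokens if t.find('desk') == 0]  # ['desktop']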
Beispiel #56
0
    mega_parall = megasearch.DoParallelSearch(cfgsets.cfg, cfgsets.cgen, ds,
                                              wrp)
    apiresp = ApiResponses(cfgsets.cfg, cfgsets.cgen, wrp, ds)
    getsmartinfo = nzbsanity.GetNZBInfo(cfgsets.cfg, cfgsets.cgen, ds, app)
    auth = miscdefs.Auth(cfgsets)
    testserver = miscdefs.ChkServer(cfgsets.cgen)


# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
motd = '\n\n~*~ ~*~ NZBMegasearcH ~*~ ~*~'
print motd

DEBUGFLAG = False
LARGESERVER = False

logsdir = SearchModule.resource_path('logs/')
if (len(os.getenv('OPENSHIFT_DATA_DIR', ''))):
    logsdir = os.environ.get('OPENSHIFT_DATA_DIR')

if (len(sys.argv) > 1):
    for argv in sys.argv:
        if (argv == 'help'):
            print ''
            print '`debug`: start in debug mode'
            print '`large`: modality for GUNICORN + NGINX large server'
            print '`daemon`: start in daemon mode, detached from terminal'
            print ''
            exit()

        if (argv == 'debug'):
            print '====== DEBUGMODE DEBUGMODE DEBUGMODE DEBUGMODE ======'
Beispiel #57
0
    def dosearch(self, args):
        # ~ restore originals
        self.cfg = copy.deepcopy(self.cfg_cpy)

        if ('q' not in args):
            self.results = []
            return self.results
        nuqry = args['q'] + ' ' + self.cgen['searchaddontxt']
        self.logic_items = self.logic_expr.findall(nuqry)
        self.qry_nologic = self.logic_expr.sub(" ", nuqry)
        if ('selcat' in args):
            if (args['selcat'] != ""):
                self.qry_nologic += " " + args['selcat']

        # ~ speed class
        speed_class_sel = 1
        if ('tm' in args):
            speed_class_sel = int(args['tm'])

        # ~ speed class deepsearch
        self.ds.set_extraopt(speed_class_sel, 'manual')
        # ~ speed class Nabbased
        self.set_timeout_speedclass(speed_class_sel)
        # ~ manual search Nabbased
        self.set_extraopt()

        if (len(args['q']) == 0):
            if ('selcat' in args):
                if (len(args['selcat']) == 0):
                    self.results = []
                    return self.results
            else:
                self.results = []
                return self.results
        if (self.qry_nologic.replace(" ", "") == ""):
            self.results = []
            return self.results

        log.info('TYPE OF SEARCH: ' + str(speed_class_sel))

        self.cleancache()
        # ~ cache hit, no server report
        cachehit = True
        self.returncode_fine['code'] = 2
        self.resultsraw = self.chkforcache(self.wrp.chash64_encode(SearchModule.sanitize_strings(self.qry_nologic)),
                                           speed_class_sel)
        if (self.resultsraw is None):
            self.resultsraw = SearchModule.performSearch(self.qry_nologic, self.cfg, self.ds)
            cachehit = False

        if (self.cgen['smartsearch'] == 1):
            # ~ smartsearch
            self.res_results = {}
            self.results = summary_results(self.resultsraw, self.qry_nologic, self.logic_items, self.res_results)
        else:
            # ~ no cleaning just flatten in one array
            self.results = []
            self.res_results = {}
            for provid in xrange(len(self.resultsraw)):
                if (len(self.resultsraw[provid])):
                    self.res_results[str(self.resultsraw[provid][0]['providertitle'])] = [len(self.resultsraw[provid]),
                                                                                          0]
            for provid in xrange(len(self.resultsraw)):
                for z in xrange(len(self.resultsraw[provid])):
                    if (self.resultsraw[provid][z]['title'] is not None):
                        self.results.append(self.resultsraw[provid][z])

        # ~ server status output
        if (not cachehit):
            self.prepareretcode()
Beispiel #58
0
    def dosearch(self, args):
        #~ restore originals
        self.cfg = copy.deepcopy(self.cfg_cpy)

        if ('q' not in args):
            self.results = []
            return self.results
        nuqry = args['q'] + ' ' + self.cgen['searchaddontxt']
        self.logic_items = self.logic_expr.findall(nuqry)
        self.qry_nologic = self.logic_expr.sub(" ", nuqry)
        if ('selcat' in args):
            if (args['selcat'] != ""):
                self.qry_nologic += " " + args['selcat']

        #~ speed class
        speed_class_sel = 1
        if ('tm' in args):
            speed_class_sel = int(args['tm'])

        #~ speed class deepsearch
        self.ds.set_extraopt(speed_class_sel, 'manual')
        #~ speed class Nabbased
        self.set_timeout_speedclass(speed_class_sel)
        #~ manual search Nabbased
        self.set_extraopt()

        if (len(args['q']) == 0):
            if ('selcat' in args):
                if (len(args['selcat']) == 0):
                    self.results = []
                    return self.results
            else:
                self.results = []
                return self.results
        if (self.qry_nologic.replace(" ", "") == ""):
            self.results = []
            return self.results

        log.info('TYPE OF SEARCH: ' + str(speed_class_sel))

        self.cleancache()
        #~ cache hit, no server report
        cachehit = True
        self.returncode_fine['code'] = 2
        self.resultsraw = self.chkforcache(
            self.wrp.chash64_encode(
                SearchModule.sanitize_strings(self.qry_nologic)),
            speed_class_sel)
        if (self.resultsraw is None):
            self.resultsraw = SearchModule.performSearch(
                self.qry_nologic, self.cfg, self.ds)
            cachehit = False

        if (self.cgen['smartsearch'] == 1):
            #~ smartsearch
            self.res_results = {}
            self.results = summary_results(self.resultsraw, self.qry_nologic,
                                           self.logic_items, self.res_results)
        else:
            #~ no cleaning just flatten in one array
            self.results = []
            self.res_results = {}
            for provid in xrange(len(self.resultsraw)):
                if (len(self.resultsraw[provid])):
                    self.res_results[str(
                        self.resultsraw[provid][0]['providertitle'])] = [
                            len(self.resultsraw[provid]), 0
                        ]
            for provid in xrange(len(self.resultsraw)):
                for z in xrange(len(self.resultsraw[provid])):
                    if (self.resultsraw[provid][z]['title'] is not None):
                        self.results.append(self.resultsraw[provid][z])

        #~ server status output
        if (not cachehit):
            self.prepareretcode()
Beispiel #59
0
def summary_results(rawResults, strsearch, logic_items=[], results_stats={}):
    results = []
    titles = []
    sptitle_collection = []

    # ~ stats for each provider
    for provid in xrange(len(rawResults)):
        if (len(rawResults[provid])):
            results_stats[str(rawResults[provid][0]['providertitle'])] = [len(rawResults[provid]), 0]

    # ~ all in one array
    for provid in xrange(len(rawResults)):
        for z in xrange(len(rawResults[provid])):
            if (rawResults[provid][z]['title'] is not None):
                rawResults[provid][z]['title'] = SearchModule.sanitize_html(rawResults[provid][z]['title'])
                rawResults[provid][z]['provid'] = provid
                title = SearchModule.sanitize_strings(rawResults[provid][z]['title'])
                titles.append(title)
                sptitle_collection.append(Set(title.split(".")))
                results.append(rawResults[provid][z])

    strsearch1 = SearchModule.sanitize_strings(strsearch)
    strsearch1_collection = Set(strsearch1.split("."))

    rcount = [0] * 3
    for z in xrange(len(results)):
        findone = 0
        results[z]['ignore'] = 0
        intrs = strsearch1_collection.intersection(sptitle_collection[z])
        if (len(intrs) == len(strsearch1_collection)):
            findone = 1
        else:
            results[z]['ignore'] = 2
            # ~ relax the search ~ 0.45
            unmatched_terms_search = strsearch1_collection.difference(intrs)
            unmatched_count = 0
            for mst in unmatched_terms_search:
                my_list = [i for i in sptitle_collection[z] if i.find(mst) == 0]
                if (len(my_list)):
                    unmatched_count = unmatched_count + 1
                if (unmatched_count == len(unmatched_terms_search)):
                    findone = 1
                    results[z]['ignore'] = 0
                # ~ print unmatched_terms_search
                # ~ print unmatched_count
                # ~ print unmatched_terms_search


        # ~ print strsearch1_collection
        # ~ print intrs
        # ~ print findone
        # ~ print '------------------'

        if (findone and results[z]['ignore'] == 0):
            # ~ print titles[z]
            for v in xrange(z + 1, len(results)):
                if (titles[z] == titles[v]):
                    sz1 = float(results[z]['size'])
                    sz2 = float(results[v]['size'])
                    if (abs(sz1 - sz2) < 5000000):
                        results[z]['ignore'] = 1
        # ~ stats
        rcount[results[z]['ignore']] += 1

    # ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
    # ~ logic params
    exclude_coll = Set([])
    include_coll = Set([])
    # ~ print '*'+logic_items[0][1]+'*'
    for i in xrange(len(logic_items)):
        if (logic_items[i][0] == '-'):
            exclude_coll.add(logic_items[i][1])
        if (logic_items[i][0] == '+'):
            include_coll.add(logic_items[i][1])
    if (len(include_coll)):
        for z in xrange(len(results)):
            if (results[z]['ignore'] < 2):
                intrs_i = include_coll.intersection(sptitle_collection[z])
                if (len(intrs_i) == 0):
                    results[z]['ignore'] = 2
    if (len(exclude_coll)):
        for z in xrange(len(results)):
            if (results[z]['ignore'] < 2):
                intrs_e = exclude_coll.intersection(sptitle_collection[z])
                if (len(intrs_e) > 0):
                    results[z]['ignore'] = 2
    # ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~

    mssg = 'Overall search stats: [' + strsearch1 + ']' + ' [' + strsearch + '] ' + str(rcount[0]) + ' ' + str(
        rcount[1]) + ' ' + str(rcount[2])
    log.info(mssg)

    for z in xrange(len(results)):
        if (results[z]['ignore'] != 2):
            results_stats[str(results[z]['providertitle'])][1] = results_stats[str(results[z]['providertitle'])][1] + 1
    return results
Beispiel #60
0
    def beam_cookie(self, urltouse, args):
        retfail = -1

        global globalResults

        if not hasattr(SearchModule, 'loadedModules'):
            SearchModule.loadSearchModules()

        cookie = {}

        dwntype = 0
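        # dwntype selects the download path below: 0 = built-in module (cookie login), 1 = deepsearch engine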
        index = 0
        selcfg_idx = 0

        #~ take the right password
        for i in xrange(len(self.cfg)):
            if (self.cfg[i]['type'] == args['m']):
                selcfg_idx = i

        #~ standard search
        for module in SearchModule.loadedModules:
            if (module.typesrch == args['m']):
                dwntype = 0
                if (module.dologin(self.cfg[selcfg_idx]) == True):
                    cookie = module.cookie
                else:
                    return retfail

        #~ deep search
        deepidx = 0
        for index in xrange(len(self.dsearch.ds)):
            if (self.dsearch.ds[index].typesrch == args['m']):
                dwntype = 1
                deepidx = index

        f_name = ''
        if (dwntype == 0):
            log.info('WARPNGN Cookie FTD found')
            try:
                opener = urllib2.build_opener()
                opener.addheaders.append(
                    ('Cookie', 'FTDWSESSID=' + cookie['FTDWSESSID']))
                response = opener.open(urltouse)

            except Exception as e:
                return retfail

        if (dwntype == 1):
            log.info('WARPNGN Cookie deep found')
            response = self.dsearch.ds[deepidx].download(urltouse)
            if (response == ''):
                return retfail

        fcontent = response.read()
        #~ print response.info()
        f = tempfile.NamedTemporaryFile(delete=False)
        f.write(fcontent)
        f.close()
        fresponse = send_file(f.name,
                              mimetype='application/x-nzb;',
                              as_attachment=True,
                              attachment_filename='yourmovie.nzb',
                              add_etags=False,
                              cache_timeout=None,
                              conditional=False)

        try:
            os.remove(f.name)
        except Exception as e:
            print 'Cannot remove temporary NZB file'

        for i in xrange(len(response.info().headers)):
            if (response.info().headers[i].find('Content-Encoding') != -1):
                fresponse.headers["Content-Encoding"] = 'gzip'
            riff = response.info().headers[i].find('Content-Disposition')
            if (riff != -1):
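                # offset 21 skips the literal 'Content-Disposition: ' prefix (19 chars + ': ')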
                fresponse.headers["Content-Disposition"] = response.info(
                ).headers[i][riff +
                             21:len(response.info().headers[i])].strip()
        return fresponse