Example #1
    def __init__(self, state):
        self.state = state
        self.paragraphs = []
        self.paragraphs.append(SettingParagraph(self.state))
        food_r = random.random()
        extra_paragraphs = []
        self.potential_titles = []
        self.potential_titles.append('The ' + string.capwords(self.state.get_current_setting().get_description()))

        if (self.state.get_food_level() == 'medium' and food_r < 0.1) or \
            (self.state.get_food_level() == 'low' and food_r < 0.3) or \
            (self.state.get_food_level() == 'none' and food_r < 0.5):
            food_paragraph = FoodParagraph(self.state)
            extra_paragraphs.append(food_paragraph)
            self.potential_titles.append(food_paragraph.method.title() + ' In The ' + string.capwords(self.state.get_current_setting().get_name()))

        energy_r = random.random()
        if (self.state.get_energy_level() == 'medium' and energy_r < 0.1) or \
            (self.state.get_energy_level() == 'low' and energy_r < 0.3) or \
            (self.state.get_energy_level() == 'none' and energy_r < 0.5):
            self.potential_titles.append('Camping' + ' In The ' + string.capwords(self.state.get_current_setting().get_description()))
            extra_paragraphs.append(CampParagraph(self.state))

        if len(self.state.get_enemy_patrols()) > 0 and random.random() > 0.5:
            self.potential_titles.append('The Battle In The ' + string.capwords(self.state.get_current_setting().get_description()))
            extra_paragraphs.append(CombatParagraph(self.state))

        random.shuffle(extra_paragraphs)
        self.paragraphs += extra_paragraphs
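The three level checks above share one pattern: compare a fresh random draw against a per-level threshold. A minimal sketch of the same idea as a table lookup, with the thresholds copied from the example and the helper name purely illustrative:

import random

# Chance of adding an extra paragraph for a given resource level (same values as above).
EXTRA_PARAGRAPH_CHANCE = {'medium': 0.1, 'low': 0.3, 'none': 0.5}

def should_add_extra(level, rng=random):
    # Levels not in the table (e.g. 'high') never trigger an extra paragraph.
    return rng.random() < EXTRA_PARAGRAPH_CHANCE.get(level, 0.0)

# e.g. should_add_extra('low') is True roughly 30% of the time.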
Example #2
 def GET(self, path=''):
     itemlist = []
     if path.startswith('..'):
         path = ''
     for item in os.listdir(os.path.join(MEDIA_RDIR,path)):
         if os.path.isfile(os.path.join(MEDIA_RDIR,path,item)):
             fname = os.path.splitext(item)[0]
             fname = re.sub('[^a-zA-Z0-9\[\]\(\)\{\}]+',' ',fname)
             fname = re.sub('\s+',' ',fname)
             fname = string.capwords(fname.strip())
             singletuple = (os.path.join(path,item),fname,'file')
         else:
             fname = re.sub('[^a-zA-Z0-9\']+',' ',item)
             fname = re.sub('\s+',' ',fname)
             fname = string.capwords(fname.strip())
             singletuple = (os.path.join(path,item),fname,'dir')
         itemlist.append(singletuple)
     itemlist = [f for f in itemlist if not os.path.split(f[0])[1].startswith('.')]
     itemlist = [f for f in itemlist if os.path.splitext(f[0])[1].lower() in PLAYABLE_TYPES or f[2]=='dir']
     list.sort(itemlist, key=lambda alpha: alpha[1])
     list.sort(itemlist, key=lambda dirs: dirs[2])
     outputlist=[]
     for line in itemlist:
         outputlist.append('{\"path\":\"'+line[0]+'\", \"name\":\"'+line[1]+'\", \"type\":\"'+line[2]+'\"}')
     return '[\n'+',\n'.join(outputlist)+']'
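The handler above assembles its JSON reply by string concatenation, which breaks as soon as a name contains a double quote or backslash. A small sketch using the standard json module on the same (path, name, type) tuples produces the same shape safely (the two sample tuples are illustrative):

import json

itemlist = [('music/01 intro.mp3', 'Intro', 'file'), ('music/albums', 'Albums', 'dir')]
payload = [{'path': p, 'name': n, 'type': t} for p, n, t in itemlist]
print(json.dumps(payload, indent=1))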
Example #3
def getCities(url):
    soup = scrape(url)
    s = str(soup.findAll(id="list"))
    result = {}
    match = re.findall('<a href="(http://\w+.craigslist.org/)">([\w ]+)</a>', s)
    for f in match:
        city = string.capwords(f[1])
        url = f[0]
        result[city] = url
    match = re.findall('<a href="(http://\w+.craigslist.org/)"><b>([\w ]+)</b></a>', s)
    for f in match:
        city = string.capwords(f[1])
        url = f[0]
        result[city] = getAreas(url)
    match = re.findall('<a href="(http://\w+.craigslist.org/)">([\w ]+)\W*[/-]+\W*([\w ]+)</a>', s)
    for f in match:
        city1 = string.capwords(f[1])
        city2 = string.capwords(f[2])
        url = f[0]
        result[city1] = url
        result[city2] = url
    if result:
        return result 
    else:
        f = urllib.urlopen(url)
        return f.geturl()
Example #4
 def __setitem__(self, i, y):
     self.regexIsDirty = True
     # for each entry the user adds, we actually add three entries:
     super(WordSub,self).__setitem__(string.lower(i),string.lower(y)) # key = value
     super(WordSub,self).__setitem__(string.capwords(i), string.capwords(y)) # Key = Value
     super(WordSub,self).__setitem__(string.upper(i), string.upper(y)) # KEY = VALUE
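The __setitem__ above stores each substitution under three casings so a later lookup can match 'hello', 'Hello' and 'HELLO'. string.lower and string.upper only exist in the Python 2 string module; a minimal Python 3 sketch of the same three-entry idea (the class name is hypothetical):

import string

class CasedSub(dict):
    """Hypothetical sketch: store key/value in lower, Capwords and UPPER forms."""
    def __setitem__(self, key, value):
        super().__setitem__(key.lower(), value.lower())                    # key = value
        super().__setitem__(string.capwords(key), string.capwords(value))  # Key = Value
        super().__setitem__(key.upper(), value.upper())                    # KEY = VALUE

subs = CasedSub()
subs["can't"] = "cannot"
# subs == {"can't": "cannot", "Can't": "Cannot", "CAN'T": "CANNOT"}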
Example #5
def gather_teams(years):
    """Gathers team names and ID numbers in the specified league"""
    for year in years:
        url = "http://games.espn.go.com/ffl/standings?leagueId=%s&seasonId=%s" % (args.league, year)
        ourUrl = opener.open(url).read()
        soup = BeautifulSoup(ourUrl)
        for num, division in enumerate(soup.findAll(bgcolor='#ffffff', id=re.compile(r'\d'))):
            for i in division.findAll('tr', {'class': 'evenRow bodyCopy sortableRow'}):
                title = i.find('td').text
                owner = string.capwords(title[title.find("(") + 1:title.find(")")])
                pf = i.find('td', {'class': 'sortablePF'}).text
                pa = i.find('td', {'class': 'sortablePA'}).text
                parsed = urlparse.urlparse(i.a['href'])  # parse url parameters
                id = urlparse.parse_qs(parsed.query)['teamId'][0]
                name = i.a.text
                teams.append(TeamID(name, owner, int(id), pf, pa, year, num + 1))
            for i in division.findAll('tr', {'class': 'oddRow bodyCopy sortableRow'}):
                title = i.find('td').text
                owner = string.capwords(title[title.find("(") + 1:title.find(")")])
                pf = i.find('td', {'class': 'sortablePF'}).text
                pa = i.find('td', {'class': 'sortablePA'}).text
                parsed = urlparse.urlparse(i.a['href'])  # parse url parameters
                id = urlparse.parse_qs(parsed.query)['teamId'][0]
                name = i.a.text
                teams.append(TeamID(name, owner, int(id), pf, pa, year, num + 1))
Example #6
    def generate_response(self, seed):
        response = []
        key = seed

        for x in range(self.max_words):

            words = key.split()
            words = map(self.sanitize, words)

            response.append(words[0])

            new_word = random.choice(self.corpus[(words[0], words[1])])

            if not new_word:
                break

            key = words[1] + " " + new_word

        for i in range(len(response)):
            if response[i - 1] and response[i - 1] == ".":
                response[i] = string.capwords(response[i])

        response[0] = string.capwords(response[0])
        response[-1] += '.'

        str_response = ' '.join(response)

        return re.sub(r'\s([?.!"](?:\s|$))', r'\1', str_response)
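The final re.sub above strips the space that ' '.join placed in front of sentence punctuation. A short self-contained demo of that regex (the word list is illustrative):

import re

text = ' '.join(['hello', 'there', '.', 'how', 'are', 'you', '?'])
print(re.sub(r'\s([?.!"](?:\s|$))', r'\1', text))  # -> hello there. how are you?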
Example #7
    def format_data(self, data):
        for item in data['results']:
            item['visitor_name'] = string.capwords(item.get('namefirst', ''))
            if item.get('namemid', '') != '':
                item['visitor_name'] += " " + item.get('namemid', '') + "."
            item['visitor_name'] += " " + string.capwords(item.get('namelast', ''))
            item['visited_name'] = string.capwords(item.get('visitee_namefirst', ''))
            if string.capwords(item.get('visitee_namelast', '')) != 'And':
                item['visited_name'] += " " + string.capwords(item.get('visitee_namelast', ''))
            if item['visited_name'].rstrip().lstrip() == 'Potus':
                item['visited_name'] = 'The President'
            item['description'] = item.get('description', '').lower()

            led = item.get('lastentrydate', '')
            dd = DATE_PATTERN.match(led)
            if dd:
                dd = dd.groupdict()
                year = int(dd['year'])
                # yes, this is how I'm doing this
                if year < 2000:
                    year += 2000
                if dd['hour'] != None and dd['minute'] != None:
                    dt = datetime.datetime(year, int(dd['month']), int(dd['day']), int(dd['hour']), int(dd['minute']))

                    # will fail on systems with non-GNU C libs (i.e. Windows) due to %- removal of zero-padding
                    item['lastentry_date'] = dt.strftime('%-m-%-d-%Y %-I:%M %p')
                else:
                    dt = datetime.datetime(year, int(dd['month']), int(dd['day']))

                    item['lastentry_date'] = dt.strftime('%-m-%-d-%Y')
            else:
                item['lastentry_date'] = led

        return data
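As the comment in format_data notes, the '%-m' / '%-d' / '%-I' strftime codes are a glibc extension and fail on Windows. A hedged sketch of a portable way to get the same unpadded layout (the sample timestamp is illustrative):

import datetime

dt = datetime.datetime(2016, 3, 7, 9, 5)
# Portable equivalent of dt.strftime('%-m-%-d-%Y %-I:%M %p'):
hour12 = dt.hour % 12 or 12
stamp = '{}-{}-{} {}:{:02d} {}'.format(dt.month, dt.day, dt.year,
                                       hour12, dt.minute, dt.strftime('%p'))
print(stamp)  # -> 3-7-2016 9:05 AM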
Example #8
 def world_city_to_display_format(entry):
     parts = entry.split(',')
     return {
         'city': string.capwords(parts[0]),
         'state': string.capwords(parts[1]),
         'country': string.capwords(parts[2])
     }
Example #9
    def search_results(self, mood, query):
        tokens = tokenize(query)
        counts = defaultdict(int)
        for token in tokens:
            counts[token] += 1
        query_tf = defaultdict(float)
        for token, count in counts.iteritems():
            query_tf[token] = self._term_tf_idf(token, count)
        # magnitude
        mag = lambda x: math.sqrt(sum(i ** 2 for i in x))
        m = mag(query_tf.values())

        for token, count in query_tf.iteritems():
            if m != 0:
                query_tf[token] = count / m
            else:
                query_tf[token] = 0
        moodlist = []
        for song in self.song_list:
            if song["mood"] == mood:
                moodlist.append(song)
        moodcosinelist = []
        for song in moodlist:
            cosine = sum([query_tf[term] * song["tfidf"].get(term, 0) for term in query_tf.keys()])
            moodcosinelist.append(
                {"song": string.capwords(song["title"]), "cosine": cosine, "artist": string.capwords(song["artist"])}
            )
        neighbors = heapq.nlargest(10, moodcosinelist, key=operator.itemgetter("cosine"))
        if neighbors == []:
            neighbors = moodlist[:10]
        return neighbors
Example #10
def load_files(culture):
    # Process Zip Codes
    all_zips = {}
    reader = csv.reader(open(os.path.join(data_dir, culture, "zip-codes.txt"), "rb"))
    for row in reader:
        data = [string.capwords(row[3]), row[4]]
        all_zips[row[0]] = data
    output = open('source-data.pkl', 'wb')
    pickle.dump(all_zips, output)

    #Process area codes
    area_code_file = open(os.path.join(data_dir, culture, "area-codes.txt"), "rb")
    state_area_codes = {}
    for line in area_code_file:
        clean_line = line.replace(' ','').rstrip('\n')
        state_area_codes[line.split(':')[0]] = clean_line[3:].split(',')
    pickle.dump(state_area_codes, output)
    area_code_file.close()

    #Process Last Names
    last_names = []
    last_name_file = open(os.path.join(data_dir, culture, "last-name.txt"),"rb")
    for line in last_name_file:
        clean_line = line.rstrip('\n')
        last_names.append(string.capwords(clean_line.split(' ')[0]))
    pickle.dump(last_names, output)
    last_name_file.close()

    #Process Male First Names
    male_first_names = []
    male_first_name_file = open(os.path.join(data_dir, culture, "male-first-name.txt"),"rb")
    for line in male_first_name_file:
        clean_line = line.rstrip('\n')
        male_first_names.append(string.capwords(clean_line.split(' ')[0]))
    pickle.dump(male_first_names, output)
    male_first_name_file.close()

    #Process Female First Names
    female_first_names = []
    female_first_name_file = open(os.path.join(data_dir, culture, "female-first-name.txt"),"rb")
    for line in female_first_name_file:
        clean_line = line.rstrip('\n')
        female_first_names.append(string.capwords(clean_line.split(' ')[0]))
    pickle.dump(female_first_names, output)
    female_first_name_file.close()

    #Process the simple files
    for f in simple_files_to_process:
        temp = []
        if f == "email-domains.txt" or f == "latin-words.txt":
            sample_file = open(os.path.join(data_dir, f), "rb")
        else:
            sample_file = open(os.path.join(data_dir, culture, f), "rb")
        for line in sample_file:
            clean_line = line.rstrip('\n')
            temp.append(clean_line)
        pickle.dump(temp, output)
        sample_file.close()
        temp = []
    output.close()
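load_files writes several pickles back to back into source-data.pkl; a reader gets them back with one pickle.load per dump, in the same order. A minimal sketch of that reader (load order mirrors the dumps above):

import pickle

with open('source-data.pkl', 'rb') as source:
    all_zips = pickle.load(source)
    state_area_codes = pickle.load(source)
    last_names = pickle.load(source)
    male_first_names = pickle.load(source)
    female_first_names = pickle.load(source)
    # ...then one pickle.load per entry in simple_files_to_process, in the same order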
Example #11
 def __name__(self):
     feat_name = []
     for m1 in self.aggregation_mode_prev:
         for m in self.aggregation_mode:
             n = "EditDistance_%s_%s_%s"%(self.ngram_str, string.capwords(m1), string.capwords(m))
             feat_name.append(n)
     return feat_name
Example #12
 def format_answers(answers):
     print 'Answers:'
     number = 0
     for answer in answers:
         number += 1          
         answers = string_replace(regexes[3], "", str(number) + '.' + answer.replace(":", ""))
         print string.capwords(answers)
Example #13
def insertIntoDict(k, v, aDict):
	key = string.capwords(k)
	value = string.capwords(v)
	if key not in aDict:
		aDict[key] = [value]
	else: 
		aDict[key].append(value)
Example #14
def add_parts(words):
        indi_mangled = []
        mangled = []
        mangled_fupper = []
        mangled_upper = [word[:1].upper() + word[1:] for word in words]
        mangled_lower = [word[:1].lower() + word[1:] for word in words]
        mangled_prep = [word[:1].lower() + word[1:] for word in words]
        for s in itertools.permutations(mangled_upper, 3):
                mangled.append(''.join(s))
        for s in itertools.permutations(mangled_lower, 3):
                mangled.append(''.join(s))
        for s in itertools.permutations(mangled_prep, 3):
                mangled_fupper.append(''.join(s))
        for s in mangled_fupper:
                mangled.append(string.capwords(s))
        for word in words:
                mangled.append(word)
        
        for s in itertools.permutations(mangled_upper, 2):
                indi_mangled.append(''.join(s))
        for s in itertools.permutations(mangled_lower, 2):
                indi_mangled.append(''.join(s))
        for s in itertools.permutations(mangled_prep, 2):
                indi_mangled.append(''.join(s))
        for s in mangled_fupper:
                indi_mangled.append(string.capwords(s))
        for word in words:
                indi_mangled.append(word)
                
        joined_list = mangled + indi_mangled
        
        return joined_list
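add_parts grows combinatorially with the number of input words. A tiny self-contained check of the itertools.permutations counts it relies on (the sample words are illustrative):

import itertools

words = ['red', 'blue', 'green', 'gold']
print(len(list(itertools.permutations(words, 3))))  # 4*3*2 = 24 joined strings per pass
print(len(list(itertools.permutations(words, 2))))  # 4*3   = 12 joined strings per pass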
Example #15
def main():
    santas = assign_santas(read_families(sys.argv[1]))
    f = open("santas_list.txt", "w")
    for s in santas.items():
        f.write(string.capwords(s[0]) + " is a Santa for " + string.capwords(s[1]))
        f.write("\n")
    check_santas(santas, read_families(sys.argv[1]))
Example #16
def __subdivisionSummary( plug ) :

	info = []
	if plug["subdividePolygons"]["enabled"].getValue() :
		info.append( "Subdivide Polygons " + ( "On" if plug["subdividePolygons"]["value"].getValue() else "Off" ) )
	if plug["subdivIterations"]["enabled"].getValue() :
		info.append( "Iterations %d" % plug["subdivIterations"]["value"].getValue() )
	if plug["subdivAdaptiveError"]["enabled"].getValue() :
		info.append( "Error %s" % GafferUI.NumericWidget.valueToString( plug["subdivAdaptiveError"]["value"].getValue() ) )
	if plug["subdivAdaptiveMetric"]["enabled"].getValue() :
		info.append( string.capwords( plug["subdivAdaptiveMetric"]["value"].getValue().replace( "_", " " ) ) + " Metric" )
	if plug["subdivAdaptiveSpace"]["enabled"].getValue() :
		info.append( string.capwords( plug["subdivAdaptiveSpace"]["value"].getValue() ) + " Space" )
	if plug["subdivUVSmoothing"]["enabled"].getValue() :
		info.append(
			{
				"pin_corners" : "Pin UV Corners",
				"pin_borders" : "Pin UV Borders",
				"linear" : "Linear UVs",
				"smooth" : "Smooth UVs",
			}.get( plug["subdivUVSmoothing"]["value"].getValue() )
		)
	if plug["subdivSmoothDerivs"]["enabled"].getValue() :
		info.append( "Smooth Derivs " + ( "On" if plug["subdivSmoothDerivs"]["value"].getValue() else "Off" ) )

	return ", ".join( info )
Example #17
 def _do_write(fname, variable, version, date, table):
     print("writing {} ..".format(fname))
     import unicodedata
     import datetime
     import string
     utc_now = datetime.datetime.now(tz=datetime.timezone.utc)
     INDENT = 4
     with open(fname, 'w') as fp:
         fp.write("# Generated: {iso_utc}\n"
                  "# Source: {version}\n"
                  "# Date: {date}\n"
                  "{variable} = (".format(iso_utc=utc_now.isoformat(),
                                          version=version,
                                          date=date,
                                          variable=variable))
         for start, end in table:
             ucs_start, ucs_end = unichr(start), unichr(end)
             hex_start, hex_end = ('0x{0:04x}'.format(start),
                                   '0x{0:04x}'.format(end))
             try:
                 name_start = string.capwords(unicodedata.name(ucs_start))
             except ValueError:
                 name_start = u''
             try:
                 name_end = string.capwords(unicodedata.name(ucs_end))
             except ValueError:
                 name_end = u''
             fp.write('\n' + (' ' * INDENT))
             fp.write('({0}, {1},),'.format(hex_start, hex_end))
             fp.write('  # {0:24s}..{1}'.format(
                 name_start[:24].rstrip() or '(nil)',
                 name_end[:24].rstrip()))
         fp.write('\n)\n')
     print("complete.")
Example #18
def dictionary_search(query, phenny): 
    if hasattr(phenny.config, 'wordnik_api_key'):
        query = query.replace('!', '')
        query = web.quote(query)
        try:
            uri = 'http://api.wordnik.com/v4/word.json/' + query + '/definitions?limit=1&includeRelated=false&sourceDictionaries=wiktionary&useCanonical=false&includeTags=false&api_key=' + phenny.config.wordnik_api_key
            rec_bytes = web.get(uri)
            jsonstring = json.loads(rec_bytes)
            dword = jsonstring[0]['word']
        except:
            try:
                query = query.lower()
                uri = 'http://api.wordnik.com/v4/word.json/' + query + '/definitions?limit=1&includeRelated=false&sourceDictionaries=wiktionary&useCanonical=false&includeTags=false&api_key=' + phenny.config.wordnik_api_key
                rec_bytes = web.get(uri)
                jsonstring = json.loads(rec_bytes)
                dword = jsonstring[0]['word']
            except:
                query = string.capwords(query)
                uri = 'http://api.wordnik.com/v4/word.json/' + query + '/definitions?limit=1&includeRelated=false&sourceDictionaries=wiktionary&useCanonical=false&includeTags=false&api_key=' + phenny.config.wordnik_api_key
                rec_bytes = web.get(uri)
                jsonstring = json.loads(rec_bytes)
        try:
            dword = jsonstring[0]['word']
        except:
            return None
        if dword:
            ddef = jsonstring[0]['text']
            dattr = jsonstring[0]['attributionText']
            dpart = jsonstring[0]['partOfSpeech']
            dpart = dpart.replace('-', ' ')
            dpart = string.capwords(dpart)
            return (dword + ' - ' + dpart + ' - ' + ddef + ' - ' + dattr)
    else:
        return 'Sorry but you need to set your wordnik_api_key in the config file.'
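dictionary_search retries the Wordnik lookup with the query as given, then lowercased, then Capwords-cased, using nested try/except blocks. A sketch of the same retry order written as a single loop (fetch is a hypothetical callable standing in for the HTTP request plus json.loads):

import string

def first_definition(query, fetch):
    """Try the query as given, then lower-cased, then Capwords-cased.
    fetch is a hypothetical callable that returns the parsed JSON list or raises."""
    for candidate in (query, query.lower(), string.capwords(query)):
        try:
            return fetch(candidate)[0]
        except Exception:
            continue
    return None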
Example #19
    def __get_song_from_list(cls, element):
        ul = element.find("ul", {"class": cls.BOARD_CONTENT_CLASS})

        url = ul.get(cls.SONG_URL_PROP)

        id_parts = url.split("/")
        song_id = id_parts[4] if len(id_parts) > 4 else None

        title_item = ul.find("li", {"class": cls.SONG_TITLE_CLASS})
        title = capwords(title_item.text) if title_item else None

        artist_item = ul.find("li", {"class": cls.SONG_ARTIST_CLASS})
        artist = capwords(artist_item.text) if artist_item else None

        bit_rate = ul.find("li", {"class": cls.SONG_BIT_RATE_CLASS}).text
        bit_rate_parts = bit_rate.split()
        bit_rate = bit_rate_parts[0] if bit_rate_parts else None

        length = ul.find("li", {"class": cls.SONG_LENGTH_CLASS}).text
        length = Song.get_length_from_string(length)

        song_dict = {
            "song_id": song_id,
            "title": title,
            "artist": artist,
            "bit_rate": bit_rate,
            "length": length,
            "url": url,
        }

        return Song(**song_dict)
Example #20
def revise_stopname(stopname):
    # we ALWAYS want to apply these
    place_substitutions1 = [
        ('\[[^\[\]]+\] ?', ''),
        ('and *$', ''), # trailing ands...
        ]
    for subst in place_substitutions1:
        stopname = re.sub(subst[0], subst[1], stopname)

    # replace '... <...> CIVIC XX' w/ 'Near XX ...'
    m = re.match('(.+) (in front of|before|after|opposite|before and opposite) civic address ([0-9]+)', stopname)
    if m:
        return "Near %s %s" % (m.group(3), string.capwords(m.group(1)))

    boring_street_suffixes = [ "Ave", "Blvd", "Cr", "Crt", "Ct", "Dr", "Gate", "Pkwy", "Rd", "Row", 
                               "St" ]
    boring_street_regex = '(?:' + '|'.join(boring_street_suffixes) + ')'
    def strip_boring_street_suffix(text):
        m = re.match('(.*) ' + boring_street_regex, text)
        if m:
            return m.group(1)

        return text

    street_suffixes = boring_street_suffixes + [ "Hwy", "Terr" ]
    street_regex = '(?:' + '|'.join(street_suffixes) + ')'

    m = re.match('^(.*) ' + street_regex + ' (after|before|opposite|in front of) (.*) ' + street_regex + '$', stopname)
    if m:
        return "%s & %s" % (string.capwords(strip_boring_street_suffix(m.group(1))), 
                            string.capwords(strip_boring_street_suffix(m.group(3))))

    return string.capwords(stopname)
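The intersection branch of revise_stopname hinges on the street-suffix alternation. A self-contained trace of that regex on one illustrative stop name:

import re
import string

street_suffixes = ["Ave", "Blvd", "Cr", "Crt", "Ct", "Dr", "Gate", "Pkwy", "Rd", "Row",
                   "St", "Hwy", "Terr"]
street_regex = '(?:' + '|'.join(street_suffixes) + ')'
m = re.match('^(.*) ' + street_regex + ' (after|before|opposite|in front of) (.*) ' + street_regex + '$',
             'Main St after Elm Ave')
print("%s & %s" % (string.capwords(m.group(1)), string.capwords(m.group(3))))  # -> Main & Elm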
Example #21
    def clean(self):

        cleaned_data = self.cleaned_data
        session = cleaned_data.get('session')
        first_name = cleaned_data.get('first_name')
        last_name = cleaned_data.get('last_name')

        if first_name == None or last_name == None:
            return cleaned_data

        first_name = string.capwords(first_name)
        last_name = string.capwords(last_name)
        
        sess = Session.objects.get(pk=session)
        
        try:
            stud = Student.objects.get(course=sess.course,first_name=first_name,last_name=last_name)
            if stud in sess.student.all():
                raise forms.ValidationError("You are already signed in!")
            else:
                self.student = stud
        except Student.DoesNotExist:
            raise forms.ValidationError("You are not a member of this course!")
        
        return cleaned_data
Example #22
	def save(self, *args, **kwargs):
		super(Break, self).save(*args, **kwargs)
		if self.active:
			desc = "%s tok en pause!" % capwords(self.workday.slave.get_full_name())
		else:
			desc = "%s er ferdig med pausa!" % capwords(self.workday.slave.get_full_name())
		Activity(desc=desc, workday=self.workday).save()
Example #23
 def check_for_extra_permit_files():
     extra_permits = set()
     permits = glob('apps/regional/static/permits/*/*')
     for permit in permits:
         permit = re.sub('apps/regional/static/permits/', '', permit)
         state = permit[:2].upper()
         if re.match('city', permit[3:]):
             city = re.sub('city-|-all|-residential|-commercial|.pdf', '', permit[3:])
             city = string.capwords(re.sub('-', ' ', city))
             zipcode = Region.objects.filter(city=city, state=state)[0].zipcode
             permit = get_permit(zipcode, 'residential')
             if not permit or not permit.get('url'):
                 extra_permits.add('%s, %s' % (city, state))
         elif re.match('county', permit[3:]):
             county = re.sub('county-|-all|-residential|-commercial|.pdf', '', permit[3:])
             county = string.capwords(re.sub('-', ' ', county))
             zipcode = Region.objects.filter(county=county, state=state)[0].zipcode
             permit = get_permit(zipcode, 'residential')
             if not permit or not permit.get('url'):
                 extra_permits.add('%s, %s' % (county, state))
     extra_permits = list(extra_permits)
     extra_permits.sort()
     with open(path.join(path.dirname(__file__), 'extra_permits.txt'), 'w') as out:
         for permit in extra_permits:
             out.write('%s\n' % permit)
Example #24
 def delete_button_click(self):
     search = self.delete_combo_box.currentText()
     text = self.delete_line_edit.text()
     if search == 'ISBN':
         if not(Validations.is_valid_isbn(text)):
             QMessageBox(QMessageBox.Critical, "Error",
                         "Invalid ISBN. Please correct it!").exec_()
             return
         books = select_by_isbn(text)
         if books != []:
             delete_by_isbn(text)
             QMessageBox(QMessageBox.Information, "Information",
                         "You successfully deleted this book!").exec_()
             return
         else:
             QMessageBox(QMessageBox.Information, "No results",
                         "There is NO such book in the library!").exec_()
             return
     elif search == 'Title':
         books = select_by_title(string.capwords(text))
         if books != []:
             delete_by_title(string.capwords(text))
             QMessageBox(QMessageBox.Information, "Information",
                         "You successfully deleted this book!").exec_()
             return
         else:
             QMessageBox(QMessageBox.Information, "No results",
                         "There is NO such book in the library!").exec_()
             return
Example #25
 def menu(n):
     if fg.get()=='1' or fg.get()=='0':
         m[0][0]=fg.get()
     if fg1.get()=='1' or fg1.get()=='0':
         m[0][1]=fg1.get()
     if fg2.get()=='1' or fg2.get()=='0':
         m[0][2]=fg2.get()
     if fg3.get()=='1' or fg3.get()=='0':
         m[1][0]=fg3.get()
     if fg4.get()=='1' or fg4.get()=='0':
         m[1][1]=fg4.get()
     if fg5.get()=='1' or fg5.get()=='0':
         m[1][2]=fg5.get()
     if fg6.get()=='1' or fg6.get()=='0':
         m[2][0]=fg6.get()
     if fg7.get()=='1' or fg7.get()=='0':
         m[2][1]=fg7.get()
     if fg8.get()=='1' or fg8.get()=='0':
         m[2][2]=fg8.get()
     gana=jugada()
     if gana=='0':
         # "Felicitaciones! Ha Ganado el Jugador ..." = "Congratulations! Player ... has won"
         tkMessageBox.showinfo("Felicitaciones!", "Ha Ganado el Jugador "+capwords(jugador1.get()))
     elif gana=='1':
         tkMessageBox.showinfo("Felicitaciones!", "Ha Ganado el Jugador "+capwords(jugador2.get()))
     else:
         f=0
         for i in m:
             if i==2:
                 f+=1
         if f==0:
             tkMessageBox.showinfo("Empate", "Jugada Terminada")
Example #26
 def get_job_metadata(self, page):
     """Collect metadata for extractor-job"""
     group  = ""
     gtype  = ""
     series = ""
     _     , pos = text.extract(page, '<h1><a href="/reader/', '')
     title , pos = text.extract(page, '.html">', "</a>", pos)
     _     , pos = text.extract(page, '<li><a href="/artist/', '', pos)
     artist, pos = text.extract(page, '.html">', '</a>', pos)
     test  , pos = text.extract(page, '<li><a href="/group/', '', pos)
     if test is not None:
         group , pos = text.extract(page, '.html">', '</a>', pos)
     test  , pos = text.extract(page, '<a href="/type/', '', pos)
     if test is not None:
         gtype , pos = text.extract(page, '.html">', '</a>', pos)
     _     , pos = text.extract(page, '<td>Language</td>', '', pos)
     lang  , pos = text.extract(page, '.html">', '</a>', pos)
     test  , pos = text.extract(page, '<a href="/series/', '', pos)
     if test is not None:
         series, pos = text.extract(page, '.html">', '</a>', pos)
     lang = lang.capitalize()
     return {
         "category": self.category,
         "gallery-id": self.gid,
         "title": title,
         "artist": string.capwords(artist),
         "group": string.capwords(group),
         "type": gtype[1:-1].capitalize(),
         "lang": iso639_1.language_to_code(lang),
         "language": lang,
         "series": string.capwords(series),
     }
Example #27
 def __setitem__(self, i, y):
     #print "set item: %s,%s" %(i,y)
     self._regexIsDirty = True
     # for each entry the user adds, we actually add three entrys:
     super(type(self),self).__setitem__(string.lower(i),string.lower(y)) # key = value
     super(type(self),self).__setitem__(string.capwords(i), string.capwords(y)) # Key = Value
     super(type(self),self).__setitem__(string.upper(i), string.upper(y)) # KEY = VALUE
Example #28
	def save(self, *args, **kwargs):
		super(Workday, self).save(*args, **kwargs)
		if self.active:
			desc = "%s sjekket inn!" % capwords(self.slave.get_full_name())
		else:
			desc = "%s sjekket ut!" % capwords(self.slave.get_full_name())
		Activity(desc=desc, workday=self).save()
Example #29
def Send_Images(bot, chat_id, user, requestText, args, keyConfig, total_number_to_send=1):
    data, total_results, results_this_page = Google_Custom_Search(args)
    if 'items' in data and total_results > 0:
        total_offset, total_results, total_sent = search_results_walker(args, bot, chat_id, data, total_number_to_send,
                                                                        user + ', ' + requestText, results_this_page,
                                                                        total_results, keyConfig)
        if int(total_sent) < int(total_number_to_send):
            if int(total_number_to_send) > 1:
                bot.sendMessage(chat_id=chat_id, text='I\'m sorry ' + (user if not user == '' else 'Dave') +
                                                      ', I\'m afraid I can\'t find any more images for ' +
                                                      string.capwords(requestText.encode('utf-8') + '.' +
                                                                      ' I could only find ' + str(total_sent) +
                                                                      ' out of ' + str(total_number_to_send)))
            else:
                bot.sendMessage(chat_id=chat_id, text='I\'m sorry ' + (user if not user == '' else 'Dave') +
                                                      ', I\'m afraid I can\'t find any images for ' +
                                                      string.capwords(requestText.encode('utf-8')))
        else:
            return True
    else:
        if 'error' in data:
            bot.sendMessage(chat_id=chat_id, text='I\'m sorry ' + (user if not user == '' else 'Dave') +
                                                  data['error']['message'])
        else:
            bot.sendMessage(chat_id=chat_id, text='I\'m sorry ' + (user if not user == '' else 'Dave') +
                                                  ', I\'m afraid I can\'t find any images for ' +
                                                  string.capwords(requestText.encode('utf-8')))
Example #30
    def run(self, name):
        if name != 'you':
            print(string.capwords("f**k you {}!".format(name)), end='\n\n')
            return

        print(string.capwords("f**k you!"), end='\n\n')
        return
Example #31
    def handle_event(self, event, players, cur_player, cities):
        if not self.is_close:
            self.area.handle_event(event)

            # sub_text & sub_board
            for i, box in enumerate(self.subtext_area):
                box.handle_event(event)
                if box.active:
                    if box != self.cur_subtext_area or self.city_update:
                        self.cur_subtext_area = box
                        self.get_subboard(box.org_text, players, cur_player,
                                          self.city_pick, cities)
                # get the subboard for that box
                if self.subboard:
                    for board in self.subboard:
                        board.handle_event(event)
                # if box.active = Move, record cur_move_ppl
                if self.cur_subtext_area:
                    if self.cur_subtext_area.org_text == 'Move':
                        if self.subboard:
                            if self.subboard[0].rtn_select():
                                self.cur_move_ppl = self.subboard[
                                    0].rtn_select()

            # update summary text after sub_text and sub_board updated
            summary_body = []
            if self.cur_subtext_area:
                summary_body = ['Action:  ' + self.cur_subtext_area.org_text]
                if self.cur_subtext_area.org_text == 'Move' and self.city_pick:
                    if self.cur_move_ppl:
                        summary_body.append('Whom:  ' + self.cur_move_ppl)
                    else:
                        summary_body.append('Whom:  ' + cur_player.name)
                    summary_body.append('To:  ' +
                                        string.capwords(self.city_pick.txt))

                    if len(self.subboard) > 1:
                        body = self.subboard[1].rtn_select()
                        if body:
                            summary_body.append('Card to Use:  ' + body)

                if self.subboard and not self.cur_subtext_area.org_text == 'Move':
                    for board in self.subboard:
                        title = board.title
                        body = board.rtn_select()
                        if body:
                            body = body if isinstance(body, list) else [body]
                            body = [string.capwords(s) for s in body]
                            if len(body) > 3:
                                summary_body.append(title + ':  ' +
                                                    ', '.join(body[:3]))
                                summary_body.append('         ' +
                                                    ', '.join(body[3:]))
                            else:
                                summary_body.append(title + ':  ' +
                                                    ', '.join(body))
            self.summary.add_body(summary_body,
                                  size=20,
                                  color=BLACK,
                                  line_space=20,
                                  indent=10,
                                  fit_size=50,
                                  n_col=1)

            # confirm bottom
            self.bottom.handle_event(event)
            self.done_bottom.handle_event(event)

            text = 'Player action used: ' + str(
                cur_player.action_used) + ' / ' + str(cur_player.action)
            self.action_used.add_text(text=text, size=18, color=BLACK)
Example #32
def to_camel_case(s):
    return s[0].lower() + string.capwords(s, sep='_').replace('_', '')[1:] if s else s
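A quick self-contained check of the one-liner above (sample identifiers are illustrative; the function is restated so the snippet runs on its own):

import string

def to_camel_case(s):  # restated from the snippet above
    return s[0].lower() + string.capwords(s, sep='_').replace('_', '')[1:] if s else s

print(to_camel_case('background_color'))    # -> backgroundColor
print(to_camel_case('HTTP_response_code'))  # -> httpResponseCode
print(to_camel_case(''))                    # -> (empty string)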
Example #33
    def title(self, irc, msg, args, text):
        """<text>

        Returns <text> titlecased.
        """
        irc.reply(string.capwords(text, " "))
Example #34
def load_authority_file(cursor, path_to_authority_files, filename,
                        auth_file_to_entity_concept_mapping):
    print filename.upper()

    start = time()
    value_types = models.DValueType.objects.all()
    filepath = os.path.join(path_to_authority_files, filename)
    unicodecsv.field_size_limit(sys.maxint)
    errors = []
    lookups = Lookups()

    #create nodes for each authority document file and relate them to the authority document node in the concept schema
    auth_doc_file_name = str(filename)
    display_file_name = string.capwords(
        auth_doc_file_name.replace('_', ' ').replace('AUTHORITY DOCUMENT.csv',
                                                     '').strip())
    if auth_doc_file_name.upper(
    ) != 'ARCHES RESOURCE CROSS-REFERENCE RELATIONSHIP TYPES.E32.CSV':
        top_concept = Concept()
        top_concept.id = str(uuid.uuid4())
        top_concept.nodetype = 'Concept'
        top_concept.legacyoid = auth_doc_file_name
        top_concept.addvalue({
            'value': display_file_name,
            'language': settings.LANGUAGE_CODE,
            'type': 'prefLabel',
            'category': 'label'
        })
        lookups.add_relationship(source='00000000-0000-0000-0000-000000000001',
                                 type='hasTopConcept',
                                 target=top_concept.id)

    else:
        top_concept = Concept().get(id='00000000-0000-0000-0000-000000000005')
        top_concept.legacyoid = 'ARCHES RESOURCE CROSS-REFERENCE RELATIONSHIP TYPES.E32.csv'

    lookups.add_lookup(concept=top_concept, rownum=0)

    try:
        with open(filepath, 'rU') as f:
            rows = unicodecsv.DictReader(f,
                                         fieldnames=[
                                             'CONCEPTID', 'PREFLABEL',
                                             'ALTLABELS', 'PARENTCONCEPTID',
                                             'CONCEPTTYPE', 'PROVIDER'
                                         ],
                                         encoding='utf-8-sig',
                                         delimiter=',',
                                         restkey='ADDITIONAL',
                                         restval='MISSING')
            rows.next()  # skip header row
            for row in rows:
                try:
                    if 'MISSING' in row:
                        raise Exception(
                            'The row wasn\'t parsed properly. Missing %s' %
                            (row['MISSING']))
                    else:
                        legacyoid = row[u'CONCEPTID']
                        concept = Concept()
                        concept.id = legacyoid if is_uuid(
                            legacyoid) == True else str(uuid.uuid4())
                        concept.nodetype = 'Concept'  # if row[u'CONCEPTTYPE'].upper() == 'INDEX' else 'Collection'
                        concept.legacyoid = row[u'CONCEPTID']
                        concept.addvalue({
                            'value': row[u'PREFLABEL'],
                            'language': settings.LANGUAGE_CODE,
                            'type': 'prefLabel',
                            'category': 'label'
                        })
                        if row['CONCEPTTYPE'].lower() == 'collector':
                            concept.addvalue({
                                'value': row[u'PREFLABEL'],
                                'language': settings.LANGUAGE_CODE,
                                'type': 'collector',
                                'category': 'label'
                            })
                        if row[u'ALTLABELS'] != '':
                            altlabel_list = row[u'ALTLABELS'].split(';')
                            for altlabel in altlabel_list:
                                concept.addvalue({
                                    'value': altlabel,
                                    'language': settings.LANGUAGE_CODE,
                                    'type': 'altLabel',
                                    'category': 'label'
                                })

                        parent_concept_id = lookups.get_lookup(
                            legacyoid=row[u'PARENTCONCEPTID']).id
                        lookups.add_relationship(source=parent_concept_id,
                                                 type='narrower',
                                                 target=concept.id,
                                                 rownum=rows.line_num)
                        # don't add a member relationship between a top concept and its children
                        if parent_concept_id != top_concept.id:
                            lookups.add_relationship(source=parent_concept_id,
                                                     type='member',
                                                     target=concept.id,
                                                     rownum=rows.line_num)

                        # add the member relationship from the E55 type (typically) to their top members
                        if auth_doc_file_name in auth_file_to_entity_concept_mapping and row[
                                u'PARENTCONCEPTID'] == auth_doc_file_name:
                            for entitytype_info in auth_file_to_entity_concept_mapping[
                                    auth_doc_file_name]:
                                lookups.add_relationship(
                                    source=entitytype_info[
                                        'ENTITYTYPE_CONCEPTID'],
                                    type='member',
                                    target=concept.id,
                                    rownum=rows.line_num)

                        if row[u'PARENTCONCEPTID'] == '' or (
                                row[u'CONCEPTTYPE'].upper() != 'INDEX' and
                                row[u'CONCEPTTYPE'].upper() != 'COLLECTOR'):
                            raise Exception('The row has invalid values.')

                        lookups.add_lookup(concept=concept,
                                           rownum=rows.line_num)

                except Exception as e:
                    errors.append('ERROR in row %s: %s' %
                                  (rows.line_num, str(e)))

    except UnicodeDecodeError as e:
        errors.append(
            'ERROR: Make sure the file is saved with UTF-8 encoding\n%s\n%s' %
            (str(e), traceback.format_exc()))
    except Exception as e:
        errors.append('ERROR: %s\n%s' % (str(e), traceback.format_exc()))

    if len(errors) > 0:
        errors.insert(0, 'ERRORS IN FILE: %s\n' % (filename))
        errors.append('\n\n\n\n')

    try:
        # try and open the values file if it exists
        if exists(filepath.replace('.csv', '.values.csv')):
            with open(filepath.replace('.csv', '.values.csv'), 'rU') as f:
                rows = unicodecsv.DictReader(
                    f,
                    fieldnames=['CONCEPTID', 'VALUE', 'VALUETYPE', 'PROVIDER'],
                    encoding='utf-8-sig',
                    delimiter=',',
                    restkey='ADDITIONAL',
                    restval='MISSING')
                rows.next()  # skip header row
                for row in rows:
                    try:
                        if 'ADDITIONAL' in row:
                            raise Exception(
                                'The row wasn\'t parsed properly. Additional fields found %s.  Add quotes to values that have commas in them.'
                                % (row['ADDITIONAL']))
                        else:
                            row_valuetype = row[u'VALUETYPE'].strip()
                            if row_valuetype not in value_types.values_list(
                                    'valuetype', flat=True):
                                valuetype = models.DValueType()
                                valuetype.valuetype = row_valuetype
                                valuetype.category = 'undefined'
                                valuetype.namespace = 'arches'
                                valuetype.save()

                            value_types = models.DValueType.objects.all()
                            concept = lookups.get_lookup(
                                legacyoid=row[u'CONCEPTID'])
                            category = value_types.get(
                                valuetype=row_valuetype).category
                            concept.addvalue({
                                'value': row[u'VALUE'],
                                'type': row[u'VALUETYPE'],
                                'category': category
                            })

                    except Exception as e:
                        errors.append('ERROR in row %s (%s): %s' %
                                      (rows.line_num, str(e), row))

    except UnicodeDecodeError as e:
        errors.append(
            'ERROR: Make sure the file is saved with UTF-8 encoding\n%s\n%s' %
            (str(e), traceback.format_exc()))
    except Exception as e:
        errors.append('ERROR: %s\n%s' % (str(e), traceback.format_exc()))

    if len(errors) > 0:
        errors.insert(
            0,
            'ERRORS IN FILE: %s\n' % (filename.replace('.csv', '.values.csv')))
        errors.append('\n\n\n\n')

    # insert and index the concepts
    for key in lookups.lookup:
        try:
            lookups.lookup[key]['concept'].save()
        except Exception as e:
            errors.append('ERROR in row %s (%s):\n%s\n' %
                          (lookups.lookup[key]['rownum'], str(e),
                           traceback.format_exc()))

        lookups.lookup[key]['concept'].index(scheme=top_concept)

    # insert the concept relations
    for relation in lookups.concept_relationships:
        sql = """
            INSERT INTO relations(relationid, conceptidfrom, conceptidto, relationtype)
            VALUES (public.uuid_generate_v1mc(), '%s', '%s', '%s');
        """ % (relation['source'], relation['target'], relation['type'])
        #print sql
        try:
            cursor.execute(sql)
        except Exception as e:
            errors.append('ERROR in row %s (%s):\n%s\n' %
                          (relation['rownum'], str(e), traceback.format_exc()))

    if len(errors) > 0:
        errors.insert(0, 'ERRORS IN FILE: %s\n' % (filename))
        errors.append('\n\n\n\n')

    #print 'Time to parse = %s' % ("{0:.2f}".format(time() - start))

    return errors
Example #35
def crawl_meta(meta_hdf5=None,
               write_meta_name='data.hdf5',
               crawl_review=False):

    if meta_hdf5 is None:
        # Crawl the meta data from OpenReview
        # Set up a browser to crawl from dynamic web pages
        from selenium import webdriver
        from selenium.webdriver.chrome.options import Options

        # from pyvirtualdisplay import Display
        # display = Display(visible=0, size=(800, 800))
        # display.start()

        import time
        executable_path = '/usr/local/bin/chromedriver'
        options = Options()
        options.add_argument("--headless")
        browser = webdriver.Chrome(options=options,
                                   executable_path=executable_path)

        # Load all URLs for all ICLR submissions
        urls = []
        with open('urls.txt') as f:
            urls = f.readlines()
        urls = [url.strip() for url in urls]

        meta_list = []
        wait_time = 0.25
        max_try = 1000
        for i, url in enumerate(urls):
            browser.get(url)
            time.sleep(wait_time)
            key = browser.find_elements_by_class_name("note_content_field")
            key = [k.text for k in key]
            withdrawn = 'Withdrawal Confirmation:' in key
            desk_reject = 'Desk Reject Comments:' in key
            value = browser.find_elements_by_class_name("note_content_value")
            value = [v.text for v in value]

            # title
            title = string.capwords(
                browser.find_element_by_class_name("note_content_title").text)
            # abstract
            valid = False
            tries = 0
            while not valid:
                if 'Abstract:' in key:
                    valid = True
                else:
                    time.sleep(wait_time)
                    tries += 1
                    key = browser.find_elements_by_class_name(
                        "note_content_field")
                    key = [k.text for k in key]
                    withdrawn = 'Withdrawal Confirmation:' in key
                    desk_reject = 'Desk Reject Comments:' in key
                    value = browser.find_elements_by_class_name(
                        "note_content_value")
                    value = [v.text for v in value]
                    if tries >= max_try:
                        print('Reached max try: {} ({})'.format(title, url))
                        break
            abstract = ' '.join(value[key.index('Abstract:')].split('\n'))
            # keyword
            if 'Keywords:' in key:
                keyword = value[key.index('Keywords:')].split(',')
                keyword = [k.strip(' ') for k in keyword]
                keyword = [
                    ''.join(string.capwords(k).split(' ')) for k in keyword
                    if not k == ''
                ]
                for j in range(len(keyword)):
                    if '-' in keyword[j]:
                        keyword[j] = ''.join([
                            string.capwords(kk) for kk in keyword[j].split('-')
                        ])
            else:
                keyword = []
            # rating
            rating_idx = [i for i, x in enumerate(key) if x == "Rating:"]
            rating = []
            if len(rating_idx) > 0:
                for idx in rating_idx:
                    rating.append(int(value[idx].split(":")[0]))

            if crawl_review:
                review_idx = [i for i, x in enumerate(key) if x == "Review:"]
                # review = []
                review_len = []
                if len(review_idx) > 0:
                    for idx in review_idx:
                        review_len.append(
                            len([
                                w for w in value[idx].replace('\n', ' ').split(
                                    ' ') if not w == ''
                            ]))
                        # review.append(value[idx])
            # decision
            if 'Recommendation:' in key:
                decision = value[key.index('Recommendation:')]
            else:
                decision = 'N/A'

            # log
            log_str = '[{}] Abs: {} chars, keywords: {}, ratings: {}'.format(
                i + 1,
                len(abstract),
                len(keyword),
                rating,
            )
            if crawl_review:
                log_str += ', review len: {}'.format(review_len)
            if not decision == 'N/A':
                log_str += ', decision: {}'.format(decision)
            log_str += '] {}'.format(title)

            if withdrawn:
                log_str += ' (withdrawn)'
            if desk_reject:
                log_str += ' (desk_reject)'
            print(log_str)

            meta_list.append(
                PaperMeta(
                    title,
                    abstract,
                    keyword,
                    rating,
                    url,
                    withdrawn,
                    desk_reject,
                    decision,
                    # None if not crawl_review else review,
                    None if not crawl_review else review_len,
                ))

        # Save the crawled data
        write_meta(meta_list, write_meta_name)
    else:
        # Load the meta data from local
        meta_list = read_meta(meta_hdf5)
    return meta_list
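The keyword handling in crawl_meta turns free-text keywords into CamelCase tags. A self-contained demo of just that normalization step (the sample keywords are illustrative):

import string

for k in ['deep learning', 'graph neural networks', 'semi-supervised']:
    tag = ''.join(string.capwords(k).split(' '))
    if '-' in tag:
        tag = ''.join(string.capwords(part) for part in tag.split('-'))
    print(tag)  # -> DeepLearning, GraphNeuralNetworks, SemiSupervised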
Example #36
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import string

s = 'The quick brown fox jumped over the lazy dog.'

print(s)
print(string.capwords(s))
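For contrast with this demo: string.capwords splits on whitespace and applies str.capitalize to each word, so it treats apostrophes and digits differently from str.title:

import string

s = "it's a fox's 2nd day"
print(string.capwords(s))  # It's A Fox's 2nd Day
print(s.title())           # It'S A Fox'S 2Nd Day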
Example #37
import string
# maketrans() and capwords are not being moved to the string object from the string module
# Also, string.Template

s = "The quick brown fox jumped over the lazy dog."
leet = string.maketrans('abegiloprstz', '463611092572')

print "Base String     :", s
print "Capwords String :", string.capwords(s)
print "Translate String:", s.translate(leet)


values = { 'var':'foo' }


# Template vs. string interpolation

t = string.Template("""
Variable        : $var
Escape          : $$
Variable in text: ${var}iable
""")

print '\nTEMPLATE:', t.substitute(values)

s = """
Variable        : %(var)s
Escape          : %%
Variable in text: %(var)siable
"""
Example #38
    def getPage(**kwargs):
        """
        This method connects to the target URL or proxy and returns
        the target URL page content
        """

        if isinstance(conf.delay, (int, float)) and conf.delay > 0:
            time.sleep(conf.delay)
        elif conf.cpuThrottle:
            cpuThrottle(conf.cpuThrottle)

        if conf.dummy:
            return randomStr(int(randomInt()),
                             alphabet=[chr(_) for _ in xrange(256)
                                       ]), {}, int(randomInt())

        threadData = getCurrentThreadData()
        with kb.locks.request:
            kb.requestCounter += 1
            threadData.lastRequestUID = kb.requestCounter

        url = kwargs.get("url", None) or conf.url
        get = kwargs.get("get", None)
        post = kwargs.get("post", None)
        method = kwargs.get("method", None)
        cookie = kwargs.get("cookie", None)
        ua = kwargs.get("ua", None) or conf.agent
        referer = kwargs.get("referer", None) or conf.referer
        host = kwargs.get("host", None) or conf.host
        direct_ = kwargs.get("direct", False)
        multipart = kwargs.get("multipart", False)
        silent = kwargs.get("silent", False)
        raise404 = kwargs.get("raise404", True)
        timeout = kwargs.get("timeout", None) or conf.timeout
        auxHeaders = kwargs.get("auxHeaders", None)
        response = kwargs.get("response", False)
        ignoreTimeout = kwargs.get("ignoreTimeout", False) or kb.ignoreTimeout
        refreshing = kwargs.get("refreshing", False)
        retrying = kwargs.get("retrying", False)
        crawling = kwargs.get("crawling", False)
        skipRead = kwargs.get("skipRead", False)

        if not urlparse.urlsplit(url).netloc:
            url = urlparse.urljoin(conf.url, url)

        # flag to know if we are dealing with the same target host
        target = reduce(
            lambda x, y: x == y,
            map(lambda x: urlparse.urlparse(x).netloc.split(':')[0],
                [url, conf.url or ""]))

        if not retrying:
            # Reset the number of connection retries
            threadData.retriesCount = 0

        # fix for known issue when urllib2 just skips the other part of provided
        # url splitted with space char while urlencoding it in the later phase
        url = url.replace(" ", "%20")

        conn = None
        code = None
        page = None

        _ = urlparse.urlsplit(url)
        requestMsg = u"HTTP request [#%d]:\n%s " % (
            threadData.lastRequestUID, method or
            (HTTPMETHOD.POST if post is not None else HTTPMETHOD.GET))
        requestMsg += ("%s%s" %
                       (_.path or "/",
                        ("?%s" % _.query) if _.query else "")) if not any(
                            (refreshing, crawling)) else url
        responseMsg = u"HTTP response "
        requestHeaders = u""
        responseHeaders = None
        logHeaders = u""
        skipLogTraffic = False

        raise404 = raise404 and not kb.ignoreNotFound

        # support for non-latin (e.g. cyrillic) URLs as urllib/urllib2 doesn't
        # support those by default
        url = asciifyUrl(url)

        # fix for known issues when using url in unicode format
        # (e.g. UnicodeDecodeError: "url = url + '?' + query" in redirect case)
        url = unicodeencode(url)

        try:
            socket.setdefaulttimeout(timeout)

            if direct_:
                if '?' in url:
                    url, params = url.split('?', 1)
                    params = urlencode(params)
                    url = "%s?%s" % (url, params)
                    requestMsg += "?%s" % params

            elif multipart:
                # Needed in this form because of potential circle dependency
                # problem (option -> update -> connect -> option)
                from lib.core.option import proxyHandler

                multipartOpener = urllib2.build_opener(
                    proxyHandler, multipartpost.MultipartPostHandler)
                conn = multipartOpener.open(unicodeencode(url), multipart)
                page = Connect._connReadProxy(conn) if not skipRead else None
                responseHeaders = conn.info()
                responseHeaders[URI_HTTP_HEADER] = conn.geturl()
                page = decodePage(
                    page, responseHeaders.get(HTTP_HEADER.CONTENT_ENCODING),
                    responseHeaders.get(HTTP_HEADER.CONTENT_TYPE))

                return page

            elif any((refreshing, crawling)):
                pass

            elif target:
                if conf.forceSSL and urlparse.urlparse(url).scheme != "https":
                    url = re.sub("\Ahttp:", "https:", url, re.I)
                    url = re.sub(":80/", ":443/", url, re.I)

                if PLACE.GET in conf.parameters and not get:
                    get = conf.parameters[PLACE.GET]

                    if not conf.skipUrlEncode:
                        get = urlencode(get, limit=True)

                if get:
                    url = "%s?%s" % (url, get)
                    requestMsg += "?%s" % get

                if PLACE.POST in conf.parameters and not post and method in (
                        None, HTTPMETHOD.POST):
                    post = conf.parameters[PLACE.POST]

            elif get:
                url = "%s?%s" % (url, get)
                requestMsg += "?%s" % get

            requestMsg += " %s" % httplib.HTTPConnection._http_vsn_str

            # Prepare HTTP headers
            headers = forgeHeaders({
                HTTP_HEADER.COOKIE: cookie,
                HTTP_HEADER.USER_AGENT: ua,
                HTTP_HEADER.REFERER: referer
            })

            if kb.authHeader:
                headers[HTTP_HEADER.AUTHORIZATION] = kb.authHeader

            if kb.proxyAuthHeader:
                headers[HTTP_HEADER.PROXY_AUTHORIZATION] = kb.proxyAuthHeader

            headers[HTTP_HEADER.ACCEPT] = HTTP_ACCEPT_HEADER_VALUE
            headers[HTTP_HEADER.ACCEPT_ENCODING] = HTTP_ACCEPT_ENCODING_HEADER_VALUE if kb.pageCompress else "identity"
            headers[HTTP_HEADER.HOST] = host or getHostHeader(url)

            if post is not None and HTTP_HEADER.CONTENT_TYPE not in headers:
                headers[
                    HTTP_HEADER.CONTENT_TYPE] = POST_HINT_CONTENT_TYPES.get(
                        kb.postHint, DEFAULT_CONTENT_TYPE)

            if headers.get(
                    HTTP_HEADER.CONTENT_TYPE) == POST_HINT_CONTENT_TYPES[
                        POST_HINT.MULTIPART]:
                warnMsg = "missing boundary parameter in '%s' header. " % HTTP_HEADER.CONTENT_TYPE
                warnMsg += "sqlmap will try to reconstruct it"
                singleTimeWarnMessage(warnMsg)

                boundary = findMultipartPostBoundary(conf.data)
                if boundary:
                    headers[HTTP_HEADER.CONTENT_TYPE] = "%s; boundary=%s" % (
                        headers[HTTP_HEADER.CONTENT_TYPE], boundary)

            if auxHeaders:
                for key, item in auxHeaders.items():
                    for _ in headers.keys():
                        if _.upper() == key.upper():
                            del headers[_]
                    headers[key] = item

            for key, item in headers.items():
                del headers[key]
                headers[unicodeencode(key, kb.pageEncoding)] = unicodeencode(
                    item, kb.pageEncoding)

            post = unicodeencode(post, kb.pageEncoding)

            if method:
                req = MethodRequest(url, post, headers)
                req.set_method(method)
            else:
                req = urllib2.Request(url, post, headers)

            requestHeaders += "\n".join(
                "%s: %s" %
                (key.capitalize() if isinstance(key, basestring) else key,
                 getUnicode(value)) for (key, value) in req.header_items())

            if not getRequestHeader(req, HTTP_HEADER.COOKIE) and conf.cj:
                conf.cj._policy._now = conf.cj._now = int(time.time())
                cookies = conf.cj._cookies_for_request(req)
                requestHeaders += "\n%s" % ("Cookie: %s" % ";".join(
                    "%s=%s" %
                    (getUnicode(cookie.name), getUnicode(cookie.value))
                    for cookie in cookies))

            if post is not None:
                if not getRequestHeader(req, HTTP_HEADER.CONTENT_LENGTH):
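                    # string.capwords() capitalizes the first letter of each whitespace-separated
                    # word (lowercasing the rest), normalizing the header name for the traffic log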
                    requestHeaders += "\n%s: %d" % (string.capwords(
                        HTTP_HEADER.CONTENT_LENGTH), len(post))

            if not getRequestHeader(req, HTTP_HEADER.CONNECTION):
                requestHeaders += "\n%s: close" % HTTP_HEADER.CONNECTION

            requestMsg += "\n%s" % requestHeaders

            if post is not None:
                requestMsg += "\n\n%s" % getUnicode(post)

            requestMsg += "\n"

            threadData.lastRequestMsg = requestMsg

            logger.log(CUSTOM_LOGGING.TRAFFIC_OUT, requestMsg)

            conn = urllib2.urlopen(req)

            if not kb.authHeader and getRequestHeader(
                    req, HTTP_HEADER.AUTHORIZATION
            ) and conf.authType == AUTH_TYPE.BASIC:
                kb.authHeader = getRequestHeader(req,
                                                 HTTP_HEADER.AUTHORIZATION)

            if not kb.proxyAuthHeader and getRequestHeader(
                    req, HTTP_HEADER.PROXY_AUTHORIZATION):
                kb.proxyAuthHeader = getRequestHeader(
                    req, HTTP_HEADER.PROXY_AUTHORIZATION)

            # Return response object
            if response:
                return conn, None, None

            # Get HTTP response
            if hasattr(conn, 'redurl'):
                page = (threadData.lastRedirectMsg[1] if kb.redirectChoice == REDIRECTION.NO\
                  else Connect._connReadProxy(conn)) if not skipRead else None
                skipLogTraffic = kb.redirectChoice == REDIRECTION.NO
                code = conn.redcode
            else:
                page = Connect._connReadProxy(conn) if not skipRead else None

            code = code or conn.code
            responseHeaders = conn.info()
            responseHeaders[URI_HTTP_HEADER] = conn.geturl()
            page = decodePage(
                page, responseHeaders.get(HTTP_HEADER.CONTENT_ENCODING),
                responseHeaders.get(HTTP_HEADER.CONTENT_TYPE))
            status = getUnicode(conn.msg)

            if extractRegexResult(META_REFRESH_REGEX, page) and not refreshing:
                url = extractRegexResult(META_REFRESH_REGEX, page)

                debugMsg = "got HTML meta refresh header"
                logger.debug(debugMsg)

                if kb.alwaysRefresh is None:
                    msg = "sqlmap got a refresh request "
                    msg += "(redirect like response common to login pages). "
                    msg += "Do you want to apply the refresh "
                    msg += "from now on (or stay on the original page)? [Y/n]"
                    choice = readInput(msg, default="Y")

                    kb.alwaysRefresh = choice not in ("n", "N")

                if kb.alwaysRefresh:
                    if url.lower().startswith('http://'):
                        kwargs['url'] = url
                    else:
                        kwargs['url'] = conf.url[:conf.url.rfind('/') +
                                                 1] + url

                    threadData.lastRedirectMsg = (threadData.lastRequestUID,
                                                  page)
                    kwargs['refreshing'] = True
                    kwargs['get'] = None
                    kwargs['post'] = None

                    try:
                        return Connect._getPageProxy(**kwargs)
                    except SqlmapSyntaxException:
                        pass

            # Explicit closing of connection object
            if not conf.keepAlive:
                try:
                    if hasattr(conn.fp, '_sock'):
                        conn.fp._sock.close()
                    conn.close()
                except Exception, msg:
                    warnMsg = "problem occurred during connection closing ('%s')" % msg
                    logger.warn(warnMsg)

        except urllib2.HTTPError, e:
            page = None
            responseHeaders = None

            try:
                page = e.read() if not skipRead else None
                responseHeaders = e.info()
                responseHeaders[URI_HTTP_HEADER] = e.geturl()
                page = decodePage(
                    page, responseHeaders.get(HTTP_HEADER.CONTENT_ENCODING),
                    responseHeaders.get(HTTP_HEADER.CONTENT_TYPE))
            except socket.timeout:
                warnMsg = "connection timed out while trying "
                warnMsg += "to get error page information (%d)" % e.code
                logger.warn(warnMsg)
                return None, None, None
            except KeyboardInterrupt:
                raise
            except:
                pass
            finally:
                page = page if isinstance(page, unicode) else getUnicode(page)

            code = e.code
            threadData.lastHTTPError = (threadData.lastRequestUID, code)

            kb.httpErrorCodes[code] = kb.httpErrorCodes.get(code, 0) + 1

            status = getUnicode(e.msg)
            responseMsg += "[#%d] (%d %s):\n" % (threadData.lastRequestUID,
                                                 code, status)

            if responseHeaders:
                logHeaders = "\n".join("%s: %s" % (getUnicode(key.capitalize(
                ) if isinstance(key, basestring) else key), getUnicode(value))
                                       for (key,
                                            value) in responseHeaders.items())

            logHTTPTraffic(
                requestMsg,
                "%s%s\n\n%s" % (responseMsg, logHeaders,
                                (page or "")[:MAX_CONNECTION_CHUNK_SIZE]))

            skipLogTraffic = True

            if conf.verbose <= 5:
                responseMsg += getUnicode(logHeaders)
            elif conf.verbose > 5:
                responseMsg += "%s\n\n%s" % (logHeaders,
                                             (page or
                                              "")[:MAX_CONNECTION_CHUNK_SIZE])

            logger.log(CUSTOM_LOGGING.TRAFFIC_IN, responseMsg)

            if e.code == httplib.UNAUTHORIZED and not conf.ignore401:
                errMsg = "not authorized, try to provide right HTTP "
                errMsg += "authentication type and valid credentials (%d)" % code
                raise SqlmapConnectionException(errMsg)
            elif e.code == httplib.NOT_FOUND:
                if raise404:
                    errMsg = "page not found (%d)" % code
                    raise SqlmapConnectionException(errMsg)
                else:
                    debugMsg = "page not found (%d)" % code
                    singleTimeLogMessage(debugMsg, logging.DEBUG)
                    processResponse(page, responseHeaders)
            elif e.code == httplib.GATEWAY_TIMEOUT:
                if ignoreTimeout:
                    return None, None, None
                else:
                    warnMsg = "unable to connect to the target URL (%d - %s)" % (
                        e.code, httplib.responses[e.code])
                    if threadData.retriesCount < conf.retries and not kb.threadException:
                        warnMsg += ". sqlmap is going to retry the request"
                        logger.critical(warnMsg)
                        return Connect._retryProxy(**kwargs)
                    elif kb.testMode:
                        logger.critical(warnMsg)
                        return None, None, None
                    else:
                        raise SqlmapConnectionException(warnMsg)
            else:
                debugMsg = "got HTTP error code: %d (%s)" % (code, status)
                logger.debug(debugMsg)
        Keyword: base0e,  # .k
        Keyword.Type: base08,  # .kt
        Name.Attribute: base0d,  # .na
        Name.Builtin: base0d,  # .nb
        Name.Builtin.Pseudo: base08,  # .bp
        Name.Class: base0d,  # .nc
        Name.Constant: base09,  # .no
        Name.Decorator: base09,  # .nd
        Name.Function: base0d,  # .nf
        Name.Namespace: base0d,  # .nn
        Name.Tag: base0e,  # .nt
        Name.Variable: base0d,  # .nv
        Name.Variable.Instance: base08,  # .vi
        Number: base09,  # .m
        Operator: base0c,  # .o
        Operator.Word: base0e,  # .ow
        Literal: base0b,  # .l
        String: base0b,  # .s
        String.Interpol: base0f,  # .si
        String.Regex: base0c,  # .sr
        String.Symbol: base09,  # .ss
    }


from string import capwords  # noqa: E402
Base16Style.__name__ = 'Base16{}Style'.format(
    capwords('gruvbox-dark-medium', '-').replace('-', ''))
globals()[Base16Style.__name__] = globals()['Base16Style']
del globals()['Base16Style']
del capwords
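A quick self-contained sketch of what the renaming above evaluates to; the
'gruvbox-dark-medium' name is taken from the snippet itself:

from string import capwords

# capwords('gruvbox-dark-medium', '-') -> 'Gruvbox-Dark-Medium'; dropping the
# separators yields the generated class name
name = 'Base16{}Style'.format(capwords('gruvbox-dark-medium', '-').replace('-', ''))
assert name == 'Base16GruvboxDarkMediumStyle'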
Example #40
0
 def clean_category(category_name):
     # capitalize every "_"-separated word, then replace the underscores with spaces
     return re.sub("_", " ", string.capwords(category_name, "_"))
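A minimal standalone check of the same transformation clean_category() performs
(the sample category name is made up):

import re
import string

# "machine_learning_papers" -> "Machine Learning Papers"
print(re.sub("_", " ", string.capwords("machine_learning_papers", "_")))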
def Scrap_data(browser, get_htmlSource):

    SegFeild = []
    for data in range(45):
        SegFeild.append('')
    Decoded_get_htmlSource: str = html.unescape(str(get_htmlSource))
    Decoded_get_htmlSource: str = re.sub(' +', ' ',
                                         str(Decoded_get_htmlSource)).replace(
                                             "\n", "").replace("<br>", "")
    a = True
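    # retry loop: any exception while parsing sets a back to True and the whole page is re-parsed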
    while a == True:
        try:
            # ==================================================================================================================
            # Email_ID

            Email_ID = Decoded_get_htmlSource.partition(
                "Direcciones de Correo:</td>")[2].partition("</tr>")[0]
            Email_ID = Email_ID.partition("<a href=")[2].partition("</td>")[0]
            Email_ID = Email_ID.partition(">")[2].partition("</a>")[0].strip()
            SegFeild[1] = Email_ID

            # ==================================================================================================================
            # Address

            Municipality = Decoded_get_htmlSource.partition(
                "Municipio:")[2].partition("</span>")[0]
            cleanr = re.compile('<.*?>')
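            # this regex is reused throughout the function to strip any leftover HTML tags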
            Municipality = re.sub(cleanr, '', Municipality).strip()
            # if Municipality != "":
            #     Municipality = Translate(Municipality).lower()
            # else:pass

            Direction = Decoded_get_htmlSource.partition(
                "Dirección:")[2].partition("</span>")[0]
            cleanr = re.compile('<.*?>')
            Direction = re.sub(cleanr, '', Direction)
            # if Direction != "":
            #     Direction = Translate(Direction).strip()
            # else:pass

            Phones = Decoded_get_htmlSource.partition(
                "Teléfonos:")[2].partition("</span>")[0]
            cleanr = re.compile('<.*?>')
            Phones = re.sub(cleanr, '', Phones).strip()

            Fax_Numbers = Decoded_get_htmlSource.partition(
                "Números de Fax:")[2].partition("</span>")[0]
            cleanr = re.compile('<.*?>')
            Fax_Numbers = re.sub(cleanr, '', Fax_Numbers).strip()

            Postal_mail = Decoded_get_htmlSource.partition(
                "Apartado Postal:")[2].partition("</span>")[0]
            cleanr = re.compile('<.*?>')
            Postal_mail = re.sub(cleanr, '', Postal_mail).strip()

            if Postal_mail != "[--No Especificado--]":
                Collected_Address = Municipality + "," + Direction + "<br>\n" + "Teléfonos: " + Phones + "<br>\n" + "Números de Fax: " + Fax_Numbers + "<br>\n" + "Apartado Postal: " + Postal_mail
                SegFeild[2] = Collected_Address
            else:
                Collected_Address = str(Municipality) + "," + str(
                    Direction) + "<br>\n" + "Teléfonos: " + str(Phones)
                Collected_Address = string.capwords(
                    str(Collected_Address.strip()))
                SegFeild[2] = Collected_Address

            # ==================================================================================================================
            # Country

            SegFeild[7] = "GT"

            # ==================================================================================================================
            # Purchaser WebSite URL

            Websites = Decoded_get_htmlSource.partition(
                "Páginas Web:")[2].partition("</tr>")[0]
            Websites = Websites.partition("<a href=\"")[2].partition(
                "\" target")[0].strip()
            if Websites != "[--No Especificado--]":
                SegFeild[8] = Websites.strip()
            else:
                SegFeild[8] = ""

            # ==================================================================================================================
            # Purchaser Name

            Entity = Decoded_get_htmlSource.partition(
                "MasterGC_ContentBlockHolder_lblEntidad")[2].partition(
                    "</span>")[0]
            Entity = Entity.partition('">')[2].strip()
            if Entity != "":
                # Entity = Translate(Entity)
                SegFeild[12] = Entity.strip().upper()
            else:
                SegFeild[12] = ""

            # ==================================================================================================================
            # Tender no

            Tender_no = Decoded_get_htmlSource.partition("NOG:")[2].partition(
                "</b>")[0]
            cleanr = re.compile('<.*?>')
            Tender_no = re.sub(cleanr, '', Tender_no)
            SegFeild[13] = Tender_no.strip()

            # ==================================================================================================================
            # notice type
            SegFeild[14] = "2"

            # ==================================================================================================================
            # Tender Details

            Title = Decoded_get_htmlSource.partition(
                "Descripción:  </div>")[2].partition("</div>")[0]
            cleanr = re.compile('<.*?>')
            Title = re.sub(cleanr, '', Title)
            Title = string.capwords(str(Title.strip()))
            if Title != "":
                # Title = Translate(Title)
                SegFeild[19] = Title
            else:
                pass

            Modality = Decoded_get_htmlSource.partition(
                "Modalidad:  </div>")[2].partition("</div>")[0]
            cleanr = re.compile('<.*?>')
            Modality = re.sub(cleanr, '', Modality)
            Modality = string.capwords(str(Modality.strip()))
            # if Modality != "":
            #     Modality = Translate(Modality)
            # else:
            #     pass

            Type_of_contest = Decoded_get_htmlSource.partition(
                "Tipo de concurso:  </div>")[2].partition("</div>")[0]
            cleanr = re.compile('<.*?>')
            Type_of_contest = re.sub(cleanr, '', Type_of_contest)
            Type_of_contest = string.capwords(str(Type_of_contest.strip()))
            # if Type_of_contest != "":
            #     Type_of_contest = Translate(Type_of_contest)
            # else:pass

            Receiving_Offers = Decoded_get_htmlSource.partition(
                "Tipo de recepción de ofertas: </div>")[2].partition(
                    "</div>")[0]
            cleanr = re.compile('<.*?>')
            Receiving_Offers = re.sub(cleanr, '', Receiving_Offers).strip()
            Receiving_Offers = string.capwords(str(Receiving_Offers.strip()))
            # if Receiving_Offers != "":
            #     Receiving_Offers = Translate(Receiving_Offers)
            # else:pass

            Process_Type = Decoded_get_htmlSource.partition(
                "Tipo Proceso:  </div>")[2].partition("</div>")[0]
            cleanr = re.compile('<.*?>')
            Process_Type = re.sub(cleanr, '', Process_Type)
            Process_Type = string.capwords(str(Process_Type.strip()))
            # if Process_Type != "":
            #     Process_Type = Translate(Process_Type)
            # else:pass

            Compliance_Bond_percentage = Decoded_get_htmlSource.partition(
                "Porcentaje de Fianza de cumplimiento:  </div>")[2].partition(
                    "</div>")[0]
            cleanr = re.compile('<.*?>')
            Compliance_Bond_percentage = re.sub(cleanr, '',
                                                Compliance_Bond_percentage)
            # if Compliance_Bond_percentage != "":
            #     Compliance_Bond_percentage = Translate(Compliance_Bond_percentage).strip()
            # else:
            #     pass
            Percentage_of_support_bond = Decoded_get_htmlSource.partition(
                "Porcentaje de Fianza de sostenimiento:  </div>")[2].partition(
                    "</div>")[0]
            cleanr = re.compile('<.*?>')
            Percentage_of_support_bond = re.sub(
                cleanr, '', Percentage_of_support_bond).strip()

            Status = Decoded_get_htmlSource.partition(
                "> Estatus:  </div>")[2].partition("</div>")[0]
            cleanr = re.compile('<.*?>')
            Status = re.sub(cleanr, '', Status).strip()
            Status = string.capwords(str(Status.strip()))
            # if Status != "":
            #     Status = Translate(Status)
            # else:
            #     pass

            Collected_Tender_Details = str(Title) + "<br>\n" + "Modalidad: " + str(Modality) + "<br>\n" + "Tipo de concurso: " + str(Type_of_contest) + "<br>\n" + "Tipo de recepción de ofertas: " + str(Receiving_Offers)\
                                        + "<br>\n" + "Tipo Proceso: " + str(Process_Type) + "<br>\n" + "Porcentaje de Fianza de cumplimiento: " + str(Compliance_Bond_percentage) + "<br>\n" + "Porcentaje de Fianza de sostenimiento: " + str(Percentage_of_support_bond) \
                                        + "<br>\n" + "Estatus: " + str(Status)
            Collected_Tender_Details = string.capwords(
                str(Collected_Tender_Details.strip()))
            SegFeild[18] = Collected_Tender_Details

            # ==================================================================================================================
            # Tender Submission Date

            Bid_submission_date = Decoded_get_htmlSource.partition(
                "Fecha de presentación de ofertas:  </div>")[2].partition(
                    "</div>")[0]
            cleanr = re.compile('<.*?>')
            Bid_submission_date = re.sub(cleanr, '', Bid_submission_date)
            Bid_submission_date = Bid_submission_date.partition(
                "Hora:")[0].strip().replace(' ', '')
            Month = Bid_submission_date.partition(".")[2].partition(
                ".")[0].strip().lower()

            # map Spanish month names to their English equivalents so that
            # datetime.strptime('%d.%B.%Y') can parse the date
            spanish_months = {
                'enero': 'January', 'febrero': 'February', 'marzo': 'March',
                'abril': 'April', 'mayo': 'May', 'junio': 'June',
                'julio': 'July', 'agosto': 'August', 'septiembre': 'September',
                'octubre': 'October', 'noviembre': 'November', 'diciembre': 'December'
            }

            if Month in spanish_months:
                Bid_submission_date = Bid_submission_date.replace(
                    '.' + Month + '.', '.' + spanish_months[Month] + '.')
                datetime_object = datetime.strptime(Bid_submission_date,
                                                    '%d.%B.%Y')
                mydate = datetime_object.strftime("%Y-%m-%d")
                SegFeild[24] = mydate

            SegFeild[22] = "0"

            SegFeild[26] = "0.0"

            SegFeild[27] = "0"  # Financier

            SegFeild[28] = "https://www.guatecompras.gt/concursos/consultaConcurso.aspx?nog=" + str(SegFeild[13]).strip()

            # Source Name
            SegFeild[31] = 'guatecompras.gt'

            SegFeild[20] = ''
            SegFeild[21] = ''
            SegFeild[42] = SegFeild[7]
            SegFeild[43] = ''
            for SegIndex in range(len(SegFeild)):
                print(SegIndex, end=' ')
                print(SegFeild[SegIndex])
                SegFeild[SegIndex] = html.unescape(str(SegFeild[SegIndex]))
                SegFeild[SegIndex] = str(SegFeild[SegIndex]).replace("'", "''")
            if len(SegFeild[19]) >= 200:
                SegFeild[19] = str(SegFeild[19])[:200] + '...'

            if len(SegFeild[18]) >= 1500:
                SegFeild[18] = str(SegFeild[18])[:1500] + '...'

            check_date(get_htmlSource, SegFeild)
            a = False

        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print("Error ON : ",
                  sys._getframe().f_code.co_name + "--> " + str(e), "\n",
                  exc_type, "\n", fname, "\n", exc_tb.tb_lineno)
            a = True
Example #42
0
def just_do_it(text):
    from string import capwords
    return capwords(text)
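A tiny runnable check of what just_do_it() returns; the input string is arbitrary:

from string import capwords

# capwords() splits on whitespace, capitalizes each word and lowercases the rest
print(capwords("hello WORLD of python"))   # -> Hello World Of Python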
Example #43
0
def main():
    if not os.path.isdir("./output/"):
        os.makedirs("./output/")

    if len(sys.argv) > 1:
        input_path = sys.argv[1]
    else:
        input_path = './data/bibjson'
    if not os.path.exists(input_path):
        print "Could not find bibjson file! Exiting."
        sys.exit(1)
    with open(input_path) as fid:
        bib = json.load(fid)

    #list for making bibdesk bibliography
    biblist = []

    # header + footer for bbl bibliography
    hdr='\\begin{thebibliography}{1000}\n' +\
        '\\expandafter\\ifx\\csname url\\endcsname\\relax\n' +\
        '\\def\\url#1{\\texttt{#1}}\\fi\n' +\
        '\\expandafter\\ifx\\csname urlprefix\\endcsname\\relax\\def\\urlprefix{URL }\\fi\n' +\
        '\\providecommand{\\bibinfo}[2]{#2}\n' +\
        '\\providecommand{\\eprint}[2][]{\\url{#2}}\n'
    ftr = '\\end{thebibliography}\n'

    # list for making bbl bibliography
    bibitems = [hdr]

    # loop through each bibitem

    for item in bib:
        # TODO: make sure vars are reset

        #IMPORTANT VARIABLES TO GATHER

        names = []

        # document id
        docid = item['_gddid']

        # title
        title = item['title']

        # journal
        journal = item['journal']['name']

        # type of document
        typ = item['type']

        # volume number
        if 'volume' in item.keys():
            volume = item['volume']
        else:
            volume = ''

        # number issue
        if 'number' in item.keys():
            number = item['number']
        else:
            number = ''

        # pages
        if 'pages' in item.keys():
            pages = item['pages']
        else:
            pages = ''

        # publication year
        if 'year' in item.keys():
            year = item['year']
        else:
            year = ''

        # authors, with formatting fixes
        if 'author' in item.keys():
            for name in item['author']:
                names.append(clean(name['name']))
        else:
            names = ''

        # publisher, with formatting fixes
        if 'publisher' in item.keys():
            publisher = clean(item['publisher'])
        else:
            publisher = ''

        # url link
        if 'link' in item.keys():
            if 'url' in item['link'][0]:
                link = clean(item['link'][0]['url'])
            else:
                link = ''
        else:
            link = ''

        # clean up the text fields
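        # all-caps fields are re-cased with string.capwords ("SOME TITLE" -> "Some Title")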
        title = clean(title)
        if title.isupper():
            title = string.capwords(title)

        journal = clean(journal)
        if journal.isupper():
            journal = string.capwords(journal)

        for i, a in enumerate(names):
            if a.isupper():
                names[i] = string.capwords(names[i])

        if publisher != 'USGS':  # Assume all non-USGS documents are articles
            bibtemp='@' + typ + '{' + docid + ',\n' + \
                    'title={{' + title + '}},\n'  + \
                    'author={' + ' and '.join(names) + '},\n' + \
                    'journal={' + journal +'},\n'+\
                    'volume={' + volume + '},\n'+\
                    'year={' + year +'},\n'+\
                    'number={' + str(number) + '},\n'+\
                    'pages={' + pages + '}\n}'
        else:  # assume that all USGS documents are tech reports
            bibtemp='@techreport{' + docid + ',\n' + \
                'title={{' + title + '}},\n'  + \
                'author={' + ' and '.join(names) + '},\n' + \
                'year={' + year +'},\n'+\
                'institution={' + publisher + '},\n'+\
                'booktitle={' + publisher + ', ' + journal + '}\n}'

        # grow the bibliography list
        biblist.append(bibtemp)

        #### initialize variables for bbl bibliography
        cite_tmp = '\\bibitem{' + docid + '}\n'
        name_tmp = []
        title_tmp = ''
        journal_tmp = ''
        volume_tmp = ''
        pages_tmp = ''
        inst_tmp = ''
        year_tmp = ''  # reset each iteration so a missing year cannot reuse the previous item's value (or be undefined)

        #### format author names for bbl
        if names != [''] and names != '' and names:
            for n in names:
                if n != '':
                    # formatting if author written as 'Shaw, C.A.'
                    if ',' in n and n[-1] != ',':
                        tmp = n.split(',')
                        tmp[-1] = tmp[-1].replace(' ', '')
                        name_tmp.append('\\bibinfo{author}{' + tmp[0] + ', ' +
                                        tmp[-1][0] + '.}, ')
                    # formatting if author written as 'Charles Shaw'
                    else:
                        tmp = n.split(' ')
                        name_tmp.append('\\bibinfo{author}{' + tmp[-1] + ', ' +
                                        tmp[0][0] + '.}, ')

            # no comma needed after last author
            name_tmp[-1] = name_tmp[-1][0:-2] + '\n'

            # if more than one author, separate last author from rest with ampersand
            if len(name_tmp) > 1:
                name_tmp[-2] = name_tmp[-2][0:-2] + ' \& '

            # join formatted authors into one string
            name_tmp = ''.join(name_tmp)
        else:  #if no authors found, define as empty string
            name_tmp = ''

        #### format title for bbl

        if link == '':  # if no link included, create normal title
            # some titles do not have periods or question marks at the end
            if title[-1] != '.' and title[-1] != '?':
                title_tmp = '\\newblock \\bibinfo{title}{' + title + '.}\n'
            # others do
            else:
                title_tmp = '\\newblock \\bibinfo{title}{' + title + '}\n'
        elif link != '':  # if link is included, make the title a link to the document.
            # some titles do not have periods or question marks at the end
            if title[-1] != '.' and title[-1] != '?':
                title_tmp = '\\newblock \\bibinfo{title}{\\href{' + link + '}{{\color{blue}' + title + '.}}}\n'
            # others do
            else:
                title_tmp = '\\newblock \\bibinfo{title}{\\href{' + link + '}{{\color{blue}' + title + '}}}\n'

        #### formating journal name, volume, pages if valid article
        if publisher != 'USGS':
            if journal != '':
                journal_tmp = '\\newblock \\emph{\\bibinfo{journal}{' + journal + '}}\n'
            if volume != '':
                volume_tmp = '\\textbf{\\bibinfo{volume}{' + volume + '}}\n'
            if pages != '' and volume_tmp != '':  # if both pages and volume are present
                volume_tmp = volume_tmp[0:-1] + ', '
                pages_tmp = '\\bibinfo{pages}{' + pages + '}\n'
            elif pages != '' and volume_tmp == '':  # if both pages are present, but not volume
                #requires slight change to journal name
                journal_tmp = '\\newblock \\emph{\\bibinfo{journal}{' + journal + ', }}\n'
                pages_tmp = '\\bibinfo{pages}{' + pages + '}\n'
        else:  # if USGS is publisher, format as technical report
            journal_tmp = ''
            volume_tmp = ''
            pages_tmp = ''
            inst_tmp = '\\newblock \\bibinfo{type}{Tech. Rep.}, \\bibinfo{institution}{' + publisher + '} '

        #### formatting the year
        if year != '':
            year_tmp = '(\\bibinfo{year}{' + year + '})\n'

        # list for bbl
        bibitems.append(''.join([
            cite_tmp, name_tmp, title_tmp, journal_tmp, volume_tmp, pages_tmp,
            inst_tmp, year_tmp, '\n'
        ]))

    # add footer at very bottom
    bibitems.append(ftr)

    # print the bibtex string
    with codecs.open('./output/bibtk.bib', 'wb', 'utf-8') as f1:
        f1.write('\n'.join(biblist))

    # print the bbl string
    with codecs.open('./output/cite.bbl', 'wb', 'utf-8') as f2:
        f2.write(''.join(bibitems))

    # make a simple tex file to input bbl
    references='\\documentclass[12pt]{article}\n' +\
        '\\topmargin 0.0cm\n' +\
        '\\oddsidemargin 0.2cm\n' +\
        '\\textwidth 16cm\n' +\
        '\\textheight 21cm\n' +\
        '\\footskip 1.0cm\n' +\
        '\\usepackage[utf8x]{inputenc}\n'+\
        '\\usepackage{hyperref}\n'+\
        '\\hypersetup{colorlinks=false,pdfborder={0 0 0}}\n'+\
        '\\usepackage[usenames]{color}\n'+\
        '\\begin{document}\n' +\
        '\\input{cite.bbl}\n'+\
        '\\end{document}\n'
    with open('./output/references.tex', 'wb') as f3:
        f3.write(references)

    #SUMMARIZE PUBLISHERS AND JOURNALS IN THIS BIBJSON
    all_publishers = np.array([clean(a['publisher']) for a in bib])
    all_journals = [
        (clean(a['journal']['name']), clean(a['publisher']))
        for a in bib
    ]

    #UNIQUE PUBLISHERS AND NUMBER OF OCCURENCES
    u, counts = np.unique(all_publishers, return_counts=True)

    #MAKE A STRUCTURED ARRAY
    pub_table = np.zeros(len(u),
                         dtype={
                             'names': ['name', 'count'],
                             'formats': [u.dtype, 'i4']
                         })
    pub_table['name'] = u
    pub_table['count'] = counts

    #SORT BY NUMBER OF OCCURENCES
    pub_table = np.flipud(np.sort(pub_table, order='count'))

    #COUNT NUMBER OCCURENCES OF JOURNAL-PUBLISHER TUPLES
    count_map = {}
    for t in all_journals:
        count_map[t] = count_map.get(t, 0) + 1

    #MAKE A STRUCTURED ARRAY
    journal_table = np.zeros(len(count_map),
                             dtype={
                                 'names': ['name', 'pub', 'count'],
                                 'formats': [
                                     np.array([a[0] for a in count_map.keys()
                                               ]).dtype,
                                     np.array([a[1] for a in count_map.keys()
                                               ]).dtype, 'i4'
                                 ]
                             })

    for i, j in enumerate(count_map):
        journal_table['name'][i] = j[0]
        journal_table['pub'][i] = j[1]
        journal_table['count'][i] = count_map[j]

    #SORT BY NUMBER OF OCCURENCES
    journal_table = np.flipud(np.sort(journal_table, order='count'))

    #INITIATE A LATEX TABLE FOR PUBLISHERS SUMMARY
    latex_pub='\\begin{center} \n' +\
                '\\begin{longtable}{|l|r|} \\hline \n' +\
                '\\multicolumn{2}{|c|}{\\textbf{Publisher Totals}}\\\ \hline\n' +\
                'name&number references\\\ \hline\n'

    #LOOP THROUGH PUBLISHER SUMMARY AND APPEND TO LATEX STRING AS NEW ROW
    for p in pub_table:
        latex_pub = latex_pub + p['name'] + '&' + str(p['count']) + '\\\ \n'

    #END THE PUBLISHER TABLE
    latex_pub=latex_pub+'\hline\end{longtable}\n'+\
                '\end{center}'

    #INITIATE A LATEX TABLE FOR JOURNALS SUMMARY
    latex_journal='\\newpage'+\
                '\\begin{landscape}'+\
                '\\begin{center} \n' +\
                '\\begin{longtable}{|l|l|r|} \\hline \n' +\
                '\\multicolumn{3}{|c|}{\\textbf{Journal Totals}}\\\ \hline\n'+\
                'name&publisher&number references\\\ \hline\n'

    #LOOP THROUGH JOURNALS SUMMARY AND APPEND TO LATEX STRING AS NEW ROW(S)
    for j in journal_table:
        #SET MAXIMUM WORD NUMBER PER ROW SO THE LATEX TABLE CELL DOES NOT OVERFLOW
        words = j['name'].split()
        #CHUNKED TITLE; LIST OF <= 5 WORD N-GRAMS
        subs = [" ".join(words[i:i + 5]) for i in range(0, len(words), 5)]

        #LOOP THROUGH CHUNKS AND APPEND TO STRING
        for i in range(len(subs)):
            #FIRST CHUNK GETS THE JOURNAL TOTAL
            if i == 0:
                latex_journal = latex_journal + subs[i] + '&' + j['pub'] + '&' + str(j['count']) + '\\\ \n'
            #ALL SUBSEQUENT CHUNKS GET INDENTED
            else:
                latex_journal = latex_journal + '\\hspace{5mm}' + subs[i] + '&&\\\ \n'

    #END THE JOURNAL TABLE
    latex_journal=latex_journal+'\\hline\\end{longtable}\n'+\
                '\\end{center}\n'+\
                '\\end{landscape}'

    #PREAMBLE FOR BOTH TABLES
    preamble='\\documentclass[12pt]{article}\n'+\
                '\\usepackage{longtable}\n'+\
                '\\usepackage[utf8x]{inputenc}\n'+\
                '\\usepackage{pdflscape}\n'+\
                '\\begin{document}\n'

    #CONCATENATE THE PREAMBLE, BOTH TABLES, AND END THE LATEX DOCUMENT
    table = preamble + latex_pub + latex_journal + '\n\\end{document}'

    #PRINT THE LATEX TABLES
    with codecs.open('./output/table.tex', 'wb', 'utf-8') as f4:
        f4.write(table)
Example #44
0
def cap_text(text):
    return capwords(text)  # capwords() capitalizes every whitespace-separated word, not just the first character like str.capitalize()
def FORMAT_TO_FILENAME(s):
    # turn a slug like "practical-php-7-mysql-8" into "Practical Php 7 Mysql 8"
    s = s.replace('-', ' ')
    return string.capwords(s)
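A short standalone check of the slug conversion above, using the example from the
snippet's own comment:

import string

# "practical-php-7-mysql-8" -> "Practical Php 7 Mysql 8"
print(string.capwords("practical-php-7-mysql-8".replace('-', ' ')))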
Example #46
0
    async def process_command(self, msg : discord.Message, command, params):
        issuer = msg.author
        if command == 'custom1_kill_newcomer':
            if not issuer.id == 91929884145766400: #Mouse only command for debugging
                await msg.channel.send( "You are not Mouse.")
                return

            newcomer_role = next(r for r in self.server.roles if r.name == 'Newcomer')
            # discord.py 1.x exposes the guild as msg.guild (the old .server attribute was removed)
            everyone_role = msg.guild.default_role
            people = msg.guild.members
            channels = msg.guild.channels

            for channel in channels:
                poveride = channel.overwrites_for(newcomer_role) or discord.PermissionOverwrite()
                poveride.send_messages = False
                poveride.read_messages = False

                peoveride = channel.overwrites_for(everyone_role) or discord.PermissionOverwrite()
                peoveride.read_messages = True
                peoveride.send_messages = True

                # set_permissions() takes the overwrite as a keyword-only argument
                await channel.set_permissions(newcomer_role, overwrite=poveride)
                await channel.set_permissions(everyone_role, overwrite=peoveride)

        elif command == 'soberall':
            if not issuer.id == 91929884145766400: #Mouse only command for debugging
                await msg.channel.send( "You are not Mouse.")
                return
            for d in self.dosed.keys():
                us = await self.FindUser(d,msg)
                await us.edit(nick=self.dosed[d]['dname'])
            self.dosed = {}
            pickle.dump(self.dosed,Path('dosed.pkl').open('wb'))

        elif command == 'warn':
            if not msg.channel.permissions_for(issuer).kick_members:
                await msg.channel.send("You don't have the proper permissions {}".format(issuer.mention))
                return

            user = await self.FindUser(' '.join(params),msg)
            if user is None:
                await msg.channel.send( "No user with that name")
                return

            if user == issuer:
                await msg.channel.send( "Yo-...you can't warn yourself... Why would you **want** to?")
                return

            warns = {}
            if Path('warns.pkl').exists():
                warns = pickle.load(Path('warns.pkl').open('rb'))
            warns[user.id] = warns.get(user.id, 0) + 1
            # confirm the warning even when it is the user's first one
            await msg.channel.send("{} has been warned. He currently has {}/{} warnings".format(user.mention, warns[user.id], self.max_warnings))
            pickle.dump(warns, Path('warns.pkl').open('wb'))
        
        elif command == 'unwarn':
            if not msg.channel.permissions_for(issuer).kick_members:
                await msg.channel.send("You don't have the proper permissions {}".format(issuer.mention))
                return

            user = await self.FindUser(params[0],msg)
            if user is None:
                await msg.channel.send( "No user with that name")
                return

            if user == issuer:
                await msg.channel.send( "Yo-...you can't unwarn yourself...")
                return

            warns = {}
            if not Path('warns.pkl').exists():
                await msg.channel.send("There are no warnings.")
                return
            else:
                warns = pickle.load(Path('warns.pkl').open('rb'))
                if user.id in warns and warns[user.id] > 0:
                    warns[user.id] -= 1
                    await msg.channel.send("1 warning has been removed for {}. He currently has {}/{} warnings".format(user.mention,warns[user.id],self.max_warnings))
                else:
                    await msg.channel.send("{} did not have any warnings".format(user.mention))

            pickle.dump(warns,Path('warns.pkl').open('wb'))

        elif command == 'warnings':
            user = None
            if len(params) > 0:
                user = await self.FindUser(' '.join(params),msg)
                if not msg.channel.permissions_for(issuer).kick_members and user != issuer:
                    await msg.channel.send("You don't have the proper permissions {}".format(issuer.mention))
                    return
            
            warns = {}
            if not Path('warns.pkl').exists():
                await msg.channel.send("There aren't any warnings on file.")
            else:
                warns = pickle.load(Path('warns.pkl').open('rb'))
                if user != None:
                    if user.id in warns:
                        await msg.channel.send("{} has {}/{} warnings".format(user.mention,warns[user.id],self.max_warnings))
                    else:
                        await msg.channel.send("{} has 0/{} warnings".format(user.mention,self.max_warnings))
                else:
                    if issuer.id in warns:
                        await msg.channel.send("{} has {}/{} warnings".format(issuer.mention,warns[issuer.id],self.max_warnings))
                    else:
                        await msg.channel.send("{} has 0/{} warnings".format(issuer.mention,self.max_warnings))

        elif command == 'warningsall':
            if not msg.channel.permissions_for(issuer).kick_members:
                await msg.channel.send("You don't have the proper permissions {}".format(issuer.mention))
                return
            
            warns = {}
            if not Path('warns.pkl').exists():
                await msg.channel.send("There aren't any warnings on file.")
            else:
                warns = pickle.load(Path('warns.pkl').open('rb'))

            await msg.channel.send("\n".join([f"{self.get_user(key).display_name} | {value} warnings" for (key, value) in warns.items()]))

        elif command == 'mute':
            if not msg.channel.permissions_for(issuer).manage_messages:
                await msg.channel.send( "You don't have the proper permissions {}".format(issuer.mention))
                return

            user = await self.FindUser(params[0],msg)
            if user is None:
                await msg.channel.send( "No user with that name")
                return

            if user == issuer:
                await msg.channel.send( "Yo-...you can't mute yourself...")
                return

            mute_role = discord.utils.get(user.guild.roles, name="Muted")

            path = Path('muted.pkl')
            
            if path.exists():
                temp = pickle.load(path.open('rb'))
                if user.id in temp:
                    await msg.channel.send("{0} is already muted".format(user.display_name))
                else:
                    self.muted.append(user.id)
                    pickle.dump(self.muted,path.open('wb'))
                    await msg.channel.send( "{0} was muted by {1}".format(user.mention, issuer.mention))
            else:
                self.muted.append(user.id)
                pickle.dump(self.muted,path.open('wb'))
                await msg.channel.send( "{0} was muted by {1}".format(user.mention, issuer.mention))

            await user.add_roles(mute_role)

        elif command == 'unmute':
            if not msg.channel.permissions_for(issuer).manage_messages:
                await msg.channel.send( "You don't have the proper permissions {}".format(issuer.mention))
                return

            user = await self.FindUser(params[0],msg)
            if user is None:
                await msg.channel.send( "No user with that name")
                return

            if user == issuer:
                await msg.channel.send( "You shouldn't be muted...")
                return

            mute_role = discord.utils.get(user.guild.roles, name="Muted")

            if user.id in self.muted:
                self.muted.remove(user.id)
                pickle.dump(self.muted,Path('muted.pkl').open('wb'))
                await msg.channel.send( "{0} was re-granted posting privileges by {1}".format(user.mention, issuer.mention))
            else:
                await msg.channel.send( "{0} was never muted.".format(user.mention))

            await user.remove_roles(mute_role)

        elif command == 'imgmute':
            if not msg.channel.permissions_for(issuer).manage_messages:
                await msg.channel.send( "You don't have the proper permissions {}".format(issuer.mention))
                return

            user = await self.FindUser(params[0],msg)
            if user is None:
                await msg.channel.send( "No user with that name")
                return

            if user == issuer:
                await msg.channel.send( "You can't image mute yourself... No matter how much you probably should.")
                return

            path = Path('imgmuted.pkl')
            
            if path.exists():
                temp = pickle.load(path.open('rb'))
                if user.id in temp:
                    await msg.channel.send("{0} is already image muted".format(user.display_name))
                else:
                    self.imgmuted.append(user.id)
                    pickle.dump(self.imgmuted,path.open('wb'))
                    await msg.channel.send( "{0} was image muted by {1}".format(user.mention, issuer.mention))
            else:
                self.imgmuted.append(user.id)
                pickle.dump(self.imgmuted,path.open('wb'))
                await msg.channel.send( "{0} was image muted by {1}".format(user.mention, issuer.mention))

        elif command == 'imgunmute':
            if not msg.channel.permissions_for(issuer).manage_messages:
                await msg.channel.send( "You don't have the proper permissions {}".format(issuer.mention))
                return

            user = await self.FindUser(params[0], msg)
            if user is None:
                await msg.channel.send( "No user with that name")
                return

            if user == issuer:
                await msg.channel.send( "You shouldn't be image muted...")
                return

            if user.id in self.imgmuted:
                self.imgmuted.remove(user.id)
                pickle.dump(self.imgmuted,Path('imgmuted.pkl').open('wb'))
                await msg.channel.send( "{0} was re-granted image posting privileges by {1}".format(user.mention, issuer.mention))
            else:
                await msg.channel.send( "{0} was never image muted.".format(user.mention))
            
        elif command == 'addrole':
            if not msg.channel.permissions_for(issuer).manage_roles:
                await msg.channel.send( "You don't have the proper permissions {}".format(issuer.mention))
                return

            user = await self.FindUser(params[0], msg)
            if user is None:
                await msg.channel.send( "No user with that name")
                return

            # default to None so the check below can report an unknown role instead of raising StopIteration
            role = next((r for r in self.server.roles if ' '.join(params[1:]).lower() == r.name.lower()), None)

            if role is None:
                await msg.channel.send( "No role called {0}".format(params[1]))
            else:
                await user.add_roles(role)
                await msg.channel.send( "{0} was given the role {1} by {2}!".format(user.mention,role.name,issuer.mention))

        elif command == 'delrole':
            if not msg.channel.permissions_for(issuer).manage_roles:
                await msg.channel.send( "You don't have the proper permissions {}".format(issuer.mention))
                return

            user = await self.FindUser(params[0], msg)
            if user is None:
                await msg.channel.send( "No user with that name")
                return

            # default to None so the check below can report an unknown role instead of raising StopIteration
            role = next((r for r in self.server.roles if ' '.join(params[1:]).lower() == r.name.lower()), None)

            if role is None:
                await msg.channel.send( "No role called {0}".format(params[1]))
            else:
                await user.remove_roles(role)
                await msg.channel.send( "{0} was stripped of the role {1} by {2}.".format(user.mention,role.name,issuer.mention))

        elif command == 'selfie':
            selfie_channel = next(c for c in self.server.channels if 'selfies' in c.name.lower())
            logs = await selfie_channel.history(limit=2000).flatten()
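            # history().flatten() collects the channel's recent messages into a list (discord.py 1.x API)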

            #async for m in selfie_channel.history(limit=2000):
            #    if len(m.attachments) > 0 and m.author == issuer:
            #        logs.append(m)

            # keep only the issuer's own messages that actually carry an attachment
            pics = [m.attachments[0] for m in logs if m.attachments and m.author == issuer]
            if len(pics) == 0:
                await msg.channel.send( "You don't have any selfies posted in {0}".format(selfie_channel.mention))
                return

            pic = random.choice(pics)

            embed = discord.Embed(title="{0}'s selfie".format(issuer.display_name), description="Selfie Image")
            embed.set_image(url=pic.url)  # Attachment objects expose .url, not dict access
            embed.set_author(name=issuer.display_name)
            await msg.channel.send( embed=embed)

        elif command == 'ping':
            await msg.channel.send( "p-...pong?")

        elif command == 'dosed':
            if params[0] == 'start':
                if issuer.id in self.dosed.keys():
                    tm = self.dosed[issuer.id]['start_time']

                    tm = (datetime.datetime.fromtimestamp(tm) - datetime.timedelta(hours=5, minutes=0)).strftime('%I:%M:%S %p')
                    
                    await msg.channel.send( "You first dosed (issued this command) at {0}".format(tm))
                else:
                    await msg.channel.send( "You first dosed at... no, wait you didn't.")
                return
            elif params[0] == 'last':
                if issuer.id in self.dosed.keys():
                    if 'last_redose' not in self.dosed[issuer.id]:
                        await msg.channel.send( "You haven't told me you redosed")
                        return

                    tm = self.dosed[issuer.id]['last_redose']

                    tm = (datetime.datetime.fromtimestamp(tm) - datetime.timedelta(hours=5, minutes=0)).strftime('%I:%M:%S %p')
                    
                    await msg.channel.send( "You last re-dosed (issued the redose command) at {0}".format(tm))
                else:
                    await msg.channel.send( "You last re-dosed at... no, wait you didn't.")
                return
            drug = params[0].lower()
            dosage = ''.join(params[1:])

            if re.match('\d+([umk]?g)',dosage) is None:
                await msg.channel.send( "That dosage doesn't make sense to me...")
                return

            jsn = json.load(open('tripsit_drugs.json','r', encoding="utf8"))

            drugs = list(jsn['data'][0].keys())
            chosen_drug = None

            if drug not in drugs:
                #look through aliases
                for d in drugs:
                    if 'aliases' in jsn['data'][0][d]:
                        if drug in jsn['data'][0][d]['aliases']:
                            chosen_drug = d
            else:
                chosen_drug = next(d for d in drugs if d == drug)

            if chosen_drug == None:
                await msg.channel.send( "Couldn't find a drug by that name.")
                return

            if issuer.id in self.dosed:
                await msg.channel.send( "You're already listed as on something")
                return

            # Discord caps nicknames at 32 characters, so trim the drug name to fit
            ch_nn = issuer.display_name + '/' + chosen_drug.capitalize() + '/' + dosage
            if len(ch_nn) > 32:
                to_rem = len(ch_nn) - 32 + 1
                chosen_drug = chosen_drug[:-to_rem] + '.'
                # rebuild the nickname with the shortened drug name, otherwise the trim has no effect
                ch_nn = issuer.display_name + '/' + chosen_drug.capitalize() + '/' + dosage

            self.dosed[issuer.id] = {'name': issuer.display_name, 'dname': ch_nn, 'dosage': dosage, 'start_time': time.time() }
            pickle.dump(self.dosed,Path('dosed.pkl').open('wb'))

            await issuer.edit(nick=self.dosed[issuer.id]['dname'])
            await msg.channel.send( "{0} is high on {1} ({2}). Be nice, and good vibes!".format(issuer.mention,drug.capitalize(),dosage))

        elif command == 'sober':
            if issuer.id not in self.dosed:
                await msg.channel.send( "You're not listed as a currently dosed user")
                return

            await issuer.edit(nick=self.dosed[issuer.id]['name'])
            await msg.channel.send( "{0} is now sober. Hope you had fun!".format(issuer.mention))
            del self.dosed[issuer.id]
            pickle.dump(self.dosed,Path('dosed.pkl').open('wb'))
        
        elif command == 'redose':
            if len(re.findall(r'\d+?([umk])?g', ''.join(params[0:]))) == 0:
                await msg.channel.send( "That dosage doesn't make sense to me...")
                return

            if issuer.id not in self.dosed:
                await msg.channel.send( "You never let me know you dosed anything... Try .help")
                return

            amount = int(re.findall(r'\d+', ''.join(params[0:]))[0])
            last_dosage = int(re.findall(r'\d+', self.dosed[issuer.id]['dosage'])[0])
            last_unit = re.findall(r'[^\d]+', self.dosed[issuer.id]['dosage'])[0]

            self.dosed[issuer.id]['last_redose'] = time.time()
            self.dosed[issuer.id]['dosage'] = str(last_dosage + amount) + last_unit
            self.dosed[issuer.id]['dname'] = self.dosed[issuer.id]['dname'].replace(str(last_dosage) + last_unit, self.dosed[issuer.id]['dosage'])
            
            await issuer.edit(nick=self.dosed[issuer.id]['dname'])
            await msg.channel.send( "{0} redosed for {1} for a total of {2}".format(issuer.display_name, ''.join(params[0:]), self.dosed[issuer.id]['dosage']))
            pickle.dump(self.dosed,Path('dosed.pkl').open('wb'))

        elif command == 'drug':
            drug = ''.join(params)

            res = urlopen('http://tripbot.tripsit.me/api/tripsit/getDrug?name={0}'.format(drug)).read()
            res = json.loads(res)

            if res['err'] == True:
                await msg.channel.send( "No drug found by that name")
            else:
                data = res['data'][0]
                embed = None
                if 'links' in data:
                    embed = discord.Embed(title=data['pretty_name'], description='[Drug information](http://drugs.tripsit.me/{0})\n[Experiences]({1})'.format(data['name'],data['links']['experiences']))
                else:
                    embed = discord.Embed(title=data['pretty_name'], description='[Drug information](http://drugs.tripsit.me/{0})\n[Experiences]({1})'.format(data['name'],"None Reported"))
                embed.add_field(name='Summary', value=data['properties']['summary'])
                if 'effects' in data['properties']:
                    embed.add_field(name='Effects', value=data['properties']['effects'], inline = False)
                embed.add_field(name='Onset', value=data['properties']['onset'], inline = False)
                embed.add_field(name='Duration', value=data['properties']['duration'], inline = False)
                embed.add_field(name='After Effects', value=data['properties']['after-effects'], inline = False)

                for roa in data['formatted_dose']:
                    value = ""
                    dos_cats = list(data['formatted_dose'][roa])
                    dos_vals = list(data['formatted_dose'][roa].values())

                    for i in range(len(dos_cats)):
                        value += "**{0}** - {1}\n".format(dos_cats[i], dos_vals[i])

                    embed.add_field(name='Dosage ({0})'.format(roa), value=value)
                
                embed.add_field(name='Categories', value="\n".join([string.capwords(c.replace('-',' ')) for c in data['properties']['categories']]), inline = False)

                await msg.channel.send( embed=embed)

        elif command == 'slap':
            slapee = await self.FindUser(' '.join(params[0:]),msg)

            im = Image.open('slap.gif')
            frames = []
            for frame in ImageSequence.Iterator(im):
                d = ImageDraw.Draw(frame)
                f = ImageFont.truetype('CODE2000.ttf',16)
                d.text((120,50), slapee.display_name, font=f)
                del d

                b = io.BytesIO()
                frame.save(b, format="GIF")
                frame = Image.open(b)
                frames.append(frame)

            frames[0].save('slap_out.gif', save_all=True, append_images=frames[1:])
            f = open('slap_out.gif','rb')
            await msg.channel.send("*slaps {0}*".format(slapee.mention),file=discord.File(f))
            f.close()
            im.close()
            os.remove('slap_out.gif')

        elif command == 'trivia':
            if self.in_trivia:
                if params[0].lower() == 'join':
                    if self.trivia_instance.initialized == True and self.trivia_instance.started == False:
                        if msg.author.id in self.trivia_instance.current_players:
                            await msg.channel.send( "You are already in this game, {0}".format(msg.author.mention))
                        else:
                            self.trivia_instance.addPlayer(msg.author.id) 
                            await msg.channel.send( "{0} added to the player list".format(msg.author.mention))
                        return
                elif params[0].lower() == 'end':
                    if msg.author == self.trivia_instance.host or msg.author.id == self.mouse_discord_id:
                        self.in_trivia = False
                        self.trivia_instance.End()
                        self.trivia_instance = None
                elif params[0].lower() == 'start':
                    if self.trivia_instance.initialized == True and self.trivia_instance.started == False:
                        if msg.author == self.trivia_instance.host:
                            self.trivia_instance.started = True
                            await msg.channel.send( "Starting {0} trivia game with {1} players! First one to {2} points wins!".format(self.trivia_instance.current_subject.capitalize(), len(self.trivia_instance.current_players),self.trivia_instance.max_score))
                            await asyncio.sleep(2)
                            await msg.channel.send( "TRIVIA QUESTION ({0})\n\n{1}".format(self.trivia_instance.current_subject,self.trivia_instance.current_question.question.capitalize()))
                        else:
                            await msg.channel.send( "You are not the host of this game, {0}. {1} is".format(msg.author.mention,self.trivia_instance.host.mention))

                    else:
                        await msg.channel.send( "Game of trivia already started and in progress...")

                    return

                else:
                    await msg.channel.send( "Trivia game is already in progress")
                return

            if params[0].lower() == 'subjects':
                subjects = [x.stem.capitalize() for x in Path('./trivia_questions/').iterdir() if x.is_file()]
                await msg.channel.send( "Trivia subjects\n============\n{0}".format("\n".join(subjects)))
                return
            elif params[0].lower() == 'highscores':
                ths = TriviaHighScoreTable()
                limit = 5
                ths.readFromFile()

                if len(params) == 2 and is_number(params[1]):
                    limit = int(params[1])

                top_5_msg = "TRIVIA HIGHSCORES (TOP {0})\n===================\n".format(str(limit))
                i = 1
                for entry, points in sorted(ths.players.items(), key=lambda kv: kv[1], reverse=True):
                    if i > limit:
                        break
                    p = next(m for m in self.server.members if m.id == entry)
                    top_5_msg += "{0}.) {1} - {2}\n".format(str(i), p.display_name, points)
                    i += 1

                await msg.channel.send( top_5_msg)
                return


            if len(params) == 2 and is_number(params[1]):
                self.trivia_instance = Trivia(params[0].lower(),int(params[1]))
                if len(self.trivia_instance.current_questions) == 0:
                    await msg.channel.send( "No trivia topic: {0}".format(params[0]))
                    self.trivia_instance = None
                else:
                    await msg.channel.send( "Trivia game with subject {0} initialized. Type '{1}trivia join' to enter! {1}trivia start to start (host only)".format(params[0], self.trigger))
                    self.in_trivia = True
                    self.trivia_instance.host = msg.author
                    self.trivia_instance.addPlayer(msg.author.id)

        elif command == 'whohas':
            role = next(r for r in self.server.roles if ' '.join(params[0:]).lower() == r.name.lower())

            people = filter(lambda m: role in m.roles, self.server.members)
            people = list(people)
            await msg.channel.send( ', '.join([m.display_name for m in people]))
            pass

        elif command == 'help':
            help_message = ""
            for k in self.commands.keys():
                help_message += k + " - " + self.commands[k] + "\n"
            await msg.channel.send( help_message)
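
# A minimal, framework-free sketch of the dose bookkeeping that the 'dosed' and
# 'redose' commands above rely on: the regexes and the 32-character nickname cap
# are taken from the bot, while the DoseTracker class and its method names are
# hypothetical.
import re
import time


class DoseTracker:

    DOSAGE_RE = re.compile(r'\d+[umk]?g')      # e.g. "200ug", "10mg", "2g"

    def __init__(self):
        self.dosed = {}                        # user_id -> dose record

    def start(self, user_id, display_name, drug, dosage):
        if self.DOSAGE_RE.fullmatch(dosage) is None:
            raise ValueError("dosage doesn't make sense")
        nick = display_name + '/' + drug.capitalize() + '/' + dosage
        if len(nick) > 32:                     # Discord nickname length limit
            overflow = len(nick) - 32 + 1
            drug = drug[:-overflow] + '.'
            nick = display_name + '/' + drug.capitalize() + '/' + dosage
        self.dosed[user_id] = {'name': display_name, 'dname': nick,
                               'dosage': dosage, 'start_time': time.time()}
        return nick

    def redose(self, user_id, amount_text):
        entry = self.dosed[user_id]
        amount = int(re.findall(r'\d+', amount_text)[0])
        last_amount = int(re.findall(r'\d+', entry['dosage'])[0])
        unit = re.findall(r'[^\d]+', entry['dosage'])[0]
        entry['last_redose'] = time.time()
        new_dosage = str(last_amount + amount) + unit
        entry['dname'] = entry['dname'].replace(entry['dosage'], new_dosage)
        entry['dosage'] = new_dosage
        return entry['dname']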
Example #47
0
    def classify(self):

        # Variable declarations
        titre, self.type, movie_year, season_episode, genre = (None, None,
                                                               None, None,
                                                               None)

        # Detect whether this is a TV series by looking for an S01E02-style pattern
        season_episode = re.search(r"[Ss]\d\d[Ee]\d\d", self._nom_fichier)

        # If so, the item is DEFINITIVELY treated as a series
        if season_episode:

            self.type = 'Serie'

            index_debut, index_fin = season_episode.span()

            season_episode = season_episode.group()
            season_episode = season_episode.upper()
            print('Season_episode = ', season_episode)

            titre = self._nom_fichier[:(index_debut - 1)]
            titre = self.purify(titre)
            titre = string.capwords(titre)
            if debug:
                print('Regex detected', titre, self.type)

            # Cross-check against TMDB
            titre_verifie = self.verify(titre)

            if titre_verifie:
                print('verified title:', titre_verifie)

            # If positive, replace the data with the retrieved values
            if titre_verifie:
                titre, self.type, movie_year, genre = titre_verifie
                self.move_file(titre, self.type, season_episode)

            # If TMDB returns nothing, try IMDB
            else:
                titre_imdb = self.search_imdb(titre)
                if titre_imdb:
                    titre = titre_imdb
                    self.move_file(titre, self.type, season_episode)

            if not titre_verifie:

                titre_verifie = self.recursive_verify(titre)

        # If no series was detected, try to detect a film by finding a year between 1920 and 2029
        if titre is None:
            movie_year = re.search(r"(19[2-9][0-9]|20[0-2][0-9])",
                                   self._nom_fichier)

        # If a year is detected, the item may still be reclassified as a series after TMDB verification
        if movie_year:

            self.type = "Film"

            index_debut, index_fin = movie_year.span()

            movie_year = movie_year.group()

            titre = str(self._nom_fichier[:index_debut - 1])
            titre = self.purify(titre)
            titre = string.capwords(titre)

            if debug:
                print('Regex detected', titre, self.type)

            titre_verifie = self.verify(titre)

            # If TMDB finds it, replace the values
            if titre_verifie:
                titre, self.type, movie_year, genre = titre_verifie

            # Otherwise try the recursive search
            else:
                recherche_recursive = self.recursive_verify(titre)
                # If it finds something, replace the values
                if recherche_recursive:
                    titre, self.type, movie_year, genre = recherche_recursive

                # If the recursive search doesn't work either, try IMDB
                else:
                    result_search_imdb = self.search_imdb(titre)
                    if result_search_imdb:
                        titre = result_search_imdb

            # Move the file with the final values
            self.move_file(titre, self.type, None, movie_year, genre)

        # If the regexes found neither a series nor a film, do a recursive search on tmdb.org
        if titre is None:

            if debug:
                print('The regexes did not work for',
                      self._nom_fichier, 'trying a recursive search')
            purified_name = self.purify(self._nom_fichier)
            titre_verifie = self.recursive_verify(purified_name)

            # If it finds something, replace the values
            if titre_verifie:

                if titre_verifie[1] == 'Film':
                    titre, self.type, movie_year, genre = titre_verifie

                    # Move the file with the final values
                    self.move_file(titre, self.type, None, movie_year, genre)

        # If nothing works, send it to the Purgatory folder
        if self.type is None and self._nom_fichier != os.path.basename(
                __file__) and self._extension not in self.indesirables:

            print('Type not detected, sending to Purgatory: ' + self._nom_fichier)

            if not simulation and operation_mode == "local":
                os.rename(self._path_complet,
                          path.join(dossier_Purgatoire, self._nom_fichier))
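
# A self-contained sketch of just the detection step described above: a series is
# recognised by an S01E02-style marker, otherwise a film by a plausible release
# year. The TMDB/IMDB verification and purify() are assumed external; here the
# title is only approximated by replacing dots with spaces.
import re
import string


def classify_filename(filename):
    se = re.search(r"[Ss]\d\d[Ee]\d\d", filename)
    if se:
        title = string.capwords(filename[:se.start() - 1].replace('.', ' '))
        return 'Serie', title, se.group().upper()

    year = re.search(r"(19[2-9][0-9]|20[0-2][0-9])", filename)
    if year:
        title = string.capwords(filename[:year.start() - 1].replace('.', ' '))
        return 'Film', title, year.group()

    return None, None, None

# classify_filename('the.expanse.S03E06.1080p.mkv') -> ('Serie', 'The Expanse', 'S03E06')
# classify_filename('blade.runner.1982.mkv')        -> ('Film', 'Blade Runner', '1982')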
Example #48
0
 def _prep(value):
     return [
         text.unescape(string.capwords(v))
         for v in text.extract_iter(value or "", '.html">', '<')
     ]
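
# Rough stdlib-only equivalent of the helper above: text.unescape and
# text.extract_iter come from the surrounding scraper's own utility module, so
# html.unescape and re.findall are used here instead as an approximation.
import html
import re
import string


def prep(value):
    # Pull the link texts that sit between '.html">' and the next '<'.
    return [
        html.unescape(string.capwords(v))
        for v in re.findall(r'\.html">(.*?)<', value or "")
    ]

# prep('<a href="tags/foo.html">foo bar</a><a href="tags/baz.html">baz</a>')
# -> ['Foo Bar', 'Baz']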
def Scraping_Company_Deatils():
    browser = webdriver.Chrome(executable_path=str('C:\\chromedriver.exe'))
    browser.maximize_window()
    # browser.get('https://servicedirectory.itij.com/')
    # time.sleep(2)
    a = 1

    # company_links = []
    # menu_links_list = []

    # for menu_links in browser.find_elements_by_xpath('/html/body/div[2]/section/div[2]/div/div/div/div/div/div/div/div/a'):
    #     menu_links_list.append(menu_links.get_attribute('href').strip())

    # for menu_link in menu_links_list:
    #     browser.get(menu_link)
    #     time.sleep(2)
    #     for title in browser.find_elements_by_xpath('//*[@id="content-grids"]/div/div/div[2]/div[1]/h4/a'):
    #         text = ''
    #         text += f"{title.get_attribute('href').strip()}####"
    #         text += f"{title.get_attribute('innerText').strip()}"
    #         company_links.append(text)
    # file1 = open("F:\\links.txt","w")
    # for link in company_links:
    #     file1.write(f"{str(link)}\n")
    # file1.close() #to change file access modes

    f = open("C:\\Source Tender Links\\links.txt", "r")
    # f = open("F:\\links.txt", "r")
    links = f.read()
    All_Links = links.splitlines()

    for links in All_Links:
        company_href = links.partition('####')[0].strip()
        Company_name = links.partition('####')[2].strip()
        browser.get(company_href)
        time.sleep(2)

        detail_html = ''
        for detail_html in browser.find_elements_by_xpath(
                '//*[@class="kt-svg-icon-list"]'):
            detail_html = detail_html.get_attribute('outerHTML').replace(
                '\n', '').replace('<br>', ', ').strip()
            break
        detail_html = re.sub(r'\s+', ' ', detail_html)
        detail_html = html.unescape(detail_html)

        Email_list = re.findall(
            r"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9_.+-]+\.[a-zA-Z]+)", detail_html)
        if len(Email_list) != 0:
            main_email = []
            for i in Email_list:
                if i not in main_email:
                    main_email.append(i)

            for Email in main_email:

                # company_href = links[0]
                print(f'Company Link {str(a)} : ', company_href)

                # Company_name = links[1]
                if len(Company_name) >= 200:
                    Company_name = str(Company_name)[:200] + '...'
                print('Company Name: ', Company_name)

                print('Email: ', Email)

                Contact_name = ''
                for Contact_name_text in browser.find_elements_by_xpath(
                        '//*[@class="kt-svg-icon-list"]/li[1]/span'):
                    Contact_name = Contact_name_text.get_attribute(
                        'innerText').strip()
                    break
                if len(Contact_name) >= 95:
                    Contact_name = str(Contact_name)[:95] + '...'
                Contact_name = string.capwords(Contact_name)
                print('Contact Name: ', Contact_name)

                Address = ''
                for Address_text in browser.find_elements_by_xpath(
                        '//*[@id="lp-respo-direc"]'):
                    Address = Address_text.get_attribute('innerText').replace(
                        '\n', ' ').strip()
                    break
                print('Address: ', Address)

                Tel = ''
                for Tel_text in browser.find_elements_by_xpath(
                        '//*[@class="kt-svg-icon-list"]/li/span'):
                    Tel_text_main = Tel_text.get_attribute('innerText').strip()
                    if '+' in Tel_text_main:
                        Tel = Tel_text_main.strip()
                        break

                print('Tel: ', Tel)

                country_list = Address.split(',')
                country = country_list[-1].strip()
                if 'UK' in country:
                    country = 'UK'
                else:
                    country = Get_country_code(country)
                    if str(country) == 'None' or str(country) == 'NONE':
                        country = ''
                print('country: ', country)

                website = ''
                for website_text in browser.find_elements_by_xpath(
                        '//*[@class="kt-svg-icon-list"]/li/span'):
                    website_text_main = website_text.get_attribute(
                        'innerText').strip()
                    if 'www.' in website_text_main:
                        website = website_text_main.strip()
                        break

                print('website: ', website)

                product_and_service = ''
                for product_and_service_text in browser.find_elements_by_xpath(
                        '//*[@id="page"]/section/div[2]/div/div[1]/div[1]/p/strong/span[2]'
                ):
                    product_and_service = product_and_service_text.get_attribute(
                        'innerText').strip()
                    break
                print('product_and_service: ', product_and_service)

                insert_details(company_href, Company_name, Contact_name,
                               Address, Tel, country, website,
                               product_and_service, Email)

                print(
                    '\n==================================================== \n'
                )
                a += 1
    wx.MessageBox('All Process Are Done', 'servicedirectory.itij.com',
                  wx.OK | wx.ICON_INFORMATION)
    print('All Process Are Done')
    browser.close()
    sys.exit()
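
# Small sketch of the e-mail extraction and ordered de-duplication that the
# per-company loop above depends on; the regex is the one used in the scraper.
import re

EMAIL_RE = re.compile(r"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9_.+-]+\.[a-zA-Z]+)")


def unique_emails(detail_html):
    found = EMAIL_RE.findall(detail_html)
    return list(dict.fromkeys(found))   # keeps first occurrence, drops repeats

# unique_emails('sales@example.com, info@example.com, sales@example.com')
# -> ['sales@example.com', 'info@example.com']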
def scrap_data(Tender_id, Document, start_date, Deadline, SCHEDULED_DATE,
               Title):
    SegField = []
    for data in range(45):
        SegField.append('')
    a = True
    while a == True:
        try:

            SegField[1] = '*****@*****.**'
            SegField[
                2] = '4632 Wisconsin Ave, NW, Washington, DC, 20016-4622, USA, Tel : (202)244-5010'
            SegField[8] = 'http://www.cebw.org/'
            SegField[12] = 'BRAZILIAN ARMY COMMISSION (BAC)'
            SegField[13] = Tender_id
            SegField[19] = string.capwords(str(Title)).replace('`', '')
            SegField[5] = Document
            SegField[
                18] = f'{SegField[19]}<br>\nStart Date: {start_date}<br>\nSubmitting Initial Proposal: {Deadline}<br>\nScheduled Date: {SCHEDULED_DATE}'

            Deadline = Deadline.replace('AM', '').replace('PM', '').strip()
            datetime_object = datetime.strptime(Deadline, '%m/%d/%Y %H:%M:%S')
            Deadline = datetime_object.strftime("%Y-%m-%d")
            SegField[24] = Deadline.strip()
            SegField[14] = '2'
            SegField[22] = "0"
            SegField[26] = "0.0"
            SegField[27] = "0"  # Financier
            SegField[7] = 'BR'
            SegField[28] = 'http://www.cebw.org/en/biddings-in-progress'
            SegField[31] = 'cebw.org'
            SegField[20] = ""
            SegField[21] = ""
            SegField[42] = SegField[7]
            SegField[43] = ""

            for SegIndex in range(len(SegField)):
                print(SegIndex, end=' ')
                print(SegField[SegIndex])
                SegField[SegIndex] = html.unescape(str(SegField[SegIndex]))
                SegField[SegIndex] = str(SegField[SegIndex]).replace("'", "''")

            if len(SegField[19]) >= 200:
                SegField[19] = str(SegField[19])[:200] + '...'

            if len(SegField[18]) >= 1500:
                SegField[18] = str(SegField[18])[:1500] + '...'

            if SegField[19] == '':
                wx.MessageBox(' Short Desc Blank ', 'cebw.org',
                              wx.OK | wx.ICON_INFORMATION)
            else:
                check_date(SegField)
                # pass
            a = False
        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print("Error ON : ",
                  sys._getframe().f_code.co_name + "--> " + str(e), "\n",
                  exc_type, "\n", fname, "\n", exc_tb.tb_lineno)
            a = True
            time.sleep(5)
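
# Standalone sketch of the deadline normalisation performed above. Note the
# original strips the AM/PM marker and then parses with %H, so afternoon times
# are read as their 12-hour value; the sketch keeps that behaviour.
from datetime import datetime


def normalise_deadline(deadline):
    deadline = deadline.replace('AM', '').replace('PM', '').strip()
    return datetime.strptime(deadline, '%m/%d/%Y %H:%M:%S').strftime('%Y-%m-%d')

# normalise_deadline('09/30/2024 02:30:00 PM') -> '2024-09-30'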
Example #51
0
def just_do_it(text):
    return capwords(text)
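
# For reference: string.capwords differs from str.title in ways that several of
# these examples rely on. It splits on runs of whitespace (collapsing them to a
# single space), lowercases the rest of each word, and does not capitalise after
# apostrophes or hyphens; an optional separator argument changes the split.
import string

print(string.capwords("hello   world"))       # 'Hello World'   (whitespace collapsed)
print("hello   world".title())                # 'Hello   World'
print(string.capwords("it's rock-n-roll"))    # "It's Rock-n-roll"
print("it's rock-n-roll".title())             # "It'S Rock-N-Roll"
print(string.capwords("org_unit_name", "_"))  # 'Org_Unit_Name' (split on '_')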
Example #52
0
    def addItemToCards(self, item):
        "This method actually do conversion"

        # new anki card
        note = ForeignNote()

        # clean Q and A
        note.fields.append(
            self._fudgeText(self._decode_htmlescapes(item.Question)))
        note.fields.append(
            self._fudgeText(self._decode_htmlescapes(item.Answer)))
        note.tags = []

        # pre-process scheduling data
        # convert learning data
        if (not self.META.resetLearningData and int(item.Interval) >= 1
                and getattr(item, "LastRepetition", None)):
            # migration of LearningData algorithm
            tLastrep = time.mktime(
                time.strptime(item.LastRepetition, '%d.%m.%Y'))
            tToday = time.time()
            card = ForeignCard()
            card.ivl = int(item.Interval)
            card.lapses = int(item.Lapses)
            card.reps = int(item.Repetitions) + int(item.Lapses)
            nextDue = tLastrep + (float(item.Interval) * 86400.0)
            remDays = int((nextDue - time.time()) / 86400)
            card.due = self.col.sched.today + remDays
            card.factor = int(
                self._afactor2efactor(float(item.AFactor.replace(',', '.'))) *
                1000)
            note.cards[0] = card

        # categories & tags
        # It is usually worth storing every theme (the tree structure of the SM collection) in tags, but not always;
        # you can decide whether to tag all topics or only those matching some pattern.
        tTaggTitle = False
        for pattern in self.META.pathsToBeTagged:
            if item.lTitle is not None and pattern.lower() in " ".join(
                    item.lTitle).lower():
                tTaggTitle = True
                break
        if tTaggTitle or self.META.tagAllTopics:
            # normalize - remove diacritic punctuation from unicode chars to ascii
            item.lTitle = [self._unicode2ascii(topic) for topic in item.lTitle]

            # Transform a title path like xyz / aaa / bbb / ccc into the tag xyzAaaBbbCcc:
            #  strip things like [999] or [111-2222] from the title path, e.g. xyz / [1000-1200] zyx / xyz
            #  clean up whitespace
            #  capitalize the first letter of each word
            tmp = list(
                set([
                    re.sub(r'(\[[0-9]+\])', ' ', i).replace('_', ' ')
                    for i in item.lTitle
                ]))
            tmp = list(set([re.sub(r'(\W)', ' ', i) for i in tmp]))
            tmp = list(set([re.sub('^[0-9 ]+$', '', i) for i in tmp]))
            tmp = list(set([capwords(i).replace(' ', '') for i in tmp]))
            tags = [j[0].lower() + j[1:] for j in tmp if j.strip() != '']

            note.tags += tags

            if self.META.tagMemorizedItems and int(item.Interval) > 0:
                note.tags.append("Memorized")

            self.logger('Element tags\t- ' + repr(note.tags), level=3)

        self.notes.append(note)
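
# Compact sketch of the title-path-to-tag pipeline the comments above describe;
# the cleaning regexes are the ones used in the importer, only the list/set
# plumbing is simplified (so ordering of the result may vary).
import re
from string import capwords


def titles_to_tags(title_path):
    tmp = {re.sub(r'(\[[0-9]+\])', ' ', t).replace('_', ' ') for t in title_path}
    tmp = {re.sub(r'(\W)', ' ', t) for t in tmp}
    tmp = {re.sub('^[0-9 ]+$', '', t) for t in tmp}
    tmp = {capwords(t).replace(' ', '') for t in tmp}
    return [t[0].lower() + t[1:] for t in tmp if t.strip()]

# titles_to_tags(['cell biology', 'my_notes [123]']) -> ['cellBiology', 'myNotes']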
Example #53
0
    def send(cls, r, resource):
        """
            Method to retrieve updates for a subscription, render the
            notification message and send it - responds to POST?format=msg
            requests to the respective resource.

            Args:
                r: the S3Request
                resource: the S3Resource
        """

        _debug = current.log.debug
        _debug("S3Notifications.send()")

        json_message = current.xml.json_message

        # Read subscription data
        source = r.body
        source.seek(0)
        data = source.read()
        subscription = json.loads(data)

        #_debug("Notify PE #%s by %s on %s of %s since %s" % \
        #           (subscription["pe_id"],
        #            str(subscription["method"]),
        #            str(subscription["notify_on"]),
        #            subscription["resource"],
        #            subscription["last_check_time"],
        #            ))

        # Check notification settings
        notify_on = subscription["notify_on"]
        methods = subscription["method"]
        if not notify_on or not methods:
            return json_message(message="No notifications configured "
                                "for this subscription")

        # Authorization (pe_id must not be None)
        pe_id = subscription["pe_id"]

        if not pe_id:
            r.unauthorised()

        # Fields to extract
        fields = resource.list_fields(key="notify_fields")
        if "created_on" not in fields:
            fields.append("created_on")

        # Extract the data
        data = resource.select(fields, represent=True, raw_data=True)
        rows = data["rows"]

        # How many records do we have?
        numrows = len(rows)
        if not numrows:
            return json_message(message="No records found")

        #_debug("%s rows:" % numrows)

        # Prepare meta-data
        get_config = resource.get_config
        settings = current.deployment_settings

        page_url = subscription["page_url"]

        crud_strings = current.response.s3.crud_strings.get(resource.tablename)
        if crud_strings:
            resource_name = crud_strings.title_list
        else:
            resource_name = string.capwords(resource.name, "_")

        last_check_time = s3_decode_iso_datetime(
            subscription["last_check_time"])

        email_format = subscription["email_format"]
        if not email_format:
            email_format = settings.get_msg_notify_email_format()

        filter_query = subscription.get("filter_query")

        meta_data = {
            "systemname": settings.get_system_name(),
            "systemname_short": settings.get_system_name_short(),
            "resource": resource_name,
            "page_url": page_url,
            "notify_on": notify_on,
            "last_check_time": last_check_time,
            "filter_query": filter_query,
            "total_rows": numrows,
        }

        # Render contents for the message template(s)
        renderer = get_config("notify_renderer")
        if not renderer:
            renderer = settings.get_msg_notify_renderer()
        if not renderer:
            renderer = cls._render

        contents = {}
        if email_format == "html" and "EMAIL" in methods:
            contents["html"] = renderer(resource, data, meta_data, "html")
            contents["default"] = contents["html"]
        if email_format != "html" or "EMAIL" not in methods or len(
                methods) > 1:
            contents["text"] = renderer(resource, data, meta_data, "text")
            contents["default"] = contents["text"]

        # Subject line
        subject = get_config("notify_subject")
        if not subject:
            subject = settings.get_msg_notify_subject()
        if callable(subject):
            subject = subject(resource, data, meta_data)

        from string import Template
        subject = Template(subject).safe_substitute(S="%(systemname)s",
                                                    s="%(systemname_short)s",
                                                    r="%(resource)s")
        subject = subject % meta_data

        # Attachment
        attachment = subscription.get("attachment", False)
        document_ids = None
        if attachment:
            attachment_fnc = settings.get_msg_notify_attachment()
            if attachment_fnc:
                document_ids = attachment_fnc(resource, data, meta_data)

        # **data for send_by_pe_id function in s3msg
        send_data = {}
        send_data_fnc = settings.get_msg_notify_send_data()
        if callable(send_data_fnc):
            send_data = send_data_fnc(resource, data, meta_data)

        # Helper function to find message templates from a priority list
        join = lambda *f: os.path.join(current.request.folder, *f)

        def get_msg_template(path, filenames):
            for fn in filenames:
                filepath = join(path, fn)
                if os.path.exists(filepath):
                    try:
                        return open(filepath, "rb")
                    except:
                        pass
            return None

        # Render and send the message(s)
        templates = settings.get_template()
        if templates != "default" and not isinstance(templates, (tuple, list)):
            templates = (templates, )
        prefix = resource.get_config("notify_template", "notify")

        send = current.msg.send_by_pe_id

        success = False
        errors = []

        for method in methods:

            error = None

            # Get the message template
            msg_template = None
            filenames = ["%s_%s.html" % (prefix, method.lower())]
            if method == "EMAIL" and email_format:
                filenames.insert(0,
                                 "%s_email_%s.html" % (prefix, email_format))
            if templates != "default":
                for template in templates[::-1]:
                    path = join("modules", "templates", template, "views",
                                "msg")
                    msg_template = get_msg_template(path, filenames)
                    if msg_template is not None:
                        break
            if msg_template is None:
                path = join("views", "msg")
                msg_template = get_msg_template(path, filenames)
            if msg_template is None:
                msg_template = StringIO(
                    s3_str(current.T("New updates are available.")))

            # Select contents format
            if method == "EMAIL" and email_format == "html":
                output = contents["html"]
            else:
                output = contents["text"]

            # Render the message
            try:
                message = current.response.render(msg_template, output)
            except:
                exc_info = sys.exc_info()[:2]
                error = ("%s: %s" % (exc_info[0].__name__, exc_info[1]))
                errors.append(error)
                continue
            finally:
                if hasattr(msg_template, "close"):
                    msg_template.close()

            if not message:
                continue

            # Send the message
            #_debug("Sending message per %s" % method)
            #_debug(message)
            try:
                sent = send(
                    pe_id,
                    # RFC 2822
                    subject=s3_truncate(subject, 78),
                    message=message,
                    contact_method=method,
                    system_generated=True,
                    document_ids=document_ids,
                    **send_data)
            except:
                exc_info = sys.exc_info()[:2]
                error = ("%s: %s" % (exc_info[0].__name__, exc_info[1]))
                sent = False

            if sent:
                # Successful if at least one notification went out
                success = True
            else:
                if not error:
                    error = current.session.error
                    if isinstance(error, list):
                        error = "/".join(error)
                if error:
                    errors.append(error)

        # Done
        if errors:
            message = ", ".join(errors)
        else:
            message = "Success"
        return json_message(success=success,
                            statuscode=200 if success else 403,
                            message=message)
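
# The subject line above goes through two substitution passes: $S/$s/$r
# placeholders are first rewritten to %-style keys via Template.safe_substitute,
# then filled from the meta_data dict. A standalone illustration with a made-up
# subject template and made-up meta-data values:
from string import Template

meta_data = {"systemname": "Example EMS",
             "systemname_short": "EMS",
             "resource": "Hospitals"}

subject = "$s: new updates for $r"
subject = Template(subject).safe_substitute(S="%(systemname)s",
                                            s="%(systemname_short)s",
                                            r="%(resource)s")
print(subject)              # '%(systemname_short)s: new updates for %(resource)s'
print(subject % meta_data)  # 'EMS: new updates for Hospitals'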
Example #54
0
    def OnSelectStyle(self, event):
        current = self.boxStyle.GetSelection()
        seltext = self.boxStyle.GetStringSelection()

        if self.last > -1:
            if self.ChangeSpec == 0:
                self.targetArray[self.last] = self.GetStyleString()
            elif self.ChangeSpec == 1:
                self.targetArray[self.last] = self.GetForeground()
            elif self.ChangeSpec == 2:
                self.targetArray[self.last] = self.GetColorString()
            elif self.ChangeSpec == 3:
                self.targetArray[self.last] = self.GetBackground()

        self.ChangeSpec = 0

        stylestring = self.targetArray[current]
        if (seltext == "Caret Foreground") or (
                seltext == "Long Line Indicator") or (seltext
                                                      == "Indentation Guide"):
            self.ChangeSpec = 1
            stylestring = "fore:" + stylestring
        elif (seltext == "Selection") or (seltext == "Folding"):
            self.ChangeSpec = 2
        elif seltext == "Current Line Highlight":
            self.ChangeSpec = 3
            stylestring = "back:" + stylestring

        self.font = getStyleProperty("face", stylestring)
        if not self.font:
            self.font = getStyleProperty("face", self.targetArray[0])
        self.size = getStyleProperty("size", stylestring)
        if not self.size:
            self.size = getStyleProperty("size", self.targetArray[0])
        self.foreground = getStyleProperty("fore", stylestring)
        if not self.foreground:
            self.foreground = getStyleProperty("fore", self.targetArray[0])
        self.background = getStyleProperty("back", stylestring)
        if not self.background:
            self.background = getStyleProperty("back", self.targetArray[0])
        self.bold = getStyleProperty("bold", stylestring)
        self.italic = getStyleProperty("italic", stylestring)
        self.underline = getStyleProperty("underline", stylestring)

        if self.ChangeSpec > 0:
            self.font = getStyleProperty("face", self.targetArray[0])
            self.size = getStyleProperty("size", self.targetArray[0])
            if self.ChangeSpec == 1:
                self.background = getStyleProperty("back", self.targetArray[0])
            elif self.ChangeSpec == 3:
                self.foreground = getStyleProperty("fore", self.targetArray[0])

        if self.font not in self.FontList:
            f1 = string.capwords(self.font)
            f2 = self.font.lower()  # string.lower() no longer exists in Python 3
            if f1 in self.FontList:
                self.font = f1
            elif f2 in self.FontList:
                self.font = f2

        if self.font not in self.FontList:
            old = self.font
            self.size = '12'
            options = ['Courier', 'Courier 10 Pitch', 'Monospace', 'Sans', '']
            for font in options:
                if font in self.FontList:
                    self.font = font
                    break
            # I don't know why this raises a traceback: no foreground !!!
            # drScrolledMessageDialog.ShowMessage(self, ("Default font [%s] not found! \nChose [%s] instead." % (old, self.font)), "Error")
            print("Default font [%s] not found! \nChose [%s] instead." % (
                old, self.font))

        self.txtPreview.StyleResetDefault()
        self.txtPreview.StyleSetSpec(
            wx.stc.STC_STYLE_DEFAULT,
            ("fore:" + self.foreground + ",back:" + self.background +
             ",size:" + self.size + ",face:" + self.font + "," + self.bold +
             "," + self.italic + "," + self.underline))
        self.txtPreview.StyleClearAll()
        self.txtPreview.StartStyling(0, 0xff)

        try:
            #self.boxFonts.SetStringSelection(self.font)
            i = self.boxFonts.FindString(self.font)
            if i < 0:
                i = 0
            self.boxFonts.Select(i)
            #self.boxFonts.EnsureVisible(i) # Bug: Doesn't work
            self.boxFonts.SetFirstItem(i)
        except:
            drScrolledMessageDialog.ShowMessage(self, (
                "Something awful happened trying to \nset the font to the default."
            ), "Error")
            self.boxFonts.SetSelection(0)

        try:
            tsizearray = [
                '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18',
                '19', '20', '21', '22', '23', '24', '25', '26', '27', '28',
                '29', '30'
            ]
            if not self.size in tsizearray:
                self.boxSize.SetValue(self.size)
            else:
                i = tsizearray.index(self.size)
                self.boxSize.SetSelection(i)
        except:
            drScrolledMessageDialog.ShowMessage(self, (
                "Something awful happened trying to \nset the font to the default."
            ), "Error")
            self.boxSize.SetSelection(0)

        self.OnSizeSelect(event)

        self.fgPanel.SetValue(self.foreground)
        self.bgPanel.SetValue(self.background)

        self.chkBold.SetValue((len(self.bold) > 0))
        self.chkItalic.SetValue((len(self.italic) > 0))
        self.chkUnderline.SetValue((len(self.underline) > 0))

        self.boxFonts.Enable(self.ChangeSpec == 0)
        self.boxSize.Enable(self.ChangeSpec == 0)
        self.chkBold.Enable(self.ChangeSpec == 0)
        self.chkItalic.Enable(self.ChangeSpec == 0)
        self.chkUnderline.Enable(self.ChangeSpec == 0)
        if self.ChangeSpec == 1:
            self.fgPanel.Enable(True)
            self.bgPanel.Enable(False)
        elif self.ChangeSpec == 3:
            self.fgPanel.Enable(False)
            self.bgPanel.Enable(True)
        else:
            self.fgPanel.Enable(True)
            self.bgPanel.Enable(True)

        self.last = current
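
# The font lookup above tries the stored face name and then capword-ed and
# lower-cased variants before falling back to a list of stock faces. The same
# logic extracted into a small helper (a rewrite for illustration, not the
# editor's own API):
import string


def resolve_font(name, available,
                 fallbacks=('Courier', 'Courier 10 Pitch', 'Monospace', 'Sans', '')):
    for candidate in (name, string.capwords(name), name.lower()):
        if candidate in available:
            return candidate
    for candidate in fallbacks:
        if candidate in available:
            return candidate
    return ''

# resolve_font('courier new', ['Courier New', 'Sans']) -> 'Courier New'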
Example #55
0
def prettify_permission_name(perm_name: str) -> str:
    pretty_perm_name = string.capwords(
        f"{perm_name}".replace('_', ' ')
    )  # Capitalize the permission names and replace underscores with spaces.
    pretty_perm_name = "Send TTS Messages" if pretty_perm_name == "Send Tts Messages" else pretty_perm_name  # Make sure that we capitalize the TTS acronym properly.
    return pretty_perm_name
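
# Usage of the helper above; the inputs are the usual discord.py permission
# attribute names.
print(prettify_permission_name("manage_messages"))    # 'Manage Messages'
print(prettify_permission_name("send_tts_messages"))  # 'Send TTS Messages'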
Example #56
0
    def __init__(self, *args, **kwargs):
        self.main_headline = None
        if "type" in kwargs and kwargs["type"] == "charity":

            # Create the words necessary for the headline

            hSubject = NounAnimate(
                randomDict([CSV_RELATIVE_PATH + "/rhg_nAnimate.csv"]),
                randomQuantity())
            hAction = Verb(randomDict([CSV_RELATIVE_PATH + "/rhg_verbs.csv"]))
            hActionTense = ""
            hObject = Noun(
                randomDict([CSV_RELATIVE_PATH + "/rhg_nInanimate.csv"]),
                randomQuantity())

            # Assign the appropriate tense of the verb based on whether or not
            # the subject is singular/plural

            if hSubject.form == "singular":
                hActionTense = hAction.present

            if hSubject.form == "plural":
                hActionTense = hAction.infinitive

            # Assemble the headline using the words

            mainHeadline = "{subject} {action} {theObject} For Charity"

            mainHeadlineFormat = mainHeadline.format(
                subject=hSubject.name,
                action=hActionTense,
                theObject=hObject.name,
            )

            # Create the words necessary for the first sentence

            hSubjectArticle = hSubject.article
            hSubjectQuantity = hSubject.quantity
            hObjectQuantifier = hObject.quantifiers
            hObjectAdjective = Adjective(
                randomDict([CSV_RELATIVE_PATH + "/rhg_adjectives.csv"]))
            hPlace = Noun(randomDict([CSV_RELATIVE_PATH + "/rhg_nPlaces.csv"]))
            hPlaceAdjective = Adjective(
                randomDict([CSV_RELATIVE_PATH + "/rhg_adjectives.csv"]))
            hFoundationCity = Noun(
                randomDict([CSV_RELATIVE_PATH + "/rhg_nPlaces.csv"],
                           {"uscapital": "1"}))
            hFoundationObject = Noun(
                randomDict([CSV_RELATIVE_PATH + "/rhg_nInanimate.csv"]),
                randomQuantity())
            hFoundationSuffix = random.choice([
                "Foundation", "Institute", "Alliance", "Hospital",
                "Association", "Conservancy", "Society", "Trust", "Committee",
                "Fund"
            ])

            # If the subject is a singular common noun, keep its article;
            # if the subject is a proper noun (or plural), drop it.

            if not (hSubject.form == "singular" and hSubject.proper == "0"):
                hSubjectArticle = ""

            # If there is only one subject, don't indicate the quantity

            if hSubjectQuantity == 1:
                hSubjectQuantity = ""

            # If there is more than one subject, take the int quantity and
            # convert it to a string (i.e. 4 to four)

            elif hSubjectQuantity > 1:
                hSubjectQuantity = numberToString(str(hSubject.quantity))

            # Assemble the 1st sentence using the words
            #
            # Ex: Today seven police officers destroyed some naughty couches
            # on a cruise ship for a local charity.

            firstSentence = (
                "Today {subjectArticle} {subjectQuantity} {subject} {actionPast}"
                + " {objectQuantifier} {theObject} {placePreposition}" +
                " {placeArticle} {place} for the {foundationCity} {foundationObject} {foundationSuffix}. "
            )

            firstSentenceFormat = firstSentence.format(
                subjectArticle=hSubjectArticle,
                subjectQuantity=hSubjectQuantity,
                subject=hSubject.name,
                actionPast=hAction.past,
                objectQuantifier=hObjectQuantifier,
                theObject=hObject.name,
                placePreposition=hPlace.prepositions,
                placeArticle=hPlace.article,
                place=hPlace.name,
                foundationCity=hFoundationCity.name,
                foundationObject=hFoundationObject.singular.capitalize(),
                foundationSuffix=hFoundationSuffix)

            # Create the words necessary for the 2nd sentence

            hAttendanceAmount = random.randint(2, 998)
            hDollarsInt = weightedDonationAmount()
            hDollars = dollars(hDollarsInt)
            hDollarsAdjective = [
                "a paltry", "a disappointing", "an astonishing"
            ]

            # Choose an adjective for the dollar amount based on how high or low it is.

            if hDollarsInt < 500:
                hDollarsAdjective = hDollarsAdjective[0]

            elif hDollarsInt >= 500 and hDollarsInt <= 2500:
                hDollarsAdjective = hDollarsAdjective[1]

            elif hDollarsInt >= 500000:
                hDollarsAdjective = hDollarsAdjective[2]

            else:
                hDollarsAdjective = ""

            # Assemble the 2nd sentence using the words

            secondSentence = (
                "The event, which garnered an average attendance of {attendanceAmount}"
                +
                " people, was said to have raised {dollarsAdjective} {dollars}. "
            )

            secondSentenceFormat = secondSentence.format(
                attendanceAmount=hAttendanceAmount,
                dollarsAdjective=hDollarsAdjective,
                dollars=hDollars)

            # Create the words necessary for the 3rd sentence

            hSubjectPronoun1st = Pronoun(hSubject, pov="1st")
            hPlacePronoun3rd = Pronoun(hPlace, pov="3rd")
            hPlacePronounDemonstrativeClose = hPlacePronoun3rd.demonstrativeClose
            hObjectPronoun3rd = Pronoun(hObject, pov="3rd")
            hObjectPronoun3rdDemonstrativeClose = hObjectPronoun3rd.demonstrativeClose
            hSubjectQuantifier = ""

            # If the object is not countable, use "this" as its close demonstrative pronoun;
            # otherwise it sounds strange, e.g. given the object "blood" it would produce
            # "People love to watch me eat these blood" if blood were treated as plural.

            if hPlace.proper == "1":
                hPlacePronounDemonstrativeClose = ""

            if hObject.countable == "0":
                hObjectPronoun3rdDemonstrativeClose = "this"

            if hSubject.form == "singular" and hSubject.proper == "0":
                hSubjectQuantifier = "the"

            if hSubject.form == "plural":
                hSubjectQuantifier = "one of the"

            # Assemble the 3rd sentence using the words

            thirdSentence = (
                "{beginQuote}{subjectPronoun1stHave} been coming to {placePronounDemonstrative} "
                +
                "{place} for {numberOfYears} years. People love to watch {subjectPronoun1stObject} "
                +
                "{actionInfinitive} {objectPronoun3rdDemonstrative} {theobject} and {SubjectPronoun1stSubject} "
                +
                "love every second of it!{endQuote}, explained {subjectQuantifier} {subject}."
            )

            #thirdSentence = ("{pronoun1stHave} been coming to {this} {place} for {numberOfYears} years. " +
            #                 "People love to watch {Pronoun} {actionInfinitive} {thisThese} {object} and " +
            #                 "{Pronoun} {emotion} every second of it.")

            thirdSentenceFormat = thirdSentence.format(
                beginQuote="\"",
                subjectPronoun1stHave=hSubjectPronoun1st.have.capitalize(),
                placePronounDemonstrative=hPlacePronounDemonstrativeClose,
                place=hPlace.name,
                numberOfYears=str(random.randint(2, 79)),
                subjectPronoun1stObject=hSubjectPronoun1st.object,
                actionInfinitive=hAction.infinitive,
                objectPronoun3rdDemonstrative=
                hObjectPronoun3rdDemonstrativeClose,
                theobject=hObject.name,
                SubjectPronoun1stSubject=hSubjectPronoun1st.subject,
                subjectQuantifier=hSubjectQuantifier,
                subject=hSubject.name,
                endQuote="\"")

            # Remove extra spaces

            if "   " in firstSentenceFormat:
                # print("\nthere's a triple space in the sentence.. repairing.\n")
                firstSentenceFormat = firstSentenceFormat.replace("   ", " ")

            if "  " in firstSentenceFormat:
                # print("\nthere's a double space in the sentence.. repairing.\n")
                firstSentenceFormat = firstSentenceFormat.replace("  ", " ")

            if "  " in secondSentenceFormat:
                # print("\nthere's a double space in the sentence.. repairing.\n")
                secondSentenceFormat = secondSentenceFormat.replace("  ", " ")

            if "  " in secondSentenceFormat:
                # print("\nthere's a double space in the sentence.. repairing.\n")
                secondSentenceFormat = secondSentenceFormat.replace("  ", " ")

            if "   " in thirdSentenceFormat:
                # print("\nthere's a triple space in the sentence.. repairing.\n")
                thirdSentenceFormat = thirdSentenceFormat.replace("   ", " ")

            if "  " in thirdSentenceFormat:
                # print("\nthere's a double space in the sentence.. repairing.\n")
                thirdSentenceFormat = thirdSentenceFormat.replace("  ", " ")

            print("\n")

            print("----Today's News----")

            print("\n")

            print(string.capwords(mainHeadlineFormat))

            print("\n")

            print(firstSentenceFormat + secondSentenceFormat +
                  thirdSentenceFormat)

            print("\n")

            self.main_headline = string.capwords(mainHeadlineFormat)

            self.blurb = firstSentenceFormat
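
# The repeated replace('   ' / '  ') passes at the end of the constructor can be
# collapsed into a single regex; a small helper doing the same cleanup (a
# rewrite, not the generator's own code):
import re


def squeeze_spaces(sentence):
    return re.sub(r' {2,}', ' ', sentence)

# squeeze_spaces('Today  seven   police officers ...') -> 'Today seven police officers ...'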
Example #57
0
            def submit1():

                l2 = Label(f1,
                           text='Sir please select the name of the student: ',
                           bg='black',
                           fg='white',
                           font=(10))
                l2.grid(row=1, column=0, sticky='W', padx=10)

                data = pd.read_excel('Students List.xlsx').set_index('Name')

                n = StringVar()

                if boxn.get().isdigit():
                    cl = int(boxn.get())

                else:
                    cl = string.capwords(boxn.get())

                box1 = ttk.Combobox(
                    f1,
                    textvariable=n,
                    values=data[data['Class'] == cl].index.tolist())
                box1.grid(row=1, column=1, sticky='E', padx=10)

                def submit2():

                    l3 = Label(f2,
                               text='Name: ',
                               bg='black',
                               fg='yellow',
                               font=(10))
                    l3.grid(row=2, column=0, sticky='W')

                    l4 = Label(f2,
                               text=box1.get(),
                               bg='black',
                               fg='white',
                               font=(10))
                    l4.grid(row=2, column=1, sticky='W')

                    l5 = Label(f2,
                               text='Class: ',
                               bg='black',
                               fg='yellow',
                               font=(10))
                    l5.grid(row=3, column=0, sticky='W')

                    l6 = Label(f2,
                               text=data.loc[box1.get()][0],
                               bg='black',
                               fg='white',
                               font=(10))
                    l6.grid(row=3, column=1, sticky='W')

                    l7 = Label(f2,
                               text='School ',
                               bg='black',
                               fg='yellow',
                               font=(10))
                    l7.grid(row=4, column=0, sticky='W')

                    l8 = Label(f2,
                               text=data.loc[box1.get()][1],
                               bg='black',
                               fg='white',
                               font=(10))
                    l8.grid(row=4, column=1, sticky='W')

                    l9 = Label(f2,
                               text='Contact Information: ',
                               bg='black',
                               fg='yellow',
                               font=(10))
                    l9.grid(row=5, column=0, sticky='W')

                    l10 = Label(f2,
                                text=data.loc[box1.get()][2],
                                bg='black',
                                fg='white',
                                font=(10))
                    l10.grid(row=5, column=1, sticky='W')

                    l11 = Label(f2,
                                text='Date Of Admission: ',
                                bg='black',
                                fg='yellow',
                                font=(10))
                    l11.grid(row=6, column=0, sticky='W')

                    l12 = Label(f2,
                                text=data.loc[box1.get()][3],
                                bg='black',
                                fg='white',
                                font=(10))
                    l12.grid(row=6, column=1, sticky='W')

                    l13 = Label(f2,
                                text='Remarks: ',
                                bg='black',
                                fg='yellow',
                                font=(10))
                    l13.grid(row=7, column=0, sticky='W')

                    l14 = Label(f2,
                                text=data.loc[box1.get()][4],
                                bg='black',
                                fg='white',
                                font=(10))
                    l14.grid(row=7, column=1, sticky='W')

                    def submit2():
                        stu_rec.destroy()
                        Record()

                    b2 = ttk.Button(f1, text='Continue', command=submit2)
                    b2.grid(row=8, column=3)

                    b2 = ttk.Button(f1, text='Exit', command=stu_rec.destroy)
                    b2.grid(row=9, column=3)

                b2 = ttk.Button(f1, text='Submit', command=submit2)
                b2.grid(row=1, column=2)
Example #58
0
def capwords_text(text):
    return capwords(text)
Example #59
0
        def submit1():

            try:

                data = pd.read_excel('Students List.xlsx')['Name'].tolist()
                if string.capwords(e1.get()) in data:

                    messagebox.showwarning(
                        'Name Repetetion',
                        'Sir a student by this name has already registered so kindly change the name to avoid any confusion.'
                    )
                    return None

            except FileNotFoundError:
                print('File Students List.xlsx does not exist')

            if e1.get() == '':

                messagebox.showerror(
                    'Name please',
                    'Sir you forgot to enter a name so kindly enter a name.')
                return None

            if e2.get() == '':

                messagebox.showerror(
                    'Class please',
                    'Sir you forgot to enter class ,so kindly enter a class number.'
                )
                return None

            if not e4.get().isdigit():

                warn = messagebox.showerror(
                    'Invalid Contact Number',
                    'Sir the contact number entered is not valid so kindly change the number.'
                )

                return None

            if os.path.isfile('Students List.xlsx'):

                data = pd.read_excel('Students List.xlsx')
                data=data.append({'Name':string.capwords(e1.get()),'Class':e2.get(),'School':string.capwords(e3.get()),\
                                    'Contact Number':e4.get(),\
                                        'Date Of Joining':current_date(),'Remarks':string.capwords(e5.get())},ignore_index=True)
                data.set_index('Name', inplace=True)

                os.remove('Students List.xlsx')

                print('Appended Sir')

            else:

                data = pd.DataFrame(
                    {'Name': [string.capwords(e1.get())], 'Class': [e2.get()],
                     'School': [string.capwords(e3.get())], 'Contact Number': [e4.get()],
                     'Date Of Joining': [current_date()],
                     'Remarks': [string.capwords(e5.get())]})
                data.set_index('Name', inplace=True)

            data.sort_values(by=['Class', 'Name'], inplace=True)
            data.to_excel('Students List.xlsx')
            reg.destroy()

            que = messagebox.askyesno(
                'Wanna Continue?', 'Sir do you want to register more names?')

            if que:
                Registration()
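The submit1 handler above relies on a read-append-rewrite pattern around a single Excel sheet. Below is a minimal standalone sketch of that pattern, not the original code: it assumes openpyxl is installed for pandas Excel I/O, uses pd.concat because DataFrame.append was removed in pandas 2.0, and the helper name append_student plus the literal joining date are illustrative placeholders (the original uses its own current_date() helper).

import os
import string

import pandas as pd


def append_student(path, record):
    # Read the existing sheet if present, add one row, then sort and rewrite it.
    new_row = pd.DataFrame([record])
    if os.path.isfile(path):
        data = pd.concat([pd.read_excel(path), new_row], ignore_index=True)
    else:
        data = new_row
    data.sort_values(by=['Class', 'Name'], inplace=True)
    data.set_index('Name', inplace=True)
    data.to_excel(path)


# Hypothetical usage mirroring the form fields captured above.
append_student('Students List.xlsx', {
    'Name': string.capwords('john doe'),
    'Class': '10',
    'School': string.capwords('some school'),
    'Contact Number': '9999999999',
    'Date Of Joining': '01-01-2021',  # placeholder for current_date()
    'Remarks': '',
})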
Example #60
0
        def submit():

            l2 = Label(f1, text='Name: ', bg='black', fg='white', font=(10))
            l2.grid(row=1, column=0)

            if box4.get().isdigit():
                stu_class = int(box4.get())

            else:
                stu_class = string.capwords(box4.get())

            data = pd.read_excel('Students List.xlsx')
            options = data[data['Class'] == stu_class]['Name'].tolist()

            n = StringVar()
            box1 = ttk.Combobox(f1, textvariable=n, values=options)
            box1.grid(row=1, column=1, sticky='W', padx=10)

            def submit2():

                stu_name = box1.get()

                if stu_name not in options:

                    messagebox.showwarning(
                        'Invalid Name',
                        'Sir, we have no student by this name, so please select a correct name.'
                    )
                    box1.set('')
                    return None

                l3 = Label(f1,
                           text='Fees Deposited: ',
                           bg='black',
                           fg='white',
                           font=(10))
                l3.grid(row=2, column=0)

                e2 = Entry(f1, font=(10))
                e2.grid(row=2, column=1, padx=10, pady=10, sticky='W')

                def submit3():

                    if not e2.get().isdigit():

                        messagebox.showwarning(
                            'Invalid Fees',
                            'Sir please enter a valid fee amount.')
                        e2.delete(0, END)
                        return None

                    if not os.path.isfile('Fee Deposits.xlsx'):

                        dataf = pd.DataFrame({
                            'Name': [string.capwords(stu_name)],
                            'Class': [box4.get()],
                            'Fee Deposited': [float(e2.get())],
                            'Date': [current_date()]
                        })
                        dataf.set_index('Name', inplace=True)
                        dataf.to_excel('Fee Deposits.xlsx')

                    else:

                        dataf = pd.read_excel('Fee Deposits.xlsx')
                        dataf = dataf.append(
                            {'Name': string.capwords(stu_name), 'Class': box4.get(),
                             'Fee Deposited': float(e2.get()), 'Date': current_date()},
                            ignore_index=True)
                        dataf.set_index('Name', inplace=True)

                        os.remove('Fee Deposits.xlsx')
                        dataf.to_excel('Fee Deposits.xlsx')
                        print('Appended Sir')

                        l4 = Label(f1,
                                   text='Deposit Recorded Successfully.',
                                   bg='black',
                                   fg='blue',
                                   font=(10))
                        l4.grid(row=3, column=0)

                    ask = messagebox.askyesno(
                        "Continue?",
                        "Fee record of " + string.capwords(box1.get()) +
                        " stored successfully. Do you want to continue?")

                    if ask:

                        dep.destroy()
                        deposit()

                    else:

                        dep.destroy()

                b3 = ttk.Button(f1, text='Submit', command=submit3)
                b3.grid(row=2, column=2)

            b2 = ttk.Button(f1, text='Submit', command=submit2)
            b2.grid(row=1, column=2)
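As a standalone illustration of the lookup step in submit() above, the sketch below filters the roster by class and feeds the matching names into a ttk.Combobox, then rejects any typed-in name that is not in the list, mirroring the check in submit2(). It assumes 'Students List.xlsx' already exists with 'Name' and 'Class' columns and picks class 10 as an example; the window and widget names are illustrative, not the original ones.

import tkinter as tk
from tkinter import ttk

import pandas as pd

root = tk.Tk()
root.title('Select Student')

# Filter the roster down to one class, as submit() does with box4.get().
data = pd.read_excel('Students List.xlsx')
options = data[data['Class'] == 10]['Name'].tolist()

name_var = tk.StringVar()
box = ttk.Combobox(root, textvariable=name_var, values=options)
box.grid(row=0, column=0, padx=10, pady=10)


def on_submit():
    # Reject anything typed into the box that is not a known name.
    if box.get() not in options:
        box.set('')
        print('Invalid name')
    else:
        print('Selected:', box.get())


ttk.Button(root, text='Submit', command=on_submit).grid(row=0, column=1)
root.mainloop()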