def import_from_wiki(self, page):
    """Import a single meeting-notes page from the wiki into this MeetingNote.

    :param page: wiki page title, e.g. 'Meeting Notes 2020 01 01'
    Skips the import when a note for the same date already exists.
    """
    from getConfig import get_config
    import requests

    if not get_config('BASICS.WIKI.API_URL'):
        log('--> BASICS.WIKI.API_URL not found in config.json -> BASICS - Please add your WIKI_API_URL first.')
        return

    # Page titles look like 'Meeting Notes 2020 01 01' -> date '2020-01-01'.
    self.text_date = page.split('Notes ')[1].replace(' ', '-')

    # Only create the note if one for this date doesn't exist yet.
    if not MeetingNote.objects.filter(text_date=self.text_date).exists():
        from bs4 import BeautifulSoup

        response_json = requests.get(
            get_config('BASICS.WIKI.API_URL')+'?action=parse&page='+page+'&format=json').json()['parse']
        # Strip the MediaWiki JSON wrapping from around the rendered HTML.
        soup = BeautifulSoup(str(response_json['text']).replace(
            "{\'*\': \'", "").replace("'}", "").replace("\\n", "").replace("\\\'", "\'"), 'html.parser')

        # Remove all links so the stored notes contain no hrefs.
        for a in soup.findAll('a'):
            del a['href']

        self.text_notes = str(soup)
        self.updateCreatedBasedOnName()
        self.save()
        print('Imported from wiki - '+self.text_date)
    else:
        print('Skipped - Already exists. '+self.text_date)
def start(self):
    """Open the riseup pad and fill it with the meeting-notes template.

    Placeholders {{ Space }}, {{ Date }} and {{ MeetingNumber }} in the
    template are substituted before typing each line into the pad.
    """
    print('Starting...')
    import os
    import sys
    import time
    from datetime import datetime
    import pytz
    from selenium.webdriver.common.keys import Keys
    from getConfig import get_config

    browser = openMeetingNotes()
    input_field = browser.find_element_by_id('innerdocbody')
    input_field.clear()

    # Copy the template for a new meeting into the riseup pad; fall back to
    # an empty template when the file is missing or unreadable.
    # BUGFIX: use a context manager (the original leaked the file handle)
    # and catch only OSError instead of a bare except.
    try:
        with open(os.path.join(
                sys.path[0],
                'hackerspace/Website/templates/meeting_notes__'+get_config('BASICS.NAME')+'.txt'),
                'r') as template_file:
            meeting_template = template_file.read()
    except OSError:
        meeting_template = ''

    # The pad cursor stays at the top, so the lines are sent in reverse order.
    for line in reversed(meeting_template.split('\n')):
        input_field.send_keys(Keys.RETURN)
        line = line.replace('{{ Space }}', get_config('BASICS.NAME'))
        line = line.replace('{{ Date }}', str(
            datetime.now(pytz.timezone(get_config('PHYSICAL_SPACE.TIMEZONE_STRING'))).date()))
        line = line.replace('{{ MeetingNumber }}', str(
            MeetingNote.objects.count()+1))
        time.sleep(0.3)  # give the pad time to accept each line
        input_field.send_keys(line)

    print('Done: https://pad.riseup.net/p/' +
          get_config('MEETINGS.RISEUPPAD_MEETING_PATH'))
def getOpenNowStatus(language):
    """Return an HTML snippet showing whether the space is currently open.

    :param language: target language for the translated status text
    :return: HTML string with a colored status icon and the translated status

    If an event is happening right now the space is shown as open; otherwise
    the opening hours from config.json decide.
    """
    from datetime import datetime
    import calendar
    import pytz
    from getConfig import get_config
    from hackerspace.templatetags.translate import landingpage
    from hackerspace.models import Event

    if Event.objects.QUERYSET__now():
        translated_status = landingpage('Open now', language)
        color_indicator = 'green'
    else:
        timezone = pytz.timezone(get_config('PHYSICAL_SPACE.TIMEZONE_STRING'))
        now = datetime.now(timezone)
        today_weekday = calendar.day_name[now.weekday()]
        status = 'Unknown'
        color_indicator = ''  # no icon color until a matching entry is found
        for status_change in get_config(
                'PHYSICAL_SPACE.OPENING_HOURS')[today_weekday]:
            # Entries are (hour, minute, status, color) in chronological order.
            # BUGFIX: compare (hour, minute) as a tuple — the original
            # 'now_hour >= h and now_minute >= m' failed for e.g. 10:00
            # against a 9:30 entry because 0 >= 30 is False.
            if (now.hour, now.minute) >= (status_change[0], status_change[1]):
                status = status_change[2]
                color_indicator = status_change[3]
            else:
                break
        # BUGFIX: translate after the loop so translated_status is always
        # bound (the original raised NameError when no entry matched yet).
        translated_status = landingpage(status, language)

    if language == 'hebrew':
        return '<div dir="rtl" align="right">' + translated_status + '</div><div class="status_code_icon ' + color_indicator + ' rtl"></div>'
    else:
        return '<div class="status_code_icon ' + color_indicator + '"></div><div>' + translated_status + '</div>'
def save_wiki_photo(photo):
    """Save a wiki photo into the Photo model unless it is blocked or exists.

    :param photo: dict from the MediaWiki API with at least 'url' and
        'descriptionurl' (optionally 'canonicaltitle' and 'timestamp')
    :return: 'Saved', 'Skipped' (already imported), or None (not configured,
        not an image, or on the blocked list)
    """
    from hackerspace.models.meetingnotes import startChrome
    from dateutil.parser import parse
    from datetime import datetime
    from getConfig import get_config

    if not get_config('BASICS.WIKI.API_URL'):
        log('--> BASICS.WIKI.API_URL not found in config.json -> BASICS - Please add your WIKI_API_URL first.'
            )
        return

    if boolean_is_image(photo['url']):
        # Open the photo page in selenium and check whether any page that
        # uses this image is on the blocked list.
        browser = startChrome(True, photo['descriptionurl'])
        save_image = True
        try:
            pages_with_image = browser.find_element_by_id(
                'mw-imagepage-section-linkstoimage').text.split('\n', 1)[1]
            for blocked_page in get_config('BASICS.WIKI.PHOTOS_IGNORE_PAGES'):
                if blocked_page in pages_with_image:
                    save_image = False
                    break
        except Exception:
            # The 'links to image' section is absent on unused images.
            print(
                'LOG: --> mw-imagepage-section-linkstoimage not found - couldnt check if image url is blocked'
            )

        if not save_image:
            log('--> Skipped photo. URL on WIKI.PHOTOS_IGNORE_PAGES list')
            browser.close()  # BUGFIX: the original leaked the browser here
        elif Photo.objects.filter(url_post=photo['descriptionurl']).exists():
            log('--> Skipped photo. Already exists.')
            browser.close()
            return 'Skipped'
        else:
            # Prefer the low-resolution thumbnail URL when available.
            try:
                url_image = browser.find_element_by_class_name(
                    'mw-thumbnail-link').get_attribute('href')
            except Exception:
                url_image = photo['url']
            Photo(
                text_description_en_US=photo['canonicaltitle'] if 'canonicaltitle' in photo else None,
                url_image=url_image,
                url_post=photo['descriptionurl'],
                str_source='Wiki',
                int_UNIXtime_created=round(
                    datetime.timestamp(parse(photo['timestamp']))),
            ).save()
            log('--> New photo saved')
            browser.close()
            return 'Saved'
def import_from_twitter_hashtag(self):
    """Import photos posted on Twitter under the configured SOCIAL.HASHTAG."""
    log('import_from_twitter_hashtag()')
    from hackerspace.models.meetingnotes import startChrome
    import time
    import requests
    from getConfig import get_config

    # Check if the hashtag is saved in the settings.
    hashtag = get_config('SOCIAL.HASHTAG')
    if not hashtag:
        show_message(
            'WARNING: Can\'t find SOCIAL.HASHTAG in your config.json. Will skip importing photos from Twitter with your hashtag for now.'
        )
        time.sleep(4)
        return

    show_message(
        '✅ Found SOCIAL.HASHTAG - Start importing photos from Twitter with your hashtag ...'
    )
    time.sleep(2)

    # Verify the hashtag page is reachable before starting the browser.
    hashtag_url = 'https://twitter.com/hashtag/{}?f=image'.format(
        hashtag.split('#')[1])
    if requests.get(hashtag_url).status_code != 200:
        show_message(
            'WARNING: I can\'t access your SOCIAL.HASHTAG on Twitter. Is the hashtag correct? Will skip importing photos from Twitter with your hashtag for now.'
        )
        time.sleep(4)
        return

    browser = startChrome(True, hashtag_url)
    save_twitter_photos(browser)
def import_from_twitter(self):
    """Import photos from the Twitter page listed in SOCIAL.SOCIAL_NETWORKS."""
    log('import_from_twitter()')
    from hackerspace.models.meetingnotes import startChrome
    import time
    import requests
    from getConfig import get_config

    # Check if Twitter is saved in the social channels.
    for entry in get_config('SOCIAL.SOCIAL_NETWORKS'):
        if 'twitter.com/' in entry['url']:
            show_message(
                '✅ Found Twitter in SOCIAL.SOCIAL_NETWORKS - Start importing photos from your Twitter page ...'
            )
            time.sleep(2)
            if requests.get(entry['url']).status_code == 200:
                browser = startChrome(True, entry['url'] + '/media')
            else:
                show_message(
                    'WARNING: I can\'t access your Twitter page. Is the URL correct? Will skip importing photos from Twitter for now.'
                )
                time.sleep(4)
                return
            # Only the first Twitter entry is used.
            break
    else:
        # for/else: this branch runs only when the loop finished without
        # break, i.e. no entry contained 'twitter.com/'.
        show_message(
            'WARNING: Can\'t find Twitter in SOCIAL.SOCIAL_NETWORKS in your config.json. Will skip importing photos from Twitter for now.'
        )
        time.sleep(4)
        return

    save_twitter_photos(browser)
def import_all_from_wiki(self):
    """Import every 'Meeting Notes 20xx ...' page from the wiki category."""
    import requests
    from getConfig import get_config

    WIKI_API_URL = get_config('BASICS.WIKI.API_URL')
    if not WIKI_API_URL:
        log('--> BASICS.WIKI.API_URL not found in config.json -> BASICS - Please add your WIKI_API_URL first.')
        return

    def meeting_titles(response):
        # Keep only real meeting-notes pages from the category listing.
        return [member['title']
                for member in response['query']['categorymembers']
                if 'Meeting Notes 20' in member['title']]

    response_json = requests.get(
        WIKI_API_URL + '?action=query&list=categorymembers&cmtitle=Category:Meeting_Notes&cmlimit=500&format=json').json()
    all_wiki_pages = meeting_titles(response_json)

    # Follow MediaWiki pagination until no 'cmcontinue' token is returned.
    while 'continue' in response_json and 'cmcontinue' in response_json['continue']:
        response_json = requests.get(
            WIKI_API_URL + '?action=query&list=categorymembers&cmcontinue='+response_json['continue']['cmcontinue']+'&cmtitle=Category:Meeting_Notes&cmlimit=500&format=json').json()
        all_wiki_pages += meeting_titles(response_json)

    for meeting in all_wiki_pages:
        MeetingNote().import_from_wiki(meeting)
    print('Imported all meeting notes from wiki')
def get_main_topics(self):
    """Return a comma-separated string of main topic headings from the
    meeting-notes template, or '' when the template cannot be read.

    Headings are the wiki-style '= Heading =' lines; administrative headings
    (summary, end-of-meeting, discussion items) are filtered out.
    """
    import os
    import sys
    import re
    from getConfig import get_config
    try:
        # BUGFIX: use a context manager — the original leaked the file handle.
        with open(os.path.join(
                sys.path[0],
                'hackerspace/Website/templates/meeting_notes__'+get_config('BASICS.NAME')+'.txt'),
                'r') as template_file:
            # Find the text after each '=' (wiki heading markup).
            main_topics = re.findall('(?<==).*', template_file.read())
        main_topics = [
            x.replace('==', '')
            .replace('= ', '')
            .replace(' =', '')
            .replace('=', '')
            .replace('[[', '')
            .replace(']]', '')
            .strip()
            for x in main_topics
            if x != ''
            and 'Meeting Summary' not in x
            and 'End of Meeting' not in x
            and 'Discussion Item' not in x
        ]
        return ','.join(main_topics)
    except Exception:
        # Best effort: a missing template or config simply yields no topics.
        # (Narrowed from the original bare 'except:' which also caught
        # KeyboardInterrupt/SystemExit.)
        return ''
def get_convs():
    """Read the corpus file and return all conversations.

    Each conversation is a list of utterance strings. A line starting with
    the configured 'e' marker ends the current conversation; a line starting
    with the 'm' marker contributes one utterance.
    """
    gConfig = getConfig.get_config()
    conv_path = gConfig['resource_data']

    convs = []  # all complete conversations collected so far
    with open(conv_path, encoding='utf-8') as corpus:
        one_conv = []  # utterances of the conversation currently being read
        for raw_line in corpus:
            # Drop the newline and the original word-split markers; the text
            # will be re-tokenised with jieba later.
            cleaned = raw_line.strip('\n').replace('?', '')
            cleaned = re.sub(r"[%s]+" % punctuation, "", cleaned)
            if not cleaned:
                continue
            marker = cleaned[0]
            if marker == gConfig['e']:
                # Conversation separator: flush the buffered conversation.
                if one_conv:
                    convs.append(one_conv)
                one_conv = []
            elif marker == gConfig['m']:
                utterance = cleaned.split(' ')[1]
                if '=' not in utterance:
                    one_conv.append(utterance)  # store this utterance
    return convs
async def main():
    # NOTE(review): this coroutine references `self`, `filename` and
    # `get_config` from an enclosing scope that is not visible in this
    # chunk — presumably it is nested inside a method that prepares
    # `filename`; confirm against the full file.
    browser = await launch(headless=True, ignoreHTTPSErrors=True, args=['--no-sandbox'])
    page = await browser.newPage()
    await page.emulate({'viewport': {'width': 500, 'height': 500}})
    # Screenshot the event's banner page and save it to `filename`.
    await page.goto('https://'+get_config('WEBSITE.DOMAIN')+'/'+self.str_slug+'/banner')
    await page.screenshot({'path': filename})
    await browser.close()
def delete_event_view(request):
    """Delete an event (and its whole upcoming series) identified by str_slug.

    :param request: Django request; expects GET parameter 'str_slug'
    :return: JsonResponse {'success': bool}; 404 when the slug is missing
        or unknown, 200 on success
    """
    log('delete_event_view(request)')
    # Hoisted: the original called request.GET.get('str_slug') three times.
    str_slug = request.GET.get('str_slug', None)
    if not str_slug or not Event.objects.filter(str_slug=str_slug).exists():
        log('--> Failed: Result not found')
        response = JsonResponse({'success': False})
        response.status_code = 404
    else:
        from hackerspace.APIs.slack import send_message

        # Delete the event and all upcoming ones of the same series.
        event = Event.objects.filter(str_slug=str_slug).first()
        log('--> Delete all upcoming events')
        event.delete_series()

        # Notify via Slack who deleted the event (only on the real domain).
        if 'HTTP_HOST' in request.META and request.META['HTTP_HOST'] == get_config('WEBSITE.DOMAIN'):
            send_message('🚫'+str(request.user)+' deleted the event "'+event.str_name_en_US+'"')

        response = JsonResponse({'success': True})
        response.status_code = 200
    log('--> return response')
    return response
def trainAndSaveModel(input_set, steps):
    """Train a KMeansClustering estimator on input_set and export the model.

    :param input_set: training data, convertible to a float32 tensor
    :param steps: NOTE(review): unused — the loop below iterates
        gConfig['steps'] instead; confirm which is intended.
    """
    gConfig = {}
    gConfig = getConfig.get_config(config_file='config.ini')
    num_epochs = gConfig['num_epochs']
    num_clusters = gConfig['num_clusters']
    # Define input_fn: repeat the dataset for num_epochs epochs.
    input_fn = lambda: tf.train.limit_epochs(tf.convert_to_tensor(
        input_set, dtype=tf.float32), num_epochs=num_epochs)
    # Instantiate KMeansClustering.
    kmeans = tf.contrib.factorization.KMeansClustering(
        num_clusters=num_clusters, use_mini_batch=False)
    previous_centers = None
    # NOTE(review): xrange is Python 2 only, while surrounding code uses
    # Python 3 idioms — confirm the target interpreter.
    for _ in xrange(gConfig['steps']):
        kmeans.train(input_fn)  # train one round on the training data
        centers = kmeans.cluster_centers()  # snapshot the centroids
        if previous_centers is not None:
            # Print how much the centroids moved since the last round.
            print("质心变化幅度:", centers - previous_centers)
        previous_centers = centers
        print("模型评估得分:", kmeans.score(input_fn))
    # Export the trained model as a SavedModel.
    modelPath = kmeans.export_savedmodel(
        export_dir_base="kmeansMode/",
        serving_input_receiver_fn=serving_input_receiver_fn)
    print("训练完成,model文件存放在:")
    print(modelPath)
def approve_event_view(request):
    """Approve an unapproved event and all upcoming ones with the same name.

    :param request: Django request; expects GET parameter 'str_slug'
    :return: JsonResponse {'success': bool}; 403 when not logged in,
        404 when the slug is missing or unknown, 200 on success
    """
    log('approve_event_view(request)')
    # Hoisted: the original called request.GET.get('str_slug') three times.
    str_slug = request.GET.get('str_slug', None)
    if not request.user.is_authenticated:
        log('--> Failed: User not logged in')
        response = JsonResponse({'success': False})
        response.status_code = 403
    elif not str_slug or not Event.objects.filter(boolean_approved=False, str_slug=str_slug).exists():
        log('--> Failed: Result not found')
        response = JsonResponse({'success': False})
        response.status_code = 404
    else:
        from hackerspace.APIs.slack import send_message

        DOMAIN = get_config('WEBSITE.DOMAIN')
        # Approve the event and all upcoming ones with the same name.
        event = Event.objects.filter(boolean_approved=False, str_slug=str_slug).first()
        upcoming_events = Event.objects.filter(boolean_approved=False, str_name_en_US=event.str_name_en_US).all()
        log('--> Approve all upcoming events')
        for event in upcoming_events:
            event.publish()

        # Notify via Slack who approved the event (only on the real domain).
        if 'HTTP_HOST' in request.META and request.META['HTTP_HOST'] == DOMAIN:
            send_message('✅'+str(request.user)+' approved the event "'+event.str_name_en_US+'":\nhttps://'+DOMAIN+'/'+event.str_slug)
        response = JsonResponse({'success': True})
        response.status_code = 200
    log('--> return response')
    return response
def json_data(self):
    """Serialise this event into a plain dict suitable for JSON responses."""
    event_url = 'https://'+get_config('WEBSITE.DOMAIN')+'/'+self.str_slug
    space_name = self.one_space.str_name_en_US if self.one_space else None
    guilde_name = self.one_guilde.str_name_en_US if self.one_guilde else None
    host_names = [host.str_name_shortened for host in self.many_hosts.all()]
    return {
        'str_name_en_US': self.str_name_en_US,
        'url_hackerspace_event': event_url,
        'datetime_start': str(self.datetime_start),
        'datetime_end': str(self.datetime_end),
        'str_timezone': self.str_timezone,
        'int_UNIXtime_event_start': self.int_UNIXtime_event_start,
        'int_minutes_duration': self.int_minutes_duration,
        'int_UNIXtime_event_end': self.int_UNIXtime_event_end,
        'url_featured_photo': self.url_featured_photo,
        'text_description_en_US': self.text_description_en_US,
        'str_location': self.str_location,
        'float_lat': self.float_lat,
        'float_lon': self.float_lon,
        'str_space': space_name,
        'str_guilde': guilde_name,
        'list_hosts': host_names,
        'int_series_startUNIX': self.int_series_startUNIX,
        'int_series_endUNIX': self.int_series_endUNIX,
        'str_series_repeat_how_often': self.str_series_repeat_how_often,
        'str_crowd_size': self.str_crowd_size,
        'str_welcomer': self.str_welcomer,
        'url_meetup_event': self.url_meetup_event,
        'url_discourse_event': self.url_discourse_event,
        'int_UNIXtime_created': self.int_UNIXtime_created,
        'int_UNIXtime_updated': self.int_UNIXtime_updated,
    }
def create_meetup_event(self):
    """Create a draft event on Meetup for this Event via the Meetup API.

    API Doc: https://www.meetup.com/meetup_api/docs/:urlname/events/#create
    Requires MEETUP.ACCESS_TOKEN; returns None without it, otherwise self
    (with url_meetup_event set on success).
    """
    log('event.create_meetup_event()')
    import requests
    from getKey import STR__get_key
    from getConfig import get_config

    if not STR__get_key('MEETUP.ACCESS_TOKEN'):
        log('--> No MEETUP.ACCESS_TOKEN')
        log('--> return None')
        return None

    response = requests.post('https://api.meetup.com/'+get_config('EVENTS.MEETUP_GROUP')+'/events', params={
        'access_token': STR__get_key('MEETUP.ACCESS_TOKEN'),
        'sign': True,
        'announce': False,
        'publish_status': 'draft',
        'description': self.text_description_en_US,
        # Meetup expects the duration in milliseconds.
        'duration': self.int_minutes_duration*60*1000,
        'event_hosts': None,  # TODO figure out meetup user IDs and how to add them here
        'fee': {
            'accepts': None,  # TODO add option for paid events later
            'amount': None,
            'currency': None,
            'refund_policy': None
        },
        'guest_limit': 2,  # from 0 to 2
        'how_to_find_us': get_config('PHYSICAL_SPACE.ADDRESS')['HOW_TO_FIND_US'],
        'lat': self.float_lat,
        'lon': self.float_lon,
        'name': self.str_name_en_US,
        'self_rsvp': False,
        'time': self.int_UNIXtime_event_start,
        'venue_id': None,  # TODO figure out how to get venue id
        'venue_visibility': None  # TODO
    })
    # TODO returns 400 response - scope_error: Insufficient oauth scope
    if response.status_code == 200:
        self.url_meetup_event = response.json()['link']
        self.save()
        log('--> return event')
    else:
        log('--> '+str(response.status_code)+' response: '+str(response.json()))
    return self
def save(self, *args, **kwargs):
    """Sanitise fields, generate a unique slug, geocode and save the event.

    Everything runs inside a try/except so a failing save never raises —
    failures are only logged.
    """
    try:
        log('event.save()')
        import urllib.parse
        from hackerspace.models.events import RESULT__updateTime
        from hackerspace.models import Space, Person
        import bleach
        from getConfig import get_config
        import re

        # Strip scripts/markup from user-supplied text fields.
        log('--> clean from scripts')
        if self.str_name_en_US:
            self.str_name_en_US = bleach.clean(self.str_name_en_US)
        if self.text_description_en_US:
            # Descriptions imported from Meetup are left untouched.
            if not self.url_meetup_event:
                self.text_description_en_US = bleach.clean(self.text_description_en_US)
        if self.text_description_he_IL:
            self.text_description_he_IL = bleach.clean(self.text_description_he_IL)
        if self.str_location:
            # NOTE(review): replace('<br>','<br>') is a no-op — probably
            # meant to restore '&lt;br&gt;' after cleaning; confirm intent.
            self.str_location = bleach.clean(self.str_location).replace('<br>', '<br>')
        if self.str_series_repeat_how_often:
            self.str_series_repeat_how_often = bleach.clean(self.str_series_repeat_how_often)
        if self.text_series_timing:
            self.text_series_timing = bleach.clean(self.text_series_timing)
        if self.str_crowd_size:
            self.str_crowd_size = bleach.clean(self.str_crowd_size)
        if self.str_welcomer:
            self.str_welcomer = bleach.clean(self.str_welcomer)
        if self.str_timezone:
            self.str_timezone = bleach.clean(self.str_timezone)

        # Recompute the derived time fields.
        self = RESULT__updateTime(self)

        # Build a unique slug 'event/<date>-<name>'; append a counter on clash.
        if not self.str_slug:
            self.str_slug = 'event/'+(str(self.datetime_start.date())+'-' if self.datetime_start else '')+re.sub('[\W_]+', '', self.str_name_en_US.lower())
            counter = 0
            while Event.objects.filter(str_slug=self.str_slug).exists() == True:
                counter += 1
                self.str_slug = 'event/'+(str(self.datetime_start.date())+'-' if self.datetime_start else '')+re.sub('[\W_]+', '', self.str_name_en_US.lower())+str(counter)

        log('--> Save lat/lon if not exist yet')
        if not self.float_lat:
            self.str_location, self.float_lat, self.float_lon = get_lat_lon_and_location(self.str_location)

        super(Event, self).save(*args, **kwargs)

        # Attach hosts configured per event name (only when none exist yet).
        log('--> Save hosts')
        if not self.many_hosts.exists():
            EVENTS_HOSTS_OVERWRITE = get_config('EVENTS.EVENTS_HOSTS_OVERWRITE')
            # Search the predefined event hosts from YOURHACKERSPACE.
            for event_name in EVENTS_HOSTS_OVERWRITE:
                if event_name in self.str_name_en_US:
                    for host_name in EVENTS_HOSTS_OVERWRITE[event_name]:
                        host = Person.objects.QUERYSET__by_name(host_name)
                        if host:
                            self.many_hosts.add(host)
    except:
        # NOTE(review): bare except silently swallows every error (including
        # programming errors) — consider 'except Exception' plus re-raise.
        log('--> ERROR: coudlnt save event - '+str(self))
def init_session(sess, conf='config.ini'):
    """Load the config and build the pretrained-CNN Estimator.

    :param sess: TF session, returned unchanged
    :param conf: path of the config file to load
    :return: tuple (sess, cnn_pretrained_classifier)
    """
    global gConfig
    gConfig = getConfig.get_config(conf)
    # NOTE(review): `model` and `model_dir` come from module scope — they are
    # not visible in this chunk; confirm they are defined at import time.
    cnn_pretrained_classifier = tf.estimator.Estimator(
        model_fn=model.cnn_model_fn,
        model_dir=os.path.join(model_dir, 'cnn_pretrained'),
        params=model.params)
    return sess, cnn_pretrained_classifier
def createEvent(event):
    """Create an Event from a Meetup-style or Discourse-style event dict.

    Meetup results carry millisecond timestamps under 'time'/'duration';
    the other format carries ISO date strings under event['event'].
    Venue fields fall back to the hackerspace's own address when the event
    has no external venue.
    """
    log('createEvent(event)')
    from getConfig import get_config
    from dateutil.parser import parse
    from datetime import datetime

    HACKERSPACE_NAME = get_config('BASICS.NAME')
    HACKERSPACE_ADDRESS = get_config('PHYSICAL_SPACE.ADDRESS')

    def _has_external_venue():
        # True when the event names a venue that is not the hackerspace.
        return ('venue' in event and event['venue']['name']
                and event['venue']['name'] != HACKERSPACE_NAME)

    def _venue_field(key, default):
        # External venue's value when present, else the space's default.
        if _has_external_venue() and key in event['venue']:
            return event['venue'][key]
        return default

    str_location_name = _venue_field('name', HACKERSPACE_NAME)
    str_location_street = _venue_field('address_1', HACKERSPACE_ADDRESS['STREET'])
    str_location_zip = _venue_field('zip', HACKERSPACE_ADDRESS['ZIP'])
    str_location_city = _venue_field('city', HACKERSPACE_ADDRESS['CITY'])
    str_location_countrycode = (event['venue']['country'].upper()
                                if _has_external_venue() and 'country' in event['venue']
                                else HACKERSPACE_ADDRESS['COUNTRYCODE'])

    # Start/end times: Meetup 'time' is in milliseconds; otherwise parse ISO
    # strings, defaulting the end to start + 120 minutes.
    if 'time' in event:
        start_unix = round(event['time']/1000)
        end_unix = round(event['time']/1000)+(event['duration']/1000)
    else:
        start_unix = parse(event['event']['start']).timestamp()
        if 'end' in event['event']:
            end_unix = parse(event['event']['end']).timestamp()
        else:
            end_unix = parse(event['event']['start']).timestamp()+(120*60)

    # Duration in minutes.
    if 'duration' in event:
        minutes_duration = (event['duration']/1000)/60
    elif 'end' in event['event']:
        # BUGFIX: timestamps are in seconds, so divide the difference by 60
        # to get minutes — the original divided by 1000.
        minutes_duration = round(
            (parse(event['event']['end']).timestamp()
             - parse(event['event']['start']).timestamp())/60)
    else:
        minutes_duration = 120

    if 'featured_photo' in event:
        url_featured_photo = event['featured_photo']['photo_link']
    elif 'image_url' in event and event['image_url']:
        url_featured_photo = event['image_url']
    else:
        url_featured_photo = None

    Event().create(json_content={
        'str_name_en_US': event['name'] if 'name' in event else event['title'],
        'int_UNIXtime_event_start': start_unix,
        'int_UNIXtime_event_end': end_unix,
        'int_minutes_duration': minutes_duration,
        'url_featured_photo': url_featured_photo,
        'text_description_en_US': event['description'],
        'str_location': str_location_name+'\n'+str_location_street+'\n'+str_location_zip+', '+str_location_city+', '+str_location_countrycode,
        'one_space': RESULT__extractSpace(event),
        'one_guilde': RESULT__extractGuilde(event),
        'str_series_id': event['series']['id'] if 'series' in event else None,
        'int_series_startUNIX': round(
            event['series']['start_date'] / 1000) if 'series' in event and 'start_date' in event['series'] else None,
        'int_series_endUNIX': round(
            event['series']['end_date'] / 1000) if 'series' in event and 'end_date' in event['series'] else None,
        'text_series_timing': 'weekly: '+str(event['series']['weekly']) if 'series' in event and 'weekly' in event['series'] else 'monthly: ' + str(event['series']['monthly']) if 'series' in event and 'monthly' in event['series'] else None,
        'url_meetup_event': event['link'] if 'link' in event else None,
        'int_UNIXtime_created': event['created'] if 'created' in event else parse(event['created_at']).timestamp(),
        'int_UNIXtime_updated': event['updated'] if 'updated' in event else parse(event['last_posted_at']).timestamp(),
        'str_timezone': STR__extractTimezone(event)
    })
def RESULT__extractSpace(json_meetup_result):
    """Pick the Space a meetup result belongs to.

    Order: a space named in 'how_to_find_us', then a keyword from
    EVENTS_SPACES_OVERWRITE matched against the event name, then the
    configured default space.
    """
    log('RESULT__extractSpace(json_meetup_result)')
    from hackerspace.models import Space
    from getConfig import get_config

    if 'how_to_find_us' in json_meetup_result:
        spaces = Space.objects.all()
        for space in spaces.iterator():
            if space.str_name_en_US.lower() in json_meetup_result['how_to_find_us'].lower():
                return space

    # else...
    # NOTE(review): reconstructed indentation — if this fallback block was
    # originally nested inside the 'how_to_find_us' branch, results without
    # that key returned None instead of the default space; confirm against
    # the repository history.
    EVENTS_SPACES_OVERWRITE = get_config('EVENTS.EVENTS_SPACES_OVERWRITE')
    for field in EVENTS_SPACES_OVERWRITE:
        if field in json_meetup_result['name']:
            return Space.objects.QUERYSET__by_name(EVENTS_SPACES_OVERWRITE[field])
    else:
        # for/else: runs when no overwrite keyword matched — use the default.
        return Space.objects.QUERYSET__by_name(get_config('EVENTS.EVENTS_SPACE_DEFAULT'))
def openMeetingNotes():
    """Open the riseup pad in headless Chrome and focus the editor frame.

    :return: selenium browser positioned inside the pad's nested iframes
    """
    import time
    from getConfig import get_config
    browser = startChrome(
        headless=True,
        url='https://pad.riseup.net/p/'+get_config('MEETINGS.RISEUPPAD_MEETING_PATH'))
    time.sleep(5)  # give the pad time to load before touching the frames
    # The editor lives two iframes deep.
    # NOTE(review): switch_to_frame is deprecated in newer selenium;
    # browser.switch_to.frame(0) is the modern spelling.
    browser.switch_to_frame(0)
    browser.switch_to_frame(0)
    return browser
def init_session(sess, conf='config.ini'):
    """Load the config and build a two-class DNNClassifier.

    :param sess: TF session, returned unchanged
    :param conf: path of the config file to load
    :return: tuple (sess, model)
    """
    global gConfig
    gConfig = getConfig.get_config(conf)
    # NOTE(review): FEATURES comes from module scope — not visible in this
    # chunk; confirm it lists the feature column names.
    feature_cols = [tf.contrib.layers.real_valued_column(k) for k in FEATURES]
    model = tf.contrib.learn.DNNClassifier(
        feature_columns=feature_cols,
        hidden_units=[105, 105, 105, 105, 105, 105, 105, 105],
        n_classes=2,
        model_dir=gConfig['model_dir'])
    return sess, model
def parse_log_all_func(line_no):
    """Parse one log line against every preset feature keyword.

    Reads the configuration file to obtain all preset feature keywords and
    runs parse_log_func for each of them on the given line.

    :param line_no: line number to parse
    :return: None
    """
    import getConfig

    keyword_config = getConfig.get_config()
    for feature_name in keyword_config:
        parse_log_func(line_no, keyword_config[feature_name], feature_name)
def date(self):
    """Return the creation date of this object in the space's local timezone."""
    import pytz
    from datetime import datetime
    from getConfig import get_config

    tz = pytz.timezone(get_config('PHYSICAL_SPACE.TIMEZONE_STRING'))
    created_locally = datetime.fromtimestamp(self.int_UNIXtime_created, tz)
    return created_locally.date()
def STR__extractTimezone(json_meetup_result):
    """Return the timezone string for a meetup result.

    Uses the event's own utc_offset when it differs from the space's local
    offset, otherwise the configured local timezone.
    """
    log('STR__extractTimezone(json_meetup_result)')
    from getConfig import get_config

    TIMEZONE_STRING = get_config('PHYSICAL_SPACE.TIMEZONE_STRING')
    if 'utc_offset' in json_meetup_result:
        offset = json_meetup_result['utc_offset']
        # The event is somewhere else when its offset differs from ours.
        if offset != INT__timezoneToOffset(TIMEZONE_STRING):
            return LIST__offsetToTimezone(offset)
    log('--> return STR')
    return TIMEZONE_STRING
def import_from_meetup(self, slug=None):
    """Import existing events from a Meetup group.

    :param slug: Meetup group slug. Defaults to EVENTS.MEETUP_GROUP from
        config.json, resolved at call time (backward compatible).
    """
    import requests
    from getConfig import get_config
    import time

    # BUGFIX: the original called get_config('EVENTS.MEETUP_GROUP') in the
    # default argument, i.e. exactly once at import time; resolve per call.
    if slug is None:
        slug = get_config('EVENTS.MEETUP_GROUP')
    log('Event.objects.import_from_meetup(self,slug={})'.format(slug))

    if slug:
        show_message(
            '✅ Start importing existing events from "'+slug+'" on Meetup.')
        time.sleep(2)
        if requests.get('https://meetup.com/'+slug).status_code == 200:
            json_our_group = requests.get('https://api.meetup.com/'+slug+'/events', params={
                'fields': [
                    'featured_photo',
                    'series',
                    'simple_html_description',
                    'rsvp_sample',
                    'description_images',
                ],
                'photo-host': 'public'
            }).json()
            log('--> Saving '+str(len(json_our_group)) +
                ' events from our hackerspace group')
            for event in json_our_group:
                # Import everything from our own group; from other groups
                # only events that carry the hackerspace name.
                if slug == get_config('EVENTS.MEETUP_GROUP') or get_config('BASICS.NAME') in event['name']:
                    createEvent(event)
            log('--> Done! Saved '+str(len(json_our_group)) +
                ' events from Meetup')
        else:
            show_message(
                'WARNING: I can\'t access the Meetup group. Is the URL correct? Will skip the Meetup group for now.')
            time.sleep(4)
    else:
        show_message(
            'WARNING: Can\'t find the Meetup group in your config.json. Will skip the Meetup group for now.')
        time.sleep(4)
def wiki_search(query, limit=5):
    """Search the wiki via the opensearch API and return result dicts.

    :param query: search string (URL-encoded before embedding in the URL)
    :param limit: maximum number of suggestions to return
    :return: list of {'icon', 'name', 'url'} dicts; [] when no wiki API
        URL is configured
    """
    from getConfig import get_config
    from urllib.parse import quote

    if not get_config('BASICS.WIKI.API_URL'):
        log('--> BASICS.WIKI.API_URL not found in config.json -> BASICS - Please add your WIKI_API_URL first.'
            )
        return []

    # Search the wiki -> returns the search suggestions of the mini search
    # bar on the top right. BUGFIX: quote() the query so spaces and special
    # characters survive being embedded in the URL.
    response_json = requests.get(
        get_config('BASICS.WIKI.API_URL') +
        '?action=opensearch&format=json&formatversion=2&search=' +
        quote(query) +
        '&namespace=0&limit=' + str(limit) + '&suggest=true').json()

    # Opensearch responses are positional: [query, names, descriptions, urls].
    result_names = response_json[1]
    result_urls = response_json[3]
    return [{'icon': 'wiki', 'name': name, 'url': url}
            for name, url in zip(result_names, result_urls)]
def step(self, sess, shuffled_data, shuffled_labels, graph, forward_only=None):
    """Run one prediction (forward_only) or training step.

    :param sess: TF session
    :param shuffled_data: batch of input images
    :param shuffled_labels: batch of labels (training path only)
    :param graph: TF graph holding the named tensors (prediction path only)
    :param forward_only: truthy -> predict; falsy -> train
    :return: predicted label name (predict) or accuracy (train)
    """
    keep_prop = tf.placeholder(tf.float32)
    gConfig = getConfig.get_config(config_file='config.ini')
    # Forward pass only = prediction; otherwise train with backprop.
    if forward_only:
        keep_prop = graph.get_tensor_by_name(name="keep_prop:0")
        data_tensor = graph.get_tensor_by_name(name="data_tensor:0")
        k_size = gConfig['percent'] * gConfig['dataset_size'] / 100
        # Build a random batch and place the real sample at index 0.
        dataset_array = np.random.rand(int(k_size), 32, 32, 3)
        dataset_array[0, :, :, :] = shuffled_data
        print(shuffled_data)
        print(dataset_array[0])
        print(dataset_array)
        feed_dict_test = {data_tensor: dataset_array, keep_prop: 1.0}
        softmax_propabilities_, softmax_predictions_ = sess.run(
            [self.softmax_propabilities, self.softmax_predictions],
            feed_dict=feed_dict_test)
        # Map numeric class ids back to label names via dataset metadata.
        # NOTE(review): this file handle is never closed — consider 'with'.
        file = gConfig['dataset_path'] + "batches.meta"
        patch_bin_file = open(file, 'rb')
        label_names_dict = pickle.load(patch_bin_file)
        print(label_names_dict)
        print(softmax_predictions_[0])
        print(softmax_predictions_)
        print(Counter(softmax_predictions_).most_common(1))
        #k=Counter(softmax_predictions_).most_common(1)
        #print(k)
        dataset_label_names = label_names_dict["label_names"]
        return dataset_label_names[softmax_predictions_[0]]
    else:
        cnn_feed_dict = {
            self.data_tensor: shuffled_data,
            self.label_tensor: shuffled_labels,
            keep_prop: gConfig['keeps']
        }
        softmax_predictions_, _ = sess.run(
            [self.softmax_predictions, self.ops], feed_dict=cnn_feed_dict)
        # Count the correct predictions.
        correct = np.array(
            np.where(softmax_predictions_ == shuffled_labels))
        accuracy = correct.size / (self.percent * gConfig['dataset_size'] / 100)
        return accuracy  # return the accuracy
        # NOTE(review): the original chunk ended with a stray '"""' here —
        # likely the opening of a commented-out block continuing beyond this
        # chunk; confirm against the full file.
def __open_file(self, file):
    """Open a log file, storing its content and line count on the instance.

    :param file: log file name
    :return: the log content as a list of strings
    """
    with open(file, 'r', encoding='utf-8', errors='ignore') as log_file:
        self.log = log_file.readlines()
    self.lines = len(self.log)

    # Load all preset feature keywords from the configuration file.
    import getConfig
    self.config = getConfig.get_config()

    return self.log  # string list
def RESULT__extractGuilde(json_meetup_result):
    """Return the Guilde whose configured keyword appears in the event name,
    or None when no keyword matches."""
    log('RESULT__extractGuilde(json_meetup_result)')
    from hackerspace.models import Guilde
    from getConfig import get_config

    keyword_to_guilde = get_config('EVENTS.EVENTS_GUILDES_OVERWRITE')
    event_name = json_meetup_result['name']
    for keyword, guilde_name in keyword_to_guilde.items():
        if keyword in event_name:
            log('--> {} found in result.name'.format(keyword))
            log('--> return guilde')
            return Guilde.objects.filter(str_name_en_US=guilde_name).first()

    # No configured keyword matched the event name.
    log('--> return None')
    return None
def init_session(sess, conf='seq2seq.ini'):
    """Load the config, build the seq2seq model for decoding and load the
    vocabularies.

    :param sess: TF session used to create/load the model
    :param conf: path of the config file to load
    :return: tuple (sess, model, enc_vocab, rev_dec_vocab)
    """
    global gConfig
    gConfig = getConfig.get_config(conf)
    # Create model and load parameters.
    model = create_model(sess, True)
    model.batch_size = 1  # We decode one sentence at a time.

    # Load vocabularies.
    enc_vocab_path = os.path.join(gConfig['working_directory'],
                                  "vocab%d.enc" % gConfig['enc_vocab_size'])
    dec_vocab_path = os.path.join(gConfig['working_directory'],
                                  "vocab%d.dec" % gConfig['dec_vocab_size'])
    enc_vocab, _ = data_utils.initialize_vocabulary(enc_vocab_path)
    _, rev_dec_vocab = data_utils.initialize_vocabulary(dec_vocab_path)
    return sess, model, enc_vocab, rev_dec_vocab
def DATETIME__from_date_and_time_STR(str__date, str__time):
    """Build a timezone-aware datetime from separate date and time strings.

    :param str__date: date as 'YYYY-MM-DD'
    :param str__time: time in 12-hour ('H:MM AM/PM') or 24-hour ('H:MM') form
    :return: datetime localized to the space's configured timezone
    """
    import pytz
    from datetime import datetime
    from getConfig import get_config

    TIMEZONE_STRING = get_config('PHYSICAL_SPACE.TIMEZONE_STRING')

    # 12-hour inputs carry an AM/PM suffix; everything else is 24-hour.
    uses_am_pm = 'AM' in str__time or 'PM' in str__time
    time_format = "%I:%M%p" if uses_am_pm else "%H:%M"

    naive = datetime.strptime(
        str__date + ' ' + str__time.replace(' ', ''),
        "%Y-%m-%d " + time_format)
    datetime_input = pytz.timezone(TIMEZONE_STRING).localize(naive)

    log('--> return DATETIME')
    return datetime_input
def create_discourse_event(self):
    """Create a Discourse forum post for this event and store its URL.

    :return: self, with url_discourse_event set
    """
    log('event.create_discourse_event()')
    from hackerspace.APIs.discourse import create_post
    from django.template.loader import get_template

    # Recurring events include how often they repeat in the post title.
    if self.str_series_repeat_how_often:
        name = (self.repeating +
                ' | '+self.time_range_text+' | '+self.str_name_en_US)
    else:
        name = self.datetime_range_text+' | '+self.str_name_en_US

    self.url_discourse_event = create_post(
        name,
        get_template('components/discourse/event_post.html').render({
            'result': self
        }),
        get_config('EVENTS.DISCOURSE_EVENTS_CATEGORY'))
    # Call the base save() directly to skip Event.save()'s extra processing;
    # only the URL change needs persisting here.
    super(Event, self).save()
    log('--> return event')
    return self
# coding=utf-8 import math import os import random import getConfig import jieba gConfig = {} gConfig=getConfig.get_config() conv_path = gConfig['resource_data'] if not os.path.exists(conv_path): exit() convs = [] # 用于存储对话集合 with open(conv_path) as f: one_conv = [] # 存储一次完整对话 for line in f: line = line.strip('\n').replace('/', '')#去除换行符,并在字符间添加空格符,原因是用于区分 123 与1 2 3. if line == '': continue if line[0] == gConfig['e']: if one_conv: convs.append(one_conv) one_conv = [] elif line[0] == gConfig['m']: