def put(self, args=None, **kw):
    """Register an application channel for the requesting user agent.

    On success, reply with the channel id, the user-agent id and the
    generated push endpoint; on a registration collision, reply 409.
    """
    channel_id = args or gen_id()
    agent_id = self.request.headers.get('X-UserAgent-ID', gen_id())
    gid = '%s.%s' % (agent_id, channel_id)
    if not self.storage.register_appid(agent_id, gid, self.logger):
        # Another registration already owns this id.
        self.send_error(409)  # CONFLICT
        return
    payload = {'channelID': channel_id,
               'uaid': agent_id,
               'pushEndpoint': gen_endpoint(self.config, gid)}
    self.write(json.dumps(payload))
def __format_tier(root, tier):
    """Serialize *tier* as an Antx <Layer> element under *root*."""
    layer = ET.SubElement(root, 'Layer')

    # Id: reuse the tier's existing id or mint a new one, and write it
    # back into the metadata so the tier is guaranteed to carry an id.
    tier_id = tier.metadata.get('id', gen_id())
    tier.metadata['id'] = tier_id
    ET.SubElement(layer, 'Id').text = tier_id

    # Name comes straight from the tier.
    ET.SubElement(layer, 'Name').text = tier.GetName()

    # Required Antx elements: take the tier metadata when present,
    # otherwise fall back on the default value.
    for key, default in ELT_REQUIRED_Layer.iteritems():
        if key not in ['id', 'name']:
            # UpperLowerDict restores the original case of the element name.
            ET.SubElement(layer, UpperLowerDict[key]).text = \
                tier.metadata.get(key, default)

    # Optional Antx elements, same fallback rule.
    for key, default in ELT_OPTIONAL_Layer.iteritems():
        ET.SubElement(layer, UpperLowerDict[key]).text = \
            tier.metadata.get(key, default)
def __format_segment(self, root, tier, ann):
    """Serialize annotation *ann* of *tier* as an Antx <Segment> element."""
    seg = ET.SubElement(root, 'Segment')

    # Elements SPPAS interprets directly.
    ET.SubElement(seg, 'Id').text = ann.metadata.get('id', gen_id())
    ET.SubElement(seg, 'IdLayer').text = tier.metadata['id']
    ET.SubElement(seg, 'Label').text = ann.GetLabel().GetValue()
    start_elt = ET.SubElement(seg, 'Start')
    dur_elt = ET.SubElement(seg, 'Duration')

    # A point localization has a midpoint; an interval starts at Begin.
    location = ann.GetLocation()
    if location.IsPoint():
        begin = location.GetPoint().GetMidpoint()
    else:
        begin = location.GetBegin().GetMidpoint()
    duration = location.GetDuration()

    # Antx stores times in frames: convert seconds using the sample rate.
    rate = float(self.metadata.get('samplerate', 44100))
    start_elt.text = str(begin * rate)
    dur_elt.text = str(duration * rate)

    # Remaining required Antx elements (those not written above).
    for key, default in ELT_REQUIRED_Segment.iteritems():
        if key not in ['id', 'idlayer', 'label', 'start', 'duration']:
            ET.SubElement(seg, UpperLowerDict[key]).text = \
                ann.metadata.get(key, default)

    # Optional Antx elements.
    for key, default in ELT_OPTIONAL_Segment.iteritems():
        ET.SubElement(seg, UpperLowerDict[key]).text = \
            ann.metadata.get(key, default)
def read(self, filename):
    """Read the file and fill the Transcription, one tier per channel.

    Each data line holds: wavname, channel, speaker, begin, end, word.
    """
    with codecs.open(filename, 'r', 'utf-8') as fp:
        tiers_by_channel = {}
        for raw in fp:
            stripped = raw.strip()
            # skip comment lines and blank lines
            if stripped.startswith(';;') or stripped == '':
                continue
            wavname, channel, speaker, begin, end, word = \
                stripped.split(None, 5)
            try:
                tier = tiers_by_channel[channel]
            except KeyError:
                tier = self.NewTier(channel)
                tiers_by_channel[channel] = tier
            tier.metadata['speaker'] = speaker
            interval = TimeInterval(TimePoint(float(begin)),
                                    TimePoint(float(end)))
            tier.Add(Annotation(interval, Label(word)))
    # attach the (last seen) wav file as media of every tier
    media = Media(gen_id(), wavname)
    self.SetMedia(media)
    for tier in self:
        tier.SetMedia(media)
    self.SetName(wavname)
    self.SetMinTime(0.)
    self.SetMaxTime(self.GetEnd())
def hello(self, msg):
    """ Register the UAID as an active listener. """
    self.uaid = msg['uaid'] if 'uaid' in msg else gen_id()
    status = 200
    if not self.dispatch.register(self.uaid, self.flush, msg):
        self.logger.log(type='error', severity=LOG.DEBUG,
                        msg="Could not register uaid %s" % self.uaid)
        # This is a bad request. Close the channel immediately.
        status = 500
        reply = {"messageType": "hello",
                 "status": status,
                 "uaid": self.uaid}
        try:
            self.send(reply)
        except Exception:
            pass
        self.close()
    # chain the flush function to send existing data.
    # Note: For alternate protocols (e.g. UDP, initialize the calls here.)
    return ({"messageType": "hello",
             "status": status,
             "uaid": self.uaid}, self.flush)
def open_review_bib_iclr_2018_2019(year, localfile):
    """Scrape a saved openreview ICLR 2018/2019 page into bib entries."""
    # this is different from others ..
    soup = BeautifulSoup(open(localfile), 'lxml')
    # each section selector is paired with the conference tag it maps to
    section_confs = [
        ('#accepted-oral-papers li.note', 'ICLR'),
        ('#accepted-poster-papers li.note', 'ICLR'),
        ('#workshop-papers li.note', 'ICLRWorkshop'),
    ]
    entries = []
    for selector, conf in section_confs:
        for paper in soup.select(selector):
            title = paper.h4.a.get_text().strip()
            # there is one error in 2018
            if title == 'No Title':
                continue
            pdflink = paper.select('.pdf-link')[0]['href']
            if not pdflink.startswith('http'):
                pdflink = 'https://openreview.net/' + pdflink
            authors = authors_str2lst(
                paper.select('.note-authors')[0].get_text())
            bibid = gen_id(year, conf, authors, title)
            entries.append(gen_single_bib(bibid, title,
                                          ' and '.join(authors),
                                          pdflink, year, conf))
    return ''.join(entries)
def open_review_bib_iclr_2013_2017(conf, year, localfile):
    """Scrape a manually-downloaded openreview ICLR 2013/2017 page.

    Papers after the first rejected title are dropped; papers after the
    first workshop title are tagged as ICLRWorkshop.
    """
    # (first rejected paper, first workshop paper) per supported year
    markers = {
        2013: ('Heteroscedastic Conditional Ordinal Random',
               'Why Size Matters: Feature Coding as Nystrom Sampling'),
        2017: ('Energy-Based Spherical Sparse Coding',
               'Learning Continuous Semantic Representations'),
    }
    assert (year in [2013, 2017])
    first_reject, first_workshop = markers[year]
    soup = BeautifulSoup(open(localfile), 'lxml')
    pieces = []
    for paper in soup.select('#notes .note.panel'):
        title = paper.select('h2')[0].get_text().strip()
        pdflink = paper.select('a.note_content_pdf')[0]['href']
        # everything from the first rejected paper onwards is ignored
        if first_reject in title:
            break
        # everything from here on belongs to the workshop track
        if first_workshop in title:
            conf = 'ICLRWorkshop'
        if not pdflink.startswith('http'):
            pdflink = 'https://openreview.net/' + pdflink
        authors = authors_str2lst(paper.select('.meta_row')[0].get_text())
        bibid = gen_id(year, conf, authors, title)
        pieces.append(gen_single_bib(bibid, title, ' and '.join(authors),
                                     pdflink, year, conf))
    return ''.join(pieces)
def iclr_bib_2015_2016(year):
    """Scrape the ICLR 2015/2016 accepted-papers archive into bib entries."""
    assert (year == 2015 or year == 2016)
    # 15 first workshop:
    first_workshop_15 = 'Learning Non-deterministic Representations with Energy-based Ensembles'
    link = ('https://iclr.cc/archive/www/doku.php%3Fid=iclr' + str(year)
            + ':accepted-main.html')
    conf = 'ICLR'
    html_file = download_to_hash(link)
    soup = BeautifulSoup(open(html_file), 'lxml')
    pieces = []
    for div in soup.select('li.level1 div'):
        title = div.a.get_text()
        pdflink = div.a['href']
        # drop the link so only the author names remain in the div text
        div.a.decompose()
        authors = authors_str2lst(div.get_text())
        if not authors:
            continue
        # papers from this title onwards are workshop papers
        if year == 2015 and first_workshop_15 in title:
            conf = 'ICLRWorkshop'
        bibid = gen_id(year, conf, authors, title)
        pieces.append(gen_single_bib(bibid, title, ' and '.join(authors),
                                     pdflink, year, conf))
    return ''.join(pieces)
def iclr_bib_2014():
    """Scrape the ICLR 2014 conference-proceedings page into bib entries.

    On that page, titles are the <p> blocks containing a link and authors
    are the link-free <p> blocks that follow, pairing up one-to-one.
    Returns the concatenated bib entries as a single string.
    """
    link = 'https://iclr.cc/archive/2014/conference-proceedings/'
    year = 2014
    conf = 'ICLR'
    html_file = download_to_hash(link)
    soup = BeautifulSoup(open(html_file), 'lxml')
    # all blocks
    ps = soup.select('#sites-canvas-main-content p')
    # remove the empty blocks
    ps = [p for p in ps if p.get_text().strip()]
    # remove the conference title: Listed below are the conference papers ..
    ps = ps[1:]
    # title blocks contain an <a>; author blocks do not
    ptitles = [p for p in ps if p.find('a')]
    pauthors = [p for p in ps if not p.find('a')]
    # NOTE: two dead list comprehensions that built (and discarded)
    # stripped texts of pauthors/ptitles were removed here.
    res = ''
    for ptitle, pauthor in zip(ptitles, pauthors):
        title = ptitle.get_text().strip()
        authors_str = pauthor.get_text().strip()
        authors = authors_str2lst(authors_str)
        # the link target is actually the arxiv page
        pdflink = ptitle.find('a')['href']
        id = gen_id(year, conf, authors, title)
        bib = gen_single_bib(id, title, ' and '.join(authors),
                             pdflink, year, conf)
        res += bib
    return res
def newSession():
    '''Start a new attack session'''
    global conn
    # connect to the DB if not already connected
    connect()
    # refuse to start while another session is in progress
    running = sql.check_running_sessions(conn)
    if running is not None:
        print("Session", running, "is already running... Try resuming.")
        end_session(2)
    # no session running: generate an ID for this one
    session_id = utils.gen_id()
    # gather the target details from the user
    host = input("FTP Host to attack: ")
    username = input("Target username: ")
    # make sure the FTP server is actually reachable
    if ftp.check_ftp(host) == 0:
        end_session(3)
    # record the session in the DB
    print(utils.sess_setup())
    if not sql.start_session(conn, session_id, host, username):
        end_session(5)
    # clean up the flags
    sql.clear_flags(conn)
    # OK - finally ready: announce and launch the attack
    print(utils.attack_start())
    attack_prog(session_id, host, username)
def read(self, filename):
    """ Read an Xtrans file and fill the Transcription.

    It creates a tier for each speaker-channel observed in the file.
    """
    with codecs.open(filename, 'r', 'utf-8') as fp:
        lines = fp.readlines()
        # first row is the tab-delimited header of column names
        rownames = lines[0].split('\t')
        lines.pop(0)
        medias = {}
        for raw in lines:
            # a comment
            if raw.startswith(';;'):
                continue
            # a tab-delimited line
            fields = raw.split('\t')
            channel = fields[rownames.index('channel;int')]
            speaker = fields[rownames.index('speaker;unicode')]
            tiername = speaker + '-' + channel
            tier = self.Find(tiername)
            if tier is None:
                # first time this speaker/channel appears: create its tier
                tier = Tier(tiername)
                mediaurl = fields[rownames.index('file;unicode')]
                # one media id per distinct media url
                if mediaurl not in medias:
                    medias[mediaurl] = gen_id()
                (mediamime, mediaencoding) = mimetypes.guess_type(mediaurl)
                media = Media(medias[mediaurl], mediaurl, mediamime)
                if mediaencoding is not None:
                    media.metadata["encoding"] = mediaencoding
                tier.SetMedia(media)
                tier.metadata["speakerName"] = speaker
                tier.metadata["speakerType"] = \
                    fields[rownames.index('speakerType;unicode')]
                tier.metadata["speakerDialect"] = \
                    fields[rownames.index('speakerDialect;unicode')]
                tier.metadata["mediaChannel"] = channel
                self.Append(tier)
            # Add the new annotation
            label = Label(fields[rownames.index('transcript;unicode')])
            begin = TimePoint(float(fields[rownames.index('start;float')]))
            end = TimePoint(float(fields[rownames.index('end;float')]))
            tier.Add(Annotation(TimeInterval(begin, end), label))
def __read_media(self, mediaRoot):
    """Create a Media from an Elan media descriptor and store it."""
    attrs = mediaRoot.attrib
    # MIME type is optional in the descriptor; default to empty
    media = Media(gen_id(),
                  attrs['MEDIA_URL'],
                  attrs.get('MIME_TYPE', ''))
    if 'RELATIVE_MEDIA_URL' in attrs:
        media.metadata['RELATIVE_MEDIA_URL'] = attrs['RELATIVE_MEDIA_URL']
    # Add media into Transcription();
    # but media not linked to tiers... Elan doesn't propose it
    self.AddMedia(media)
def __format_tier(self, tierRoot, tier):
    """Fill *tierRoot* with the tier's id, name, metadata and annotations."""
    new_id = gen_id()  # 't%d' % self.__tier_counter
    # record the id on the element, the tier and the writer's map
    tierRoot.set("id", new_id)
    tierRoot.set("tiername", tier.GetName())
    tier.metadata['id'] = new_id
    self.__tier_id_map[tier] = new_id
    self.__tier_counter += 1
    # write the metadata; drop the element again if it stayed empty
    metadataRoot = ET.SubElement(tierRoot, 'Metadata')
    XRA.__format_metadata(metadataRoot, tier)
    if len(metadataRoot.findall('Entry')) == 0:
        tierRoot.remove(metadataRoot)
    # one <Annotation> child per annotation in the tier
    for annotation in tier:
        annotationRoot = ET.SubElement(tierRoot, 'Annotation')
        XRA.__format_annotation(annotationRoot, annotation)
def acl_conference_bib(year, conf, link):
    """Scrape an aclanthology event page into bib entries."""
    # e.g. link = 'https://aclanthology.info/events/acl-2018'
    html_file = download_to_hash(link)
    soup = BeautifulSoup(open(html_file), 'lxml')
    pieces = []
    for p in soup.select('#content p'):
        title = p.strong.a.get_text()
        # author links all point under /people
        authors = [a.get_text() for a in p.select('a[href^=/people]')]
        if not authors:
            continue
        pdflink = p.a['href']
        bibid = gen_id(year, conf, authors, title)
        pieces.append(gen_single_bib(bibid, title, ' and '.join(authors),
                                     pdflink, year, conf))
    return ''.join(pieces)
def read(self, filename):
    """Read the file and fill the Transcription, one tier per channel.

    Each data line holds: wavname, channel, begin, duration, word,
    optionally followed by a score.
    """
    with codecs.open(filename, 'r', 'utf-8') as fp:
        tiers_by_channel = {}
        for raw in fp:
            stripped = raw.strip()
            # skip comment lines and blank lines
            if stripped.startswith(';;') or stripped == '':
                continue
            fields = stripped.split()
            wavname, channel, begin, duration, word = fields[:5]
            # optional trailing score
            score = fields[-1] if len(fields) > 5 else None
            if channel not in tiers_by_channel:
                tiers_by_channel[channel] = self.NewTier(channel)
            tier = tiers_by_channel[channel]
            interval = TimeInterval(
                TimePoint(float(begin)),
                TimePoint(float(begin) + float(duration)))
            label = Label(word)
            if score is not None:
                label.Get()[0].SetScore(float(score))
            tier.Add(Annotation(interval, label))
    # attach the (last seen) wav file as media of every tier
    media = Media(gen_id(), wavname)
    self.SetMedia(media)
    for tier in self:
        tier.SetMedia(media)
    self.SetName(wavname)
    self.SetMinTime(0.)
    self.SetMaxTime(self.GetEnd())
def nips_bib(year):
    """Scrape the NIPS proceedings index for *year* into bib entries."""
    conf = 'NIPS'
    # NIPS volume numbers start at 1 for 1988
    volume = year - 1988 + 1
    link = ('https://papers.nips.cc/book/advances-in-neural-information-processing-systems-'
            + str(volume) + '-' + str(year))
    html_file = download_to_hash(link)
    NIPS_pdf_prefix = 'https://papers.nips.cc/'
    pieces = ['\n']
    with open(html_file) as f:
        soup = BeautifulSoup(f, 'html.parser')
        for li in soup.select("div.main")[0].ul.find_all('li'):
            title = li.a.string
            authors = [author.string for author in li.select('.author')]
            pdflink = NIPS_pdf_prefix + li.a['href'] + '.pdf'
            # skip entries with a missing title or empty author list
            if title and authors:
                bibid = gen_id(year, conf, authors, title)
                pieces.append(gen_single_bib(bibid, title,
                                             ' and '.join(authors),
                                             pdflink, year, conf))
    return ''.join(pieces)
def ieee_conference_bib(year, conf, link):
    """Scrape an IEEE Xplore listing page into bib entries."""
    html_file = download_to_hash(link)
    soup = BeautifulSoup(open(html_file), 'lxml')
    pieces = []
    for div in soup.select('.txt'):
        # only keep entries that have both a title link and authors
        if not (div.h3.a and div.select('.authors a')):
            continue
        # strip embedded <formula> markup out of the title text
        for formula in div.select('formula'):
            formula.decompose()
        title = div.h3.get_text()
        # href looks like "/document/8461262/": extract the article number
        arnumber = re.findall(r'/(\d+)/', div.h3.a['href'])[0]
        pdflink = ('https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber='
                   + arnumber)
        authors = [a.get_text().strip() for a in div.select('.authors a')]
        bibid = gen_id(year, conf, authors, title)
        pieces.append(gen_single_bib(bibid, title, ' and '.join(authors),
                                     pdflink, year, conf))
    return ''.join(pieces)
def springer_bib(year, conf, link):
    """Return bib for this page only. """
    html_file = download_to_hash(link)
    soup = BeautifulSoup(open(html_file), 'lxml')
    pieces = []
    for paper in soup.select('.chapter-item'):
        meta = paper.select('.content-type-list__meta')[0]
        divs = meta.select('div')
        title = divs[0].get_text()
        authors = authors_str2lst(divs[1].get_text())
        # some conference may not have a pdflink, e.g.
        # https://link.springer.com//book/10.1007/BFb0015518
        pdflink = ''
        download_links = paper.select('a.test-book-toc-download-link')
        if download_links:
            pdflink = download_links[0]['href']
            if not pdflink.startswith('http'):
                pdflink = 'https://link.springer.com/' + pdflink
        bibid = gen_id(year, conf, authors, title)
        pieces.append(gen_single_bib(bibid, title, ' and '.join(authors),
                                     pdflink, year, conf))
    return ''.join(pieces)
'corresponds to the number of samples z ~ q(z) taken to evaluate the ELBO' 'NOTE: currently not implemented') # Model parameters parser.add_argument('--truncation', type=int, default=6, help='number of sticks') parser.add_argument('--alpha0', type=float, default=5., help='prior alpha for stick breaking Betas') parser.add_argument('--hidden', type=int, default=500, help='hidden states') parser.add_argument('--uuid', type=str, default=gen_id(), help='(somewhat) unique identifier for the model/job') parser.add_argument('--temp', type=float, default=1.0, help='Temperature of Concrete approximation used') # Logging + saving parameters parser.add_argument('--train-from', type=str, default=None, metavar='M', help='model to train from, if any') parser.add_argument( '--save', type=str,
def add_user_submit():
    """Handle the Ajax POST that registers a new member.

    Reads the submitted form fields, derives a member id from the chosen
    assembly/ministries, validates the uploaded photo and the uniqueness
    of email and primary phone, persists the new User, then notifies the
    member by email (preferred) or SMS. Returns a JSON Response whose
    payload carries a 'status' of 'OK' or 'FAIL' plus a message.
    """
    if request.method == 'POST':
        # get the form data transmitted by Ajax
        # form is an ImmutableMultiDict object
        # https://tedboy.github.io/flask/generated/generated/werkzeug.ImmutableMultiDict.html
        form = request.form
        # member_id = form.get('member_id')
        first_name = form.get('first_name').strip()
        last_name = form.get('last_name').strip()
        other_names = form.get('other_names').strip()
        gender = form.get('gender')
        occupation = form.get('occupation').strip()
        contact_phone_1 = form.get('contact_phone_1').strip()
        contact_phone_2 = form.get('contact_phone_2').strip()
        dob = form.get('dob')
        email = form.get('email').strip()
        marital_status = form.get('marital_status')
        assembly = form.get('assembly')
        # a member may belong to several ministries (multi-select field)
        ministry = form.getlist('ministry')
        group = form.get('group')
        password = form.get('password')
        comm_email = form.get('comm_email')
        comm_sms = form.get('comm_sms')
        comm_phone = form.get('comm_phone')
        address_line_1 = form.get('address_line_1').strip()
        address_line_2 = form.get('address_line_2').strip()
        digital_address_code = form.get('digital_address_code').strip()
        region = form.get('region').strip()
        district = form.get('district').strip()
        country = form.get('country')
        try:
            # the id encodes assembly + ministries; a falsy result means
            # the ministry combination was rejected
            member_id = utils.gen_id(assembly, ministry)
            if not member_id:
                return Response(json.dumps({
                    'status': 'FAIL',
                    'message': 'Invalid combination of ministries.'
                }), status=400, mimetype='application/json')
            if not utils.upload_photo(member_id):
                return Response(json.dumps({
                    'status': 'FAIL',
                    'message': 'Image error. Invalid photo.'
                }), status=400, mimetype='application/json')
            if utils.check_email_duplicates(email):
                return Response(json.dumps({
                    'status': 'FAIL',
                    'message': 'Email already exists.'
                }), status=400, mimetype='application/json')
            if utils.check_contact_duplicates(contact_phone_1):
                return Response(json.dumps({
                    'status': 'FAIL',
                    'message': 'Contact 1 already exists.'
                }), status=400, mimetype='application/json')
            # create new user object
            user = User(member_id=member_id,
                        first_name=first_name,
                        last_name=last_name,
                        other_names=other_names,
                        occupation=occupation,
                        email=email,
                        marital_status=marital_status,
                        assembly=assembly,
                        address_line_1=address_line_1,
                        address_line_2=address_line_2,
                        digital_address_code=digital_address_code,
                        region=region,
                        district=district,
                        country=country)
            # fields with validation/normalization go through setters
            user.set_gender(gender)
            user.set_contact_phone_1(contact_phone_1)
            user.set_contact_phone_2(contact_phone_2)
            user.set_dob(dob)
            user.set_ministry(ministry)
            user.set_group(group)
            user.set_password(password)
            user.set_comm_email(comm_email)
            user.set_comm_sms(comm_sms)
            user.set_comm_phone(comm_phone)
            # add the new user to the database and save the changes
            db.session.add(user)
            db.session.commit()
            # send confirmation email or sms
            if email:
                subject = "COP"
                msg_content = utils.compose_email_msg(member_id, password)
                utils.send_email(subject, email, msg_content)
            else:
                msg = utils.compose_sms_msg(member_id, password)
                utils.send_sms(msg, contact_phone_1)
            # return the success response to Ajax
            # return json.dumps({'status':'OK', 'message': 'successful'})
            return Response(json.dumps({
                'status': 'OK',
                'message': 'successful'
            }), status=200, mimetype='application/json')
        except Exception as e:
            # NOTE(review): broad catch-all — any failure (DB, email, SMS)
            # is only printed and reported as a generic 'Fatal error'
            print(e)
            print(form)
            return Response(json.dumps({
                'status': 'FAIL',
                'message': 'Fatal error'
            }), status=400, mimetype='application/json')