def store_tweet_payload_and_tweeter_from_twitter(payload_dicts_from_twitter, tweets):
    tweets_by_tweet_id = defaultdict(list)
    for tweet in tweets:
        tweets_by_tweet_id[tweet.tweet_id].append(tweet)

    for payload_dict in payload_dicts_from_twitter:
        tweet_id = payload_dict["id_str"]
        logger.debug("saving unsaved parts for tweet_id {tweet_id}".format(
            tweet_id=tweet_id))
        for tweet in tweets_by_tweet_id[tweet_id]:
            if not tweet.payload:
                tweet.payload = payload_dict
                logger.info(u"updated tweet payload for {tweet_id} {tiid}".format(
                    tweet_id=tweet_id, tiid=tweet.tiid))
            if "user" in payload_dict:
                try:
                    tweet.tweeter.set_attributes_from_twitter_data(payload_dict["user"])
                except AttributeError:
                    tweeter = Tweeter.query.get(tweet.screen_name)
                    if not tweeter:
                        tweeter = Tweeter(screen_name=tweet.screen_name)
                        db.session.add(tweeter)
                    tweeter.set_attributes_from_twitter_data(payload_dict["user"])
                    tweet.tweeter = tweeter
                commit(db)
                if tweet.tweeter:
                    logger.info(u"updated tweeter followers for {screen_name}".format(
                        screen_name=tweet.tweeter.screen_name))

def collect_embed(url_slug=None, min_url_slug=None):
    q = profile_query(url_slug, min_url_slug)
    start_time = datetime.datetime.utcnow()
    number_considered = 0.0
    number_markups = 0.0
    for profile in windowed_query(q, Profile.url_slug, 25):
        logger.debug("-->collecting embed for {url_slug}".format(
            url_slug=profile.url_slug))
        for product in profile.display_products:
            if not product.embed_markup:
                number_considered += 1
                try:
                    embed_markup = product.get_embed_markup()
                except Exception:
                    print "got an exception, skipping", product.aliases.best_url
                    continue
                if embed_markup:
                    print "GOT MARKUP for", product.tiid, product.host, product.aliases.best_url, embed_markup
                    # print " got an embed for", product.genre, "!"
                    product.embed_markup = embed_markup
                    db.session.add(product)
                    commit(db)
                    number_markups += 1
        elapsed_seconds = (datetime.datetime.utcnow() - start_time).seconds
        print "elapsed seconds=", elapsed_seconds, "; number per second=", number_considered/(0.1+elapsed_seconds)

def debug_biblio_for_live_profiles(args):
    url_slug = args.get("url_slug", None)
    min_url_slug = args.get("min_url_slug", None)
    q = profile_query(url_slug, min_url_slug)

    from totalimpact.providers.bibtex import Bibtex
    bibtex_provider = Bibtex()
    from totalimpactwebapp.product import put_biblio_in_product

    for profile in windowed_query(q, Profile.url_slug, 25):
        logger.info(u"in debug_biblio_for_live_profiles for {url_slug}".format(
            url_slug=profile.url_slug))
        for product in profile.products_not_removed:
            if product.biblio \
                    and hasattr(product.biblio, "journal") \
                    and "journal =" in product.biblio.journal \
                    and hasattr(product.biblio, "full_citation") \
                    and "journal" in product.biblio.full_citation:
                print "got one:", product.tiid, product.biblio.full_citation
                aliases = bibtex_provider.member_items(product.biblio.full_citation)
                print aliases
                for alias in aliases:
                    (ns, nid) = alias
                    if ns == "biblio":
                        product = put_biblio_in_product(product, nid, provider_name="bibtex")
                        print product.biblio
                        db.session.merge(product)
                        commit(db)
            else:
                pass

def update_mendeley_countries_for_live_profiles(url_slug=None, min_url_slug=None):
    q = profile_query(url_slug, min_url_slug)
    for profile in windowed_query(q, Profile.url_slug, 25):
        logger.info(u"{url_slug} processing mendeley countries".format(
            url_slug=profile.url_slug))
        for product in profile.display_products:
            metric = product.get_metric_by_name("mendeley", "countries")
            if metric:
                snap = metric.most_recent_snap
                if not snap.raw_value:
                    # logger.error(u"{url_slug} has NO SNAP for tiid {tiid}".format(
                    #     url_slug=profile.url_slug, tiid=product.tiid))
                    # don't save this one to the db
                    continue
                new_snap_value = {}
                for country_name, country_count in snap.raw_value.iteritems():
                    if country_name in country_iso_by_name:
                        iso = country_iso_by_name[country_name]
                        new_snap_value[iso] = country_count
                        # logger.error(u"{country_name} -> {iso}".format(
                        #     country_name=country_name, iso=iso))
                    else:
                        if len(country_name) != 2:
                            logger.error(u"Can't find country {country} in lookup".format(
                                country=country_name))
                        new_snap_value[country_name] = country_count
                if new_snap_value:
                    logger.info(u"New snap value {snap}".format(
                        snap=new_snap_value))
                    snap.raw_value = new_snap_value
                    db.session.add(snap)
                    commit(db)

def create_products_from_alias_tuples(profile_id, alias_tuples):
    tiid_alias_mapping = {}
    clean_aliases = [normalize_alias_tuple(ns, nid) for (ns, nid) in alias_tuples]
    tiids_to_update = []
    new_products = []
    for alias_tuple in clean_aliases:
        new_product = Product(profile_id=profile_id)
        (ns, nid) = alias_tuple
        if ns == "biblio":
            new_product = put_biblio_in_product(new_product, nid, provider_name="bibtex")
        else:
            new_product = put_aliases_in_product(new_product, [alias_tuple])
        new_product.set_last_refresh_start()
        new_products += [new_product]
        tiids_to_update += [new_product.tiid]

    db.session.add_all(new_products)
    commit(db)

    # has to be after commits to database
    start_product_update(profile_id, tiids_to_update, "high")

    return new_products

def subscribe(profile, stripe_token, coupon=None, plan="base-yearly"):
    full_name = u"{first} {last}".format(first=profile.given_name, last=profile.surname)
    stripe_customer = stripe.Customer.create(
        description=full_name,
        email=profile.email,
        plan=plan,
        coupon=coupon,
        card=stripe_token
    )
    # the stripe.Customer.create() call can throw all sorts of exceptions here,
    # including InvalidRequestError and CardError. if it does, none of the code
    # below will run, of course. the caller is responsible for handling these
    # errors.
    logger.debug(u"Made a Stripe ID '{stripe_id}' for profile '{slug}'".format(
        stripe_id=stripe_customer.id, slug=profile.url_slug
    ))
    profile.stripe_id = stripe_customer.id
    db.session.merge(profile)
    commit(db)
    return stripe_customer

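
# A minimal caller-side sketch (not from the original source) of the error
# handling the comment in subscribe() asks for. It assumes the stripe.error
# exception classes and a Flask-style view; `current_profile` is a placeholder
# for however the caller looks up the profile being subscribed.
def subscribe_view_sketch(current_profile, stripe_token):
    try:
        stripe_customer = subscribe(current_profile, stripe_token)
    except stripe.error.CardError as e:
        # card declined; subscribe() raised before saving anything
        abort_json(402, str(e))
    except stripe.error.InvalidRequestError as e:
        abort_json(400, str(e))
    return json_resp_from_thing({"stripe_id": stripe_customer.id})
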
def add_to_database_if_nonzero(product, new_content, method_name, provider_name):
    updated_product = None

    if new_content and product:
        if method_name == "aliases":
            updated_product = put_aliases_in_product(product, new_content)
        elif method_name == "biblio":
            updated_product = put_biblio_in_product(product, new_content, provider_name)
        elif method_name == "metrics":
            for metric_name in new_content:
                if new_content[metric_name]:
                    updated_product = put_snap_in_product(product, metric_name, new_content[metric_name])
        else:
            logger.warning(u"ack, supposed to save something i don't know about: " + str(new_content))

    if updated_product:
        updated_product.last_refresh_finished = datetime.datetime.utcnow()
        db.session.merge(updated_product)
        commit(db)
    else:
        product.last_refresh_finished = datetime.datetime.utcnow()
        db.session.add(product)
        commit(db)
    return

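
# Illustrative only (not from the original source): a made-up "metrics" payload
# showing how the method_name branches above get used. Metric names and counts
# here are invented; zero or empty values fall through without writing a snap.
def save_example_metrics_sketch(product):
    example_metrics = {
        "mendeley:readers": 12,     # truthy, saved via put_snap_in_product
        "altmetric_com:tweets": 0,  # falsy, skipped
    }
    add_to_database_if_nonzero(product, example_metrics,
                               method_name="metrics",
                               provider_name="mendeley")
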
def unsubscribe(profile):
    cu = stripe.Customer.retrieve(profile.stripe_id)
    cu.delete()  # permadeletes the customer obj on Stripe; all data lost

    profile.stripe_id = None  # now delete from our Profile
    db.session.merge(profile)
    commit(db)
    return profile

def patch_user_about(profile_id):
    profile = get_user_for_response(profile_id, request)
    abort_if_user_not_logged_in(profile)
    profile.patch(request.json["about"])
    commit(db)
    return json_resp_from_thing({"about": profile.dict_about()})

def log_interaction_event(tiid, ip, event, headers, timestamp):
    new_interaction = Interaction(
        tiid=tiid,
        timestamp=timestamp,
        event=event,
        ip=ip,
        headers=headers)
    db.session.add(new_interaction)
    commit(db)

def log_drip_email(profile, drip_milestone):
    now = datetime.datetime.utcnow()
    new_drip_email = DripEmail(
        profile_id=profile.id,
        profile_age_days=(now - profile.created).days,
        date_sent=now,
        drip_milestone=drip_milestone,
        email_version="first"
    )
    db.session.add(new_drip_email)
    commit(db)

def current_user_notifications(notification_name):
    # hardcode for now
    notification_name = "new_metrics_notification_dismissed"

    # it's Not RESTful to do this in a GET, but, whatevs.
    if request.args.get("action") == "dismiss":
        g.user.new_metrics_notification_dismissed = datetime.datetime.now()
        db.session.merge(g.user)
        commit(db)

    return json_resp_from_thing({"user": g.user.dict_about()})

def save_profile_last_viewed_profile_timestamp(profile_id, timestamp=None):
    # logger.debug(u"In save_profile_last_viewed_profile_timestamp with profile {profile_id}".format(
    #     profile_id=profile_id))
    profile = Profile.query.get(profile_id)
    db.session.merge(profile)
    if not timestamp:
        timestamp = now_in_utc()
    profile.last_viewed_profile = timestamp
    commit(db)
    return True

def save_profile_last_refreshed_timestamp(profile_id, timestamp=None):
    # logger.debug(u"In save_profile_last_refreshed_timestamp with profile {profile_id}".format(
    #     profile_id=profile_id))
    profile = Profile.query.get(profile_id)
    db.session.merge(profile)
    if not timestamp:
        timestamp = now_in_utc()
    profile.last_refreshed = timestamp
    profile.next_refresh = profile.last_refreshed + datetime.timedelta(days=profile.refresh_interval)
    commit(db)
    return True

def delete_products_from_profile(profile, tiids_to_delete):
    number_deleted = 0
    for product in profile.products_not_removed:
        if product.tiid in tiids_to_delete:
            number_deleted += 1
            product.removed = now_in_utc()
            db.session.add(product)
    commit(db)
    return True

def build_skipfiles(chunksize, infilename):
    outfilename = skiplook.skipfilename(infilename)
    if not os.path.exists(outfilename):
        print "building skipfile for `%s` " % infilename
        outfile = file(outfilename + '.new', 'w')
        sparse.main(chunksize, infilename, outfile)
        util.commit(outfile)
        os.rename(outfilename + '.new', outfilename)
        size = os.stat(outfilename).st_size
        if size > chunksize:
            build_skipfiles(chunksize, outfilename)
    else:
        print "don't need to build skip file for `%s` " % outfilename

def index_until_3(filename, reader, end_bytes, index_out, index_out_sorted):
    index_file = open(index_out, 'w')  # XXX no O_EXCL

    def add_term(term, offset):
        index_file.write('%s %s\n' % (term, offset))

    index_until(reader, add_term, end_bytes)
    util.commit(index_file)
    sort_file(index_out, index_out_sorted)
    util.commit(open(index_out_sorted, 'r+'))  # sort(1) might not fsync(2)

    # XXX raceable, but that's okay
    seg = fresh_name(os.path.join(index_dir_for(filename),
                                  'index-segment.%s' % os.getpid()))
    os.rename(index_out_sorted, seg)
    buildskips.build_skipfiles(chunksize, seg)

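
# util.commit() itself is not shown in this excerpt. Judging from how it is
# called on open file objects (and the "sort(1) might not fsync(2)" note above),
# it is presumably a flush-then-fsync helper along these lines; a sketch under
# that assumption, not the real implementation:
import os

def commit_file_sketch(fileobj):
    fileobj.flush()             # push Python's buffers down to the OS
    os.fsync(fileobj.fileno())  # ask the OS to force the bytes to disk
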
def user_password_modify(id):
    current_password = request.json.get("currentPassword", None)
    new_password = request.json.get("newPassword", None)
    id_type = request.args.get("id_type", "url_slug")  # url_slug is default

    try:
        if id_type == "reset_token":
            user = reset_password_from_token(id, request.json["newPassword"])
        else:
            user = reset_password(id, id_type, current_password, new_password)
    except PasswordResetError as e:
        abort_json(403, e.message)

    commit(db)
    return json_resp_from_thing({"about": user.dict_about()})

def after_refresh_complete(tiid, failure_message=None):
    # logger.info(u"here in after_refresh_complete with {tiid}".format(
    #     tiid=tiid))
    product = Product.query.get(tiid)
    if not product:
        logger.warning(u"Empty product in after_refresh_complete for tiid {tiid}".format(
            tiid=tiid))
        return None

    product.embed_markup = product.get_embed_markup()
    product.set_refresh_status(myredis, failure_message)  # need commit after this
    db.session.merge(product)
    commit(db)

    sleep_seconds = random.random() * 10
    logger.info(u"Sleeping {sleep_seconds}s in after_refresh_complete for {tiid} before checking done refreshes".format(
        sleep_seconds=sleep_seconds, tiid=tiid))
    time.sleep(sleep_seconds)

    profile = Profile.query.get(product.profile_id)
    if not profile:
        print "\n\n-------> no profile after done all refreshes?!?", product.profile_id, "\n\n\n---------------\n\n\n"
        return None

    refresh_status = profile.get_refresh_status()
    if refresh_status.is_done_refreshing and refresh_status.refresh_state == "progress bar":
        print "\n\n-------> done all refreshes", product.profile_id, "\n\n\n---------------\n\n\n"
        logger.info(u"just_finished_profile_refresh for {tiid}, now deduping etc".format(
            tiid=tiid))
        save_profile_refresh_status(profile, RefreshStatus.states["CRUNCHING"])

        logger.info(u"deduplicating for {url_slug}".format(
            url_slug=profile.url_slug))
        deleted_tiids = profile.remove_duplicates()

        logger.info(u"parse_and_save_tweets for {url_slug}".format(
            url_slug=profile.url_slug))
        profile.parse_and_save_tweets()

        save_profile_refresh_status(profile, RefreshStatus.states["ALL_DONE"])

def create_profile_from_slug(url_slug, profile_request_dict, db):
    # logger.debug(u"in create_profile_from_slug {url_slug} with profile_dict {profile_request_dict}".format(
    #     url_slug=url_slug, profile_request_dict=profile_request_dict))

    # have to explicitly unicodify ascii-looking strings even when encoding
    # is set by client, it seems:
    profile_dict = {k: unicode(v) for k, v in profile_request_dict.iteritems()}
    profile_dict["url_slug"] = unicode(url_slug)

    # all emails should be lowercase
    profile_dict["email"] = profile_dict["email"].lower()

    # move password to temp var so we don't instantiate the Profile with it...
    # passwords have to be set with a special setter method.
    password = profile_dict["password"]
    del profile_dict["password"]

    # make sure this slug isn't being used yet, in any upper/lower case combo
    profile_with_this_slug = Profile.query.filter(
        func.lower(Profile.url_slug) == func.lower(profile_dict["url_slug"])
    ).first()
    if profile_with_this_slug is not None:
        profile_dict["url_slug"] += str(random.randint(1, 9999))

    # make sure this email isn't being used yet
    profile_with_this_email = Profile.query.filter(
        Profile.email == profile_dict["email"]
    ).first()
    if profile_with_this_email is not None:
        raise EmailExistsError  # the caller needs to deal with this.

    # ok, let's make a profile:
    profile = Profile(**profile_dict)
    db.session.add(profile)
    profile.set_password(password)
    commit(db)

    logger.debug(u"Finished creating profile {id} with slug '{slug}'".format(
        id=profile.id, slug=profile.url_slug
    ))
    return profile

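
# Illustrative only (not from the original source): create_profile_from_slug()
# raises EmailExistsError and expects the caller to deal with it. A hedged
# sketch of that handling in a Flask-style view, assuming the signup payload
# arrives as request.json.
def create_profile_view_sketch(url_slug):
    try:
        profile = create_profile_from_slug(url_slug, request.json, db)
    except EmailExistsError:
        abort_json(409, "That email already has a profile.")
    return json_resp_from_thing({"about": profile.dict_about()})
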
def ip_deets():
    from totalimpactwebapp.interaction import Interaction
    from totalimpactwebapp.interaction import get_ip_insights

    q = db.session.query(Interaction)
    cache = {}
    for interaction in windowed_query(q, Interaction.ip, 25):
        if interaction.country:
            continue
        if interaction.ip in cache:
            interaction.country, interaction.user_type = cache[interaction.ip]
        else:
            insights = get_ip_insights(interaction.ip)
            interaction.country = insights.country.iso_code
            interaction.user_type = insights.traits.user_type
            cache[interaction.ip] = interaction.country, interaction.user_type
        print interaction.country, interaction.user_type
        db.session.add(interaction)
        commit(db)

def index_some(filename):
    mbox = open(filename)
    index_dir = index_dir_for(filename)
    start_bytes = get_indexed_up_to(index_dir)
    reader = MessageReader(mbox, start_bytes)
    if reader.end_of_file:
        return False
    # 50 megs is chosen to be around a minute's worth of work, because
    # that's a reasonable amount of progress to lose if you hit ^C
    indexed_up_to = index_until_2(filename, reader, start_bytes + 50*1000*1000)
    # XXX no O_EXCL
    new_indexed_up_to = os.path.join(index_dir, '.new-indexed-up-to')
    new_indexed_up_to_file = open(new_indexed_up_to, 'w')
    new_indexed_up_to_file.write('%s\n' % reader.next_message_offset)
    util.commit(new_indexed_up_to_file)
    os.rename(new_indexed_up_to, os.path.join(index_dir, '.indexed-up-to'))
    return True

def write_to_pinboard(profile_id, list_of_pins, col):
    board = Pinboard.query.filter_by(profile_id=profile_id).first()
    if board:
        # logger.info(u"saving board for {profile_id}: previous contents: {contents}".format(
        #     profile_id=profile_id, contents=board.contents))
        try:
            board.contents[col] = list_of_pins
        except TypeError:
            new_contents = new_contents_dict()
            new_contents[col] = list_of_pins
            board.contents = new_contents
        board.timestamp = datetime.datetime.utcnow()
    else:
        board = save_new_board(profile_id)
        board.contents[col] = list_of_pins

    # logger.info(u"saving board for {profile_id}: new contents: {contents}".format(
    #     profile_id=profile_id, contents=board.contents))
    db.session.merge(board)
    commit(db)
    return board.contents

def product_pdf(tiid):
    if request.method == "GET":
        try:
            product = get_product(tiid)
            pdf = product.get_pdf()
            db.session.merge(product)  # get_pdf might have cached the pdf
            commit(db)
            if pdf:
                resp = make_response(pdf, 200)
                resp.mimetype = "application/pdf"
                resp.headers.add("Content-Disposition",
                                 "attachment; filename=impactstory-{tiid}.pdf".format(tiid=tiid))
                return resp
            else:
                abort_json(404, "This product exists, but has no pdf.")
        except IndexError:
            abort_json(404, "That product doesn't exist.")
        except S3ResponseError:
            abort_json(404, "This product exists, but has no pdf.")

def patch_biblio(tiid, patch_dict, provider="user_provided"):
    product = Product.query.get(tiid)
    for biblio_name, biblio_value in patch_dict.iteritems():
        biblio_row_object = BiblioRow.query.filter_by(
            tiid=tiid, provider=provider, biblio_name=biblio_name).first()
        if biblio_row_object:
            biblio_row_object.biblio_value = biblio_value
            biblio_row_object.provider = provider
        else:
            biblio_row_object = BiblioRow(
                biblio_name=biblio_name,
                biblio_value=biblio_value,
                provider=provider)
            product.biblio_rows.append(biblio_row_object)
        if biblio_name == "free_fulltext_url":
            product.embed_markup = product.get_embed_markup()  # alters an attribute, so caller should commit

    commit(db)
    return {"product": product}

def hydrate_twitter_text_and_followers(profile_id, altmetric_twitter_posts):
    logger.info(u"in hydrate_twitter_text_and_followers for profile {profile_id}".format(
        profile_id=profile_id))

    tweets_to_hydrate_from_twitter = []

    # get them all at once into the session so gets below go faster
    tweets = Tweet.query.filter(Tweet.profile_id == profile_id)
    tweet_dict = dict([((tweet.tweet_id, tweet.tiid), tweet) for tweet in tweets])

    for tiid, post_list in altmetric_twitter_posts.iteritems():
        for post in post_list:
            #### store tweet and tweeter stuff from altmetric
            tweet_id = post["tweet_id"]
            screen_name = post["author"]["id_on_source"]

            if (tweet_id, tiid) in tweet_dict.keys():
                tweet = tweet_dict[(tweet_id, tiid)]
                if not tweet.tweet_text and not tweet.is_deleted:
                    tweets_to_hydrate_from_twitter.append(tweet)
            else:
                if not Tweet.query.get((tweet_id, tiid)):
                    tweet = Tweet(tweet_id=tweet_id, tiid=tiid)
                    tweet.set_attributes_from_altmetric_post(post)
                    tweet.profile_id = profile_id
                    tweets_to_hydrate_from_twitter.append(tweet)
                    db.session.add(tweet)

            if not tweet.tweeter:
                tweeter = Tweeter.query.get(screen_name)
                if not tweeter:
                    tweeter = Tweeter(screen_name=screen_name)
                    db.session.add(tweeter)
                tweeter.set_attributes_from_altmetric_post(post)
            commit(db)

    logger.info(u"before tweets_to_hydrate_from_twitter for {profile_id}".format(
        profile_id=profile_id))

    if tweets_to_hydrate_from_twitter:
        # save the altmetric stuff first
        commit(db)

        tweet_ids = [tweet.tweet_id for tweet in tweets_to_hydrate_from_twitter]
        logger.info(u"calling get_and_save_tweet_text_and_tweeter_followers for profile {profile_id}".format(
            profile_id=profile_id))
        get_and_save_tweet_text_and_tweeter_followers(tweets_to_hydrate_from_twitter)
        commit(db)
    else:
        logger.info(u"no tweets to hydrate for profile {profile_id}".format(
            profile_id=profile_id))
    return

        'us.census.tiger.county_clipped')) foo
    ORDER BY ST_NPoints(the_geom) DESC LIMIT 50;'''):
    q_formatted = q.format(
        schema='cdb_observatory.' if USE_SCHEMA else '',
    )
    start = time()
    resp = query(q_formatted)
    end = time()
    print('{} for {}'.format(int(end - start), q_formatted))
    if q.lower().startswith('insert'):
        if resp.rowcount == 0:
            raise Exception('''Performance fixture creation "{}" inserted 0 rows,
                this will break tests.  Check the query to determine what is
                going wrong.'''.format(q_formatted))
commit()

ARGS = {
    ('OBS_GetMeasureByID', None): "name, 'us.census.acs.B01001002', '{}'",
    ('OBS_GetMeasure', 'predenominated'): "{}, 'us.census.acs.B01003001', null, {}",
    ('OBS_GetMeasure', 'area'): "{}, 'us.census.acs.B01001002', 'area', {}",
    ('OBS_GetMeasure', 'denominator'): "{}, 'us.census.acs.B01001002', 'denominator', {}",
    ('OBS_GetCategory', None): "{}, 'us.census.spielman_singleton_segments.X10', {}",
    ('_OBS_GetGeometryScores', None): "{}, NULL",
}


def record(params, results):
    sha = os.environ['OBS_EXTENSION_SHA']
    msg = os.environ.get('OBS_EXTENSION_MSG')

    screen_names_string = ",".join([tweeter.screen_name for tweeter in tweeters])
    print ", ".join([tweeter.screen_name for tweeter in tweeters])

    try:
        response = client.api.users.lookup.post(screen_name=screen_names_string)
        handle_all_user_lookups(response.data, tweeters)
    except TwitterApiError, e:
        logger.exception("TwitterApiError error, skipping")
    except TwitterClientError, e:
        logger.exception("TwitterClientError error, skipping")
    except TwitterRateLimitError, e:
        logger.exception("TwitterRateLimitError error, skipping")
        # not totally sure what else I should do here. retry somehow, or catch on cleanup run?

    commit(db)
    return


# example payload from twitter: https://dev.twitter.com/rest/reference/get/users/lookup
class Tweeter(db.Model):
    screen_name = db.Column(db.Text, primary_key=True)
    twitter_id = db.Column(db.Integer)  # alter table tweeter add twitter_id int4
    followers = db.Column(db.Integer)
    name = db.Column(db.Text)
    description = db.Column(db.Text)
    location = db.Column(db.Text)       # alter table tweeter add location text
    image_url = db.Column(db.Text)
    profile_url = db.Column(db.Text)    # alter table tweeter add profile_url text

def delete_profile(profile):
    db.session.delete(profile)
    commit(db)

def upload_file_and_commit(product, file_to_upload, db):
    resp = product.upload_file(file_to_upload)
    commit(db)
    return resp

def commit():
    if not os.getenv("DEV") == "true":
        util.commit(
            os.getenv("INPUT_COMMIT_MESSAGE", "Update contributors list"))
    else:
        print("Running in dev mode, not committing")

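
# Note that util.commit() here takes a commit message, unlike the file-sync
# helper used by the indexer functions above, so it is presumably a git
# wrapper. A rough sketch under that assumption only (the real helper may
# differ, e.g. in author configuration or in how it handles "nothing to
# commit"):
import subprocess

def git_commit_sketch(message):
    subprocess.run(["git", "add", "-A"], check=True)
    subprocess.run(["git", "commit", "-m", message], check=True)
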
def save_profile_refresh_status(profile, status_string):
    profile.refresh_status = status_string
    db.session.add(profile)
    commit(db)

def _worker(self, data, addr):
    data = str(data, 'utf-8')
    print("\n")
    dataDict = json.loads(data)
    print(addr)
    print(data)

    ###################################
    # Request Handler
    ###################################
    if (dataDict["type"] == "Request"):
        # print("R-1")
        request = model.decodeJson(data)

        # Check the cache to see if already responded to the same request before
        self.lock.acquire()
        try:
            seek = self.conn.cursor().execute(
                '''
                SELECT * from request where requestNumber like ? and IP like ? and client like ?
                ''', (request.requestNumber, str(addr[0]), request.requestClientName)
            ).fetchall()
        except Exception as e:
            print(e)
            pass

        if (len(seek) > 0):
            if (seek[0][0] == data):
                # print("Message deja vu")
                response = seek[0][1]
                self._sender(response, addr[0], int(request.requestClientName))
                # self.s.sendto(response.encode(), (addr[0], int(request.requestClientName)))
            else:
                # print("Invalid message")
                response = model.Response(request.requestNumber, request.requestClientName,
                                          "The request number has been previously used for another message")
                response = model.encode(response)
                self._sender(response, addr[0], int(request.requestClientName))
                # self.s.sendto(response.encode(), (addr[0], int(request.requestClientName)))
            self.lock.release()
            return

        # print("R-2")
        # Check if a free slot is available, respond accordingly and update cache
        freeSlot = True
        try:
            seek = self.conn.cursor().execute(
                '''
                SELECT * from booked where date=? and time=? and status!='Cancelled'
                ''', (request.date, request.time)
            ).fetchall()
        except Exception as e:
            print("Something went wrong")
            print(e)
            pass

        meetingNumber = -1
        message = ""
        # print("R-3")
        if (len(seek) > self.meetingRoomNum):
            # print("R-4")
            response = model.Response(request.requestNumber, request.requestClientName, "No room available")
            message = model.encode(response)
            try:
                self._sender(message, addr[0], int(request.requestClientName))
                # self.s.sendto(message.encode(), (addr[0], int(request.requestClientName)))
            except Exception as e:
                print(e)
                pass
        else:
            # print("R-5")
            invite = model.Invite(request.date, request.time, request.topic, addr[0],
                                  request.requestClientName, request.requestClientName)
            meetingNumber = invite.meetingNumber
            try:
                self.conn.cursor().execute(
                    '''
                    INSERT INTO meetingNum(lastMeetingNum) VALUES (?)
                    ''', (invite.meetingNumber,)
                )
                self.conn.cursor().execute(
                    '''
                    INSERT INTO invite(meetingNumber, invite, min) VALUES (?, ?, ?)
                    ''', (invite.meetingNumber, model.encode(invite), request.minimum)
                )
            except Exception as e:
                print(e)
                pass

            print(request.participant)
            for participant in request.participant:
                # print("R-5-1")
                invite.targetName = participant[1]
                message = model.encode(invite)
                try:
                    # print(participant[0])
                    # print(int(invite.targetName))
                    self._sender(message, participant[0], int(invite.targetName))
                    # self.s.sendto(message.encode(), (participant[0], int(invite.targetName)))
                except Exception as e:
                    print(e)
                    pass
                try:
                    self.conn.cursor().execute(
                        '''
                        INSERT INTO inviteList(meetingNumber, ip, client, status, message) VALUES (?, ?, ?, ?, ?)
                        ''', (invite.meetingNumber, participant[0], participant[1], "Sent", message)
                    )
                except Exception as e:
                    print(e)
                    pass

        # print("R-6")
        self.conn.cursor().execute(
            '''
            INSERT INTO request(request, prevResponse, IP, client, requestNumber, meetingNumber)
            VALUES (?, ?, ?, ?, ?, ?)
            ''', (data, message, addr[0], request.requestClientName, request.requestNumber, meetingNumber)
        )
        util.commit(self.conn)
        self.lock.release()

    ###################################
    # Accept or Reject or Add handler
    ###################################
    if (dataDict["type"] == "Accept" or dataDict["type"] == "Reject" or dataDict["type"] == "Add"):
        # Note that a client will continuously ping the server (send its accept-meeting or
        # reject-meeting message every 5 seconds) until it receives either a cancel, confirm,
        # scheduled or not_scheduled message from the server
        acceptOrReject = model.decodeJson(data)

        # Does the referenced meeting exist?
        self.lock.acquire()
        invite = self.conn.cursor().execute(
            '''
            SELECT * from invite where meetingNumber=?
            ''', (acceptOrReject.meetingNumber,)
        ).fetchall()

        # If the client sent us a meeting number that does not exist in the system, we'll send a
        # cancel message (instead of ignoring it) to avoid getting continuously pinged by the client
        if (len(invite) == 0):
            message = model.Cancel(acceptOrReject.meetingNumber, "Meeting does not exist")
            response = model.encode(message)
            self._sender(response, addr[0], int(acceptOrReject.clientName))
            # self.s.sendto(response.encode(), (addr[0], int(acceptOrReject.clientName)))
            self.lock.release()
            return

        if (len(invite) > 0):
            originalInvite = model.decodeJson(invite[0][1])
            requesterIP = originalInvite.requesterIP
            requesterPort = originalInvite.requesterName

            # Was this message sent by someone invited to the referenced meeting?
            inviteList = self.conn.cursor().execute(
                '''
                SELECT * from inviteList where meetingNumber=? and ip=? and client=?
                ''', (acceptOrReject.meetingNumber, addr[0], acceptOrReject.clientName)
            ).fetchall()

            # If the client is not invited to the meeting, was this an add request?
            if (len(inviteList) == 0):
                # If yes, we'll add it to the inviteList since the add request came from a new participant
                if (dataDict["type"] == "Add"):
                    self.conn.cursor().execute(
                        '''
                        INSERT INTO inviteList(meetingNumber, ip, client, status) VALUES (?, ?, ?, ?)
                        ''', (acceptOrReject.meetingNumber, addr[0], acceptOrReject.clientName, "Added")
                    )
                    # Refresh the invite list
                    inviteList = self.conn.cursor().execute(
                        '''
                        SELECT * from inviteList where meetingNumber=? and ip=? and client=?
                        ''', (acceptOrReject.meetingNumber, addr[0], acceptOrReject.clientName)
                    ).fetchall()
                else:
                    # If no, we'll send a cancel message (instead of ignoring it) to avoid
                    # getting continuously pinged by the client
                    print("Participant was not invited")
                    message = model.Cancel(acceptOrReject.meetingNumber, "You are not invited to this meeting")
                    response = model.encode(message)
                    self._sender(response, addr[0], int(acceptOrReject.clientName))
                    # self.s.sendto(response.encode(), (addr[0], int(acceptOrReject.clientName)))
                    self.lock.release()
                    return

            if (dataDict["type"] == "Add"):
                # If add request, we'll notify the original requester
                added = model.Added(acceptOrReject.meetingNumber, addr[0], acceptOrReject.clientName)
                message = model.encode(added)
                self._sender(message, requesterIP, int(requesterPort))
                # self.s.sendto(message.encode(), (requesterIP, int(requesterPort)))
                # From now on, the "add" request will be treated as an "accept" request with the same logic
                acceptOrReject = model.Accept(acceptOrReject.meetingNumber, acceptOrReject.clientName)

            # Update the tally of accepted and refused participants
            # if (inviteList[0][3] == "Sent"):
            if (acceptOrReject.type == "Accept"):
                self.conn.cursor().execute(
                    '''
                    UPDATE inviteList SET status=? where meetingNumber=? and ip=? and client=?
                    ''', ("Accepted", acceptOrReject.meetingNumber, addr[0], acceptOrReject.clientName)
                )
            else:
                self.conn.cursor().execute(
                    '''
                    UPDATE inviteList SET status=? where meetingNumber=? and ip=? and client=?
                    ''', ("Refused", acceptOrReject.meetingNumber, addr[0], acceptOrReject.clientName)
                )

            # What's the total number of invitees for this meeting?
            totalInvites = self.conn.cursor().execute(
                '''
                SELECT COUNT(*) from inviteList where meetingNumber=?
                ''', (acceptOrReject.meetingNumber,)
            ).fetchone()[0]

            # How many accepted so far?
            totalAcceptedSoFar = self.conn.cursor().execute(
                '''
                SELECT COUNT(*) from inviteList where meetingNumber=? and status='Accepted'
                ''', (acceptOrReject.meetingNumber,)
            ).fetchone()[0]

            # How many refused or withdrawn so far?
            totalRefusedSoFar = self.conn.cursor().execute(
                '''
                SELECT COUNT(*) from inviteList where meetingNumber=? and (status='Refused' or status='Withdrawn')
                ''', (acceptOrReject.meetingNumber,)
            ).fetchone()[0]

            # Based on the current accepted or refused tally, can the meeting still happen?
            minThreshold = invite[0][2]
            howManyCanStillAccept = totalInvites - totalRefusedSoFar

            # If there are insufficient responses to come to a conclusion, we stop here, wait for
            # more responses, and do nothing for now; none of the code below will execute.
            # if (totalAcceptedSoFar < minThreshold and howManyCanStillAccept >= minThreshold):
            #     self.lock.release()
            #     return

            # Note that since the scheduler works on a first come first served basis, while it
            # guarantees a room was free at the time the request was made, by the time the meeting
            # gets confirmed, another request might have taken the room.
            # If that happens, we will send a cancel message instead.
            freeSlot = True
            addMeeting = True
            try:
                seek = self.conn.cursor().execute(
                    '''
                    SELECT * from booked where date=? and time=? and status !="Cancelled"
                    ''', (originalInvite.date, originalInvite.time)
                ).fetchall()
                # seek = self.conn.cursor().execute(
                #     '''
                #     SELECT * from booked where date=? and time=? and meetingId!=? and status!='Cancelled'
                #     ''', (originalInvite.date, originalInvite.time, originalInvite.meetingNumber)
                # ).fetchall()
                # print(originalInvite.meetingNumber)
                # print(len(seek))
            except Exception as e:
                print("Something went wrong")
                print(e)
                pass

            # If the meeting has already been scheduled, do not add a new entry.
            if (len(seek) >= self.meetingRoomNum):
                meetingNumber = seek[0][2]
                # print("*!*!*!*!**!*!*!*!!*")
                seek = self.conn.cursor().execute(
                    '''
                    SELECT * from booked where meetingId=?
                    ''', (originalInvite.meetingNumber,)
                ).fetchall()
                # print(len(seek))
                if (len(seek) == 0):
                    freeSlot = False
                else:
                    addMeeting = False

            # If we have reached the min participant threshold for the first time, we will batch
            # send messages to all those who have previously accepted the invite. Otherwise, a
            # message will be sent only to the current correspondent.
            # The original meeting creator will get a slightly different confirmation than the rest
            # of the participants; we will peek at the original invite cached by the server to find
            # out the identity of the original meeting creator.
            # The original meeting creator will get a new scheduled message with an updated list of
            # participants each time a new participant accepts the invite after the original
            # scheduled message was sent.
            acceptedParticipants = self.conn.cursor().execute(
                '''
                SELECT * FROM inviteList where meetingNumber=? and status=?
                ''', (acceptOrReject.meetingNumber, "Accepted")
            ).fetchall()

            confirm = model.Confirm(originalInvite.meetingNumber, len(seek) + 1)
            if (freeSlot == True and totalAcceptedSoFar == minThreshold):
                # print("DB - 1")
                if addMeeting:
                    try:
                        self.conn.cursor().execute(
                            '''
                            INSERT INTO booked(date, time, meetingId, status, room) VALUES (?, ?, ?, ?, ?)
                            ''', (originalInvite.date, originalInvite.time, originalInvite.meetingNumber,
                                  "booked", len(seek) + 1)
                        )
                    except Exception as e:
                        print("Error Occurred*****")
                        print(e)
                        pass
                    try:
                        self.conn.cursor().execute(
                            '''
                            INSERT INTO meetingToRoom(meetingNumber, room) VALUES (?, ?)
                            ''', (originalInvite.meetingNumber, len(seek) + 1)
                        )
                    except Exception as e:
                        print("Error Occurred*****")
                        print(e)
                        pass
                message = model.encode(confirm)

            # print("loc-0")
            seekMeetingRoom = self.conn.cursor().execute(
                '''
                SELECT * from meetingToRoom where meetingNumber=?
                ''', (originalInvite.meetingNumber,)
            ).fetchall()
            meetingRoom = -1
            if (len(seekMeetingRoom) > 0):
                meetingRoom = seekMeetingRoom[0][1]

            listConfirmedParticipant = []
            for participant in acceptedParticipants:
                pIp = participant[1]
                pName = participant[2]
                listConfirmedParticipant.append([pIp, pName])
                if (pIp != requesterIP or pName != requesterPort):
                    if (freeSlot == True and totalAcceptedSoFar == minThreshold):
                        try:
                            # print("Confirm-0")
                            self._sender(message, pIp, int(pName))
                            # self.s.sendto(message.encode(), (pIp, int(pName)))
                        except Exception as e:
                            print(e)
                            pass

            # If we find out the number of accepted participants can no longer meet the min
            # participant threshold, or no room is available, we'll send a cancel request to every
            # participant regardless of accepted or refused status
            if (howManyCanStillAccept == (minThreshold - 1) or freeSlot == False):
                allParticipants = self.conn.cursor().execute(
                    '''
                    SELECT * FROM inviteList where meetingNumber=? and status!='withdrawn'
                    ''', (acceptOrReject.meetingNumber,)
                ).fetchall()
                cancel = model.Cancel(originalInvite.meetingNumber, "Below Minimum Participant")
                if (freeSlot == False):
                    cancel = model.Cancel(originalInvite.meetingNumber,
                                          "The room is no longer available due to another request confirming the room before you")
                message = model.encode(cancel)
                for participant in allParticipants:
                    pIp = participant[1]
                    pName = participant[2]
                    if (pIp != requesterIP or pName != requesterPort):
                        try:
                            # print("Cancel-0")
                            self._sender(message, pIp, int(pName))
                            # self.s.sendto(message.encode(), (pIp, int(pName)))
                        except Exception as e:
                            print(e)
                            pass

            # Creating and sending message to requester.
            # print("loc-1")
            oldRequest = self.conn.cursor().execute(
                '''
                SELECT * FROM request where meetingNumber=?
                ''', (originalInvite.meetingNumber,)
            ).fetchall()
            requestNumber = oldRequest[0][5]

            message = ''
            if (totalAcceptedSoFar >= minThreshold and freeSlot == True):
                scheduled = model.Scheduled(requestNumber, originalInvite.meetingNumber, meetingRoom,
                                            listConfirmedParticipant)
                message = model.encode(scheduled)
                try:
                    # print("Schedule-2")
                    self._sender(message, requesterIP, int(requesterPort))
                    # self.s.sendto(message.encode(), (requesterIP, int(requesterPort)))
                except Exception as e:
                    print(e)
                    pass

            if (howManyCanStillAccept < minThreshold or freeSlot == False):
                non_schedule = model.Non_Scheduled(requestNumber, originalInvite.meetingNumber,
                                                   originalInvite.date, originalInvite.time,
                                                   minThreshold, listConfirmedParticipant,
                                                   originalInvite.topic)
                message = model.encode(non_schedule)
                try:
                    # print("Non_Schedule-1")
                    self._sender(message, requesterIP, int(requesterPort))
                    # self.s.sendto(message.encode(), (requesterIP, int(requesterPort)))
                except Exception as e:
                    print(e)
                    pass

            # print("loc-2")
            # If we already reached the min participant threshold before and a new participant
            # accepts the meeting, we will only send a confirmation in response to the current
            # sender instead of a batch message to all participants.
            # The requester should have received a new participant list including this new
            # participant via the code above.
            # Edge case handler -> sender did not receive the first confirm response. We'll resend
            # the message here.
            if (freeSlot == True and totalAcceptedSoFar > minThreshold):
                if (requesterIP != addr[0] or requesterPort != acceptOrReject.clientName):
                    try:
                        # print("confirm-1")
                        confirm = model.Confirm(originalInvite.meetingNumber, meetingRoom)
                        message = model.encode(confirm)
                        self._sender(message, addr[0], int(acceptOrReject.clientName))
                        # self.s.sendto(message.encode(), (addr[0], int(acceptOrReject.clientName)))
                    except Exception as e:
                        print(e)
                        pass

            # Edge case handler -> meeting cancelled, but sender did not receive the first cancelled
            # response. We'll resend the message
            if (requesterIP != addr[0] or requesterPort != acceptOrReject.clientName):
                if (howManyCanStillAccept < minThreshold - 1):
                    cancel = model.Cancel(originalInvite.meetingNumber, "Below Minimum Participant")
                    message = model.encode(cancel)
                    try:
                        # print("cancel-1")
                        self._sender(message, addr[0], int(acceptOrReject.clientName))
                        # self.s.sendto(message.encode(), (addr[0], int(acceptOrReject.clientName)))
                    except Exception as e:
                        print(e)
                        pass

        self.lock.release()

    ###################################
    # Withdraw handler
    ###################################
    if (dataDict["type"] == "Withdraw"):
        withdraw = model.decodeJson(data)

        # Does the referenced meeting exist?
        self.lock.acquire()
        seek = self.conn.cursor().execute(
            '''
            SELECT * from booked where meetingId=?
            ''', (withdraw.meetingNumber,)
        ).fetchall()
        if (len(seek) > 0):
            # Was the person in the invite list?
            seek = self.conn.cursor().execute(
                '''
                SELECT * from inviteList where meetingNumber=? and ip=? and client=?
                ''', (withdraw.meetingNumber, addr[0], int(withdraw.clientName))
            ).fetchall()
            if (len(seek) > 0):
                # If yes, fetch the saved copy of the original invite to find out the ip and
                # sessionName of the requester, the min threshold, etc.
                seek = self.conn.cursor().execute(
                    '''
                    SELECT * from invite where meetingNumber=?
                    ''', (withdraw.meetingNumber,)
                ).fetchall()
                # And update the status of the invitee to withdrawn
                self.conn.cursor().execute(
                    '''
                    UPDATE inviteList SET status=? where meetingNumber=? and ip=? and client=?
                    ''', ("Withdrawn", withdraw.meetingNumber, addr[0], withdraw.clientName)
                )
                if (len(seek) > 0):
                    inviteStr = seek[0][1]
                    invite = model.decodeJson(inviteStr)
                    minParticipant = int(seek[0][2])

                    # check to see if below min now
                    # How many accepted now?
                    totalAcceptedSoFar = self.conn.cursor().execute(
                        '''
                        SELECT COUNT(*) from inviteList where meetingNumber=? and status='Accepted'
                        ''', (withdraw.meetingNumber,)
                    ).fetchone()[0]

                    # Based on the current accepted count, can the meeting still happen after the withdrawal?
                    if (totalAcceptedSoFar < minParticipant):
                        # cancel the entire meeting; send notification to all participants
                        allParticipants = self.conn.cursor().execute(
                            '''
                            SELECT * FROM inviteList where meetingNumber=? and status!='Withdrawn'
                            ''', (withdraw.meetingNumber,)
                        ).fetchall()
                        self.conn.cursor().execute(
                            '''
                            UPDATE booked SET status="Cancelled" where meetingId=?
                            ''', (withdraw.meetingNumber,)
                        )
                        cancel = model.Cancel(withdraw.meetingNumber, "Below Minimum Participant due to withdrawal")
                        message = model.encode(cancel)
                        for participant in allParticipants:
                            pIp = participant[1]
                            pName = participant[2]
                            try:
                                self._sender(message, pIp, int(pName))
                                # self.s.sendto(message.encode(), (pIp, int(pName)))
                            except Exception as e:
                                print(e)
                                pass
                    else:
                        withdraw = model.Withdraw(withdraw.meetingNumber, withdraw.clientName, addr[0])
                        message = model.encode(withdraw)
                        requesterIP = invite.requesterIP
                        requesterPort = invite.targetName
                        try:
                            self._sender(message, requesterIP, int(requesterPort))
                            # self.s.sendto(message.encode(), (requesterIP, int(requesterPort)))
                        except Exception as e:
                            print(e)
                            pass
        self.lock.release()

    ###################################
    # Cancel handler
    ###################################
    if (dataDict["type"] == "Cancel"):
        cancel = model.decodeJson(data)
        # print("Cancel-1")
        self.lock.acquire()

        # Does the referenced meeting exist?
        seek = self.conn.cursor().execute(
            '''
            SELECT * from booked where meetingId=?
            ''', (cancel.meetingNumber,)
        ).fetchall()
        if (len(seek) > 0):
            # print("Cancel-2")
            # If yes, fetch the saved copy of the original invite to find out the ip and
            # sessionName of the requester, the min threshold, etc.
            seek = self.conn.cursor().execute(
                '''
                SELECT * from invite where meetingNumber=?
                ''', (cancel.meetingNumber,)
            ).fetchall()
            if (len(seek) > 0):
                # print("Cancel-3")
                inviteStr = seek[0][1]
                invite = model.decodeJson(inviteStr)
                requesterIP = invite.requesterIP
                requesterName = invite.requesterName

                # If the current message indeed was sent by the original meeting requester,
                # we cancel the meeting and send a message to all participants
                if (requesterIP == addr[0] and requesterName == cancel.clientName):
                    # print("Cancel-4")
                    self.conn.cursor().execute(
                        '''
                        UPDATE booked SET status="Cancelled" where meetingId=?
                        ''', (cancel.meetingNumber,)
                    )
                    # cancel the entire meeting; send notification to all participants
                    allParticipants = self.conn.cursor().execute(
                        '''
                        SELECT * FROM inviteList where meetingNumber=? and status!='Withdrawn'
                        ''', (cancel.meetingNumber,)
                    ).fetchall()
                    cancel = model.Cancel(cancel.meetingNumber, "Cancelled by the organizer")
                    message = model.encode(cancel)
                    # print("Cancel-5")
                    for participant in allParticipants:
                        pIp = participant[1]
                        pName = participant[2]
                        try:
                            self._sender(message, pIp, int(pName))
                            # self.s.sendto(message.encode(), (pIp, int(pName)))
                        except Exception as e:
                            print(e)
                            pass
        self.lock.release()