def init(app):
    """Init restapi: register the /library, /media and /icon routes on *app*.

    Side effects: initializes the media library backend (``L.init()``) and
    attaches three view functions to the Flask app.  All responses carry a
    permissive CORS header.
    """
    L.init()

    @app.route("/library/", defaults={"path": ""})
    @app.route("/library/<path:path>")
    # Returns metadata of path in json format
    def library(path):
        try:
            return jsonify(L.library(path)), 200, {'Access-Control-Allow-Origin': '*'}
        except IOError:
            # Unknown/unreadable path maps to HTTP 404.
            abort(404)

    # FIX: raw string — "\w" in a plain string is an invalid escape sequence
    # (DeprecationWarning, a SyntaxWarning/error in newer Pythons); the raw
    # form produces the exact same route rule.
    @app.route(r'/media/<path:path>.<regex("\w+"):format>')
    def media_content_tc(path, format):
        # Returns media file, transcoded to `format`, starting at `start` seconds.
        start = float(request.args.get("start") or 0)
        vcodec = request.args.get("vcodec")
        acodec = request.args.get("acodec")
        try:
            mime = L.transcodeMime(format)
            # FIX: header name was misspelled "Content-Transfer-Enconding";
            # the correct MIME header is "Content-Transfer-Encoding".
            return Response(response=L.transcode(path, start, format, vcodec, acodec),
                            status=200,
                            mimetype=mime,
                            headers={'Access-Control-Allow-Origin': '*',
                                     "Content-Type": mime,
                                     "Content-Disposition": "inline",
                                     "Content-Transfer-Encoding": "binary"})
        except FileNotFoundError:
            abort(404)

    @app.route('/icon/<path:path>.jpg')
    def media_content_icon(path):
        # returns icon file
        try:
            # FIX: same "Content-Transfer-Enconding" typo corrected here.
            return Response(response=L.icon(path),
                            status=200,
                            mimetype='image/jpg',
                            headers={'Access-Control-Allow-Origin': '*',
                                     "Content-Type": "image/jpg",
                                     "Content-Disposition": "inline",
                                     "Content-Transfer-Encoding": "binary"})
        except FileNotFoundError:
            abort(404)
def handler(event, context):
    """Top-level Lambda entry point: log the raw event, initialize the
    library, then dispatch to the sub-handler that owns the request path.

    Unknown paths yield a 400 error response.
    """
    print(json.dumps(event, sort_keys=True))
    library.init()

    resource_path = event['path']

    # Dispatch table instead of an if/elif chain; one handler per resource.
    dispatch = {
        '/group': group_handler,
        '/task': task_handler,
        '/post': post_handler,
        '/profile': profile_handler,
    }
    target = dispatch.get(resource_path)
    if target is not None:
        return target(event, context)
    return generate_error_response(400, 'Unsupported path: ' + resource_path)
def lambda_handler(event, context):
    """Scheduled job: rotate every overdue, unsolved task to the next user
    and notify that user through a Pinpoint campaign.

    Reads the Tasks table, and for each task whose rotation deadline
    (lastRotated + taskDuration minutes) has passed, reassigns it via
    rotate_user(), updates the row, and sends a notification.
    """
    library.init()
    index.init_db_connection()
    index.init_boto3_client()
    # Module-level client so other helpers in this module can reuse it.
    global boto_cognito_client
    boto_cognito_client = boto3.client(
        'cognito-idp',
        aws_access_key_id=aws_config.aws_access_key_id,
        aws_secret_access_key=aws_config.aws_secret_access_key)
    table_name = 'Tasks'
    # NOTE(review): timeFormat is never used in this function — dead local?
    timeFormat = "%Y-%m-%dT%H:%M:%S"
    sql = 'SELECT taskDuration, taskUser, taskID, lastRotated, taskTitle ' \
          'FROM %s ' \
          'WHERE taskSolved = FALSE' % (
              table_name)
    rows = index.execute_sql(sql)
    for row in rows:
        # row: (taskDuration[min], taskUser, taskID, lastRotated, taskTitle)
        d = {}
        d['taskUser'] = row[1]
        d['lastRotated'] = row[3]
        # print("old:" + str(d['lastRotated']))
        # if d['lastRotated'] + d['taskDuration'] <
        # Deadline = previous rotation time + task duration in minutes.
        d['lastRotated'] = row[3] + timedelta(minutes=row[0])
        # print("new: " + str(d['lastRotated']))
        # NOTE(review): datetime.now() is naive local time — presumably the DB
        # stores the same timezone; confirm, otherwise deadlines drift.
        if (datetime.now() - d['lastRotated']).total_seconds() > 0:
            # Deadline passed: hand the task to the next user in the rotation.
            new_order = rotate_user(d['taskUser'])
            d['taskUser'] = new_order[0]
            sql = index.generate_sql_clause("UPDATE", table_name, d)
            # NOTE(review): string-built SQL — taskID comes from our own DB,
            # but a parameterized query would still be safer.
            sql += " WHERE taskID = %s" % row[2]
            index.execute_sql(sql)
            # send notification to user
            user_name = new_order[1]
            try:
                # Existence check only; result is discarded.
                boto_cognito_client.admin_get_user(
                    UserPoolId=aws_config.UserPoolId,
                    Username=user_name)
            except boto_cognito_client.exceptions.UserNotFoundException as e:
                # NOTE(review): `pass` before the print is redundant — the
                # print still runs; unknown users are logged and skipped.
                pass
                print("Invalid user: %s" % user_name)
            else:
                # User exists in the pool: build a segment and fire a campaign.
                new_segment = index.create_pinpoint_segment(user_name, None)
                index.create_campaign(new_segment, user_name, row[4],
                                      "It's your turn now ^_^")
                print("%s has been notified for %s" % (user_name, row[4]))
    index.close_db_connection()
def __init__(self, app):
    """Build the main window: set up the generated UI, load configuration,
    initialize the music library, then wire events and refresh the playlist.

    The call order below is deliberate — see the inline comments.
    """
    super().__init__()
    self.setupUi(self)  # This is needed to initialize our (Qt Designer) layout
    # config loading
    self.config = config.Config(self)
    library.init(self.config.getLibraryDirs())
    self.lyrics = Lyrics(self.config)
    # make post ui setup after library is initialized
    self.postSetupUi()
    # Load config goes after postSetupUi() to be able to restore columns width
    self.config.load(app)
    self.connectEvents(app)
    self.tableModel.refreshPlaylist()
# Script setup for the NOESY peak-assignment tool: parse the command line
# and initialize the library before any assignment work starts.
import math
import time
import BmrbAtomNames
import fasta

# NOTE(review): time.clock() was removed in Python 3.8 — the surrounding
# code style suggests Python 2; confirm the target interpreter.
start = time.clock()

parser = ApplicationArgumentParser(description="assign noesy peaks", add_help=True)
parser.add_argument("-peaks", nargs='*', help='files with peak-lists in xeasy format', default=None);
parser.add_argument("-prot", help="chemical shift file", default=None);
#parser.add_argument("-ref_prot", help="ref chemical shift file",default=None);
parser.add_argument("-fasta", help="fasta file", default=None);
parser.add_argument("-output", help="how to call the output files", default="default")
# Let the shared library register its common flags, then hand it the result.
library.add_standard_args(parser)

args = parser.parse_args()
library.init(args)

tr = Tracer('main')

# Commented-out prototype kept by the original author; candidate for removal.
# def assign_peak_consistently( peak, state, resonances, scorefxn ):
#     #known_fm=KnownFreqMatcher( resonances )
#     fm=SelfUpdateFreqMatcher( resonances, state )
#     known_dist=ScoreDistanceMatcher( FragDistanceScore(), abs(math.log(0.3)), 0 )
#     known_dist.max_sequence_separation=2
#     # print '%20s'%'assign peak: ', peak
#     fm.default_answer=False
#     for match in peak.matches( molecule, random_samples=1, frequency_matcher=fm, distance_matcher=known_dist ):
#         # print '%20s'%'FIX: ', match
#         return match
def main(w0 = None):
    """Beam-search phrase-based decoder (Python 2).

    For each French sentence, builds hypothesis heaps keyed by
    (lm_state, coverage bitmap, end position), expands phrases under a
    distortion limit, scores with weights *w*, and finally prints the
    locally-searched best translations.

    :param w0: optional pre-set weight vector; when None, weights are read
               from ``opts.weights`` or default to uniform 1/7.
    """
    # tm should translate unknown words as-is with probability 1
    w = w0
    if w is None:
        # lm_logprob, distortion penenalty, direct translate logprob, direct lexicon logprob, inverse translation logprob, inverse lexicon logprob
        if opts.weights == "no weights specify":
            w = [1.0/7] * 7
            # w = [1.76846735947, 0.352553835525, 1.00071564481, 1.49937872683, 0.562198294709, -0.701483985454, 1.80395218437]
        else:
            # One weight per line in the weights file.
            w = [float(line.strip()) for line in open(opts.weights)]
        sys.stderr.write(str(w) + '\n')
    tm = models.TM(opts.tm, opts.k, opts.mute)
    lm = models.LM(opts.lm, opts.mute)
    # ibm_t = {}
    ibm_t = init('./data/ibm.t.gz')
    # Input sentences as tuples of tokens, truncated to the requested range.
    french = [tuple(line.strip().split()) for line in open(opts.input).readlines()[:opts.num_sents]]
    french = french[opts.start : opts.end]
    bound_width = float(opts.bwidth)
    # Ensure every vocabulary word has at least an identity translation.
    for word in set(sum(french, ())):
        if (word,) not in tm:
            tm[(word,)] = [models.phrase(word, [0.0, 0.0, 0.0, 0.0])]
    nbest_output = []
    total_prob = 0
    if opts.mute == 0:
        sys.stderr.write("Start decoding %s ...\n" % (opts.input,))
    for idx, f in enumerate(french):
        if opts.mute == 0:
            sys.stderr.write("Decoding sentence #%s ...\n" % (str(idx)))
        # hypothesis fields (inferred from use): lm_state, logprob, coverage,
        # end, predecessor, phrase, distortion — TODO confirm against its def.
        initial_hypothesis = hypothesis(lm.begin(), 0.0, 0, 0, None, None, None)
        # heaps[n] holds hypotheses covering exactly n source words.
        heaps = [{} for _ in f] + [{}]
        heaps[0][lm.begin(), 0, 0] = initial_hypothesis
        for i, heap in enumerate(heaps[:-1]):
            # maintain beam heap
            # front_item = sorted(heap.itervalues(), key=lambda h: -h.logprob)[0]
            # Histogram pruning: keep only the opts.s best hypotheses.
            for h in sorted(heap.itervalues(), key=lambda h: -h.logprob)[:opts.s]:  # prune
                # if h.logprob < front_item.logprob - float(opts.bwidth):
                #     continue
                # First uncovered source position bounds where we may start.
                fopen = prefix1bits(h.coverage)
                for j in xrange(fopen, min(fopen+1+opts.disord, len(f)+1)):
                    for k in xrange(j+1, len(f)+1):
                        if f[j:k] in tm:
                            # Only expand if the span is still uncovered.
                            if (h.coverage & bitmap(range(j, k))) == 0:
                                for phrase in tm[f[j:k]]:
                                    # Incremental LM score over the phrase words.
                                    lm_prob = 0
                                    lm_state = h.lm_state
                                    for word in phrase.english.split():
                                        (lm_state, prob) = lm.score(lm_state, word)
                                        lm_prob += prob
                                    # Add sentence-end probability when the
                                    # whole source is consumed.
                                    lm_prob += lm.end(lm_state) if k == len(f) else 0.0
                                    coverage = h.coverage | bitmap(range(j, k))
                                    # logprob = h.logprob + lm_prob*w[0] + getDotProduct(phrase.several_logprob, w[2:6]) + abs(h.end+1-j)*w[1] + ibm_model_1_w_score(ibm_t, f, phrase.english)*w[6]
                                    logprob = h.logprob
                                    logprob += lm_prob*w[0]
                                    logprob += getDotProduct(phrase.several_logprob, w[1:5])
                                    # logprob += opts.diseta*abs(h.end+1-j)*w[1]
                                    logprob += ibm_model_1_w_score(ibm_t, f, phrase.english)*w[5]
                                    # Word-count feature: output vs. span length.
                                    logprob += (len(phrase.english.split()) - (k - j)) * w[6]
                                    new_hypothesis = hypothesis(lm_state, logprob, coverage, k, h, phrase, abs(h.end + 1 - j))
                                    # add to heap: recombine — keep only the
                                    # best hypothesis per (state, coverage, end).
                                    num = onbits(coverage)
                                    if (lm_state, coverage, k) not in heaps[num] or new_hypothesis.logprob > heaps[num][lm_state, coverage, k].logprob:
                                        heaps[num][lm_state, coverage, k] = new_hypothesis
        # Full-coverage hypotheses, best first, truncated to the n-best list.
        winners = sorted(heaps[-1].itervalues(), key=lambda h: -h.logprob)[0:opts.nbest]

        def get_lm_logprob(test_list):
            # Re-score a list of English phrase strings with the LM from <s>.
            stance = []
            for i in test_list:
                stance += (i.split())
            stance = tuple(stance)
            lm_state = ("<s>",)
            score = 0.0
            for word in stance:
                (lm_state, word_score) = lm.score(lm_state, word)
                score += word_score
            return score

        def get_list_and_features(h, idx_self):
            # Walk the predecessor chain to recover the phrase sequence and
            # accumulate the 7 reranker features.
            lst = [];
            features = [0, 0, 0, 0, 0, 0, 0]
            current_h = h;
            while current_h.phrase is not None:
                # print current_h
                lst.append(current_h.phrase.english)
                # features[1] += current_h.distortionPenalty
                features[1] += current_h.phrase.several_logprob[0]  # translation feature 1
                features[2] += current_h.phrase.several_logprob[1]  # translation feature 2
                features[3] += current_h.phrase.several_logprob[2]  # translation feature 3
                features[4] += current_h.phrase.several_logprob[3]  # translation feature 4
                current_h = current_h.predecessor
            lst.reverse()
            features[0] = get_lm_logprob(lst)  # language model score
            features[5] = ibm_model_1_score(ibm_t, f, lst)
            features[6] = len(lst) - len(french[idx_self])
            return (lst, features)

        for win in winners:
            # s = str(idx) + " ||| "
            (lst, features) = get_list_and_features(win, idx)
            # Python 2 print statement; emits the locally-searched translation.
            print local_search.local_search(lst, lm)
def command_init(*args, **kwargs):
    """Initialize the library (forwarding all arguments), then the extractor.

    Order matters: the extractor is initialized only after the library.
    """
    library.init(*args, **kwargs)
    extractor.init()
## init for decoder part lm = models.LM(opts.lm, opts.mute) tm = models.TM(opts.tm, opts.k, opts.mute) french = [tuple(line.strip().split()) for line in open(opts.input).readlines()[:opts.num_sents]] bound_width = float(opts.bwidth) for word in set(sum(french,())): if (word,) not in tm: tm[(word,)] = [models.phrase(word, [0.0, 0.0, 0.0, 0.0])] # ibm_t = {} ibm_t = library.init('./data/ibm.t.gz') ######################################################################################################################################## ## init for reranker part references = [[], [], [], []] sys.stderr.write("Reading English Sentences ... \n") def readReference(ref_fileName): ref = [] for i, line in enumerate(open(ref_fileName)): # Initialize references to correct english sentences ref.append(line) if i%1000 == 0: sys.stderr.write(".") sys.stderr.write("\n") return ref
def command_init():
    """CLI entry point: initialize the media library."""
    library.init()