def populateQueue(self):
    timeBorder = time.time() + REQUESTS_PER_USER * QUEUE_SIZE * WAIT_TIMEOUT
    cursor = self.connection.cursor()
    cursor.execute('''
        SELECT id, request_time FROM users
        WHERE request_time < %s
        ORDER BY request_time ASC LIMIT %s
    ''' % (int(timeBorder), QUEUE_SIZE))
    self.users = [user.User(id, rTime) for id, rTime in cursor.fetchall()]
    oldUsersCount = len(self.users)
    if len(self.users) < QUEUE_SIZE:
        cursor.execute('''
            SELECT id, request_time FROM users
            WHERE request_time = %s
            ORDER BY id ASC LIMIT %s
        ''' % (NEW_USER_TIME, QUEUE_SIZE - oldUsersCount))
        self.users.extend(user.User(id, rTime) for id, rTime in cursor.fetchall())
    newUsersCount = len(self.users) - oldUsersCount
    log.write('users queue populated with %s users and %s new users'
              % (oldUsersCount, newUsersCount))
    log.write('new users in queue: %s' % cursor.execute('''
        SELECT COUNT(*) FROM users WHERE request_time = %s
    ''' % NEW_USER_TIME).fetchone())
    cursor.close()

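# Hedged aside, not part of the original module: the queries above splice
# values in with %, which is safe here only because the arguments are ints.
# A parameterized sketch of the first query, using DB-API placeholders
# ("?" as in sqlite3; other drivers use %s), would look like:
#
#   cursor.execute(
#       'SELECT id, request_time FROM users '
#       'WHERE request_time < ? ORDER BY request_time ASC LIMIT ?',
#       (int(timeBorder), QUEUE_SIZE))
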
def whelp(bots):
    from bot import STATE
    import traceback
    global last_kill_id, last_whelp_time
    if time.time() < last_whelp_time + 60:
        return
    try:
        kills = rs.get('http://api.whelp.gg/last/' + str(last_kill_id)).json()
        notify = []
        for k in kills:
            try:
                item_hull_ratio = (k['total_cost'] - k['hull_cost']) // k['hull_cost']
            except ZeroDivisionError:
                item_hull_ratio = 0
            # total > 30 billion or (total > 500 million and ratio > 7);
            # costs appear to be in hundredths of ISK, hence the * 100
            if k['total_cost'] > 30e9 * 100 or \
                    (k['total_cost'] > 500e6 * 100 and item_hull_ratio > 7):
                notify.append(k)
            if k['kill_id'] > last_kill_id:
                last_kill_id = k['kill_id']
        for b in bots:
            if b.state == STATE.IDENTIFIED and '#ellipsis' in b.config.channels:
                for k in notify:
                    cost = '{:,d}'.format(k['total_cost'] // 100 // int(1e6))
                    line = '%s million ISK %s http://www.whelp.gg/kill/%d' % (
                        cost, k['ship_name'], k['kill_id'])
                    b.say('#ellipsis', line)
        last_whelp_time = time.time()
    except:
        log.write(traceback.format_exc())

def saveToDatabase(self):
    log.write('storing followers and posts to database...')
    cursor = self.connection.cursor()
    # save friends and followers into the followers table
    cursor.executemany('''
        INSERT OR IGNORE INTO followers VALUES (?,?)
    ''', ((user.id, id) for user in self.users for id in user.followers))
    cursor.executemany('''
        INSERT OR IGNORE INTO followers VALUES (?,?)
    ''', ((id, user.id) for user in self.users for id in user.friends))
    # save single posts
    cursor.executemany('''
        INSERT OR IGNORE INTO posts VALUES (?,?,?)
    ''', ((user.id, p[0], p[1])
          for user in self.users for p in user.posts
          if p[0] != 0 and p[1] != 0))
    # save reposts
    cursor.executemany('''
        INSERT OR IGNORE INTO posts VALUES (?,?,?)
    ''', ((p[2], p[3], p[4])
          for user in self.users for p in user.reposts
          if p[0] != 0 and p[1] != 0 and p[3] != 0 and p[4] != 0))
    cursor.executemany('''
        INSERT OR IGNORE INTO reposts VALUES (?,?,?,?,?)
    ''', ((user.id, p[0], p[1], p[2], p[3])
          for user in self.users for p in user.reposts
          if p[0] != 0 and p[1] != 0 and p[3] != 0 and p[4] != 0))
    self.connection.commit()
    cursor.close()

def saveToFiles(self):
    log.write('storing followers and posts to files...')
    # save friends and followers into the followers file
    f = open('followers' + self.fileSuffix + '.csv', 'ab')
    try:
        writer = csv.writer(f)
        writer.writerows((user.id, id) for user in self.users for id in user.followers)
        writer.writerows((id, user.id) for user in self.users for id in user.friends)
    finally:
        f.close()
    # save posts
    f = open('posts' + self.fileSuffix + '.csv', 'ab')
    try:
        writer = csv.writer(f)
        writer.writerows((user.id, p[0], p[1])
                         for user in self.users for p in user.posts
                         if p[0] != 0 and p[1] != 0)
        writer.writerows((p[2], p[3], p[4])
                         for user in self.users for p in user.reposts
                         if p[0] != 0 and p[1] != 0 and p[3] != 0 and p[4] != 0)
    finally:
        f.close()
    # save reposts
    f = open('reposts' + self.fileSuffix + '.csv', 'ab')
    try:
        writer = csv.writer(f)
        writer.writerows((user.id, p[0], p[1], p[2], p[3])
                         for user in self.users for p in user.reposts
                         if p[0] != 0 and p[1] != 0 and p[3] != 0 and p[4] != 0)
    finally:
        f.close()

def add(self, r, estcost=0.0):
    self.nonterminals.add(r.lhs)
    for f in r.frhs:
        if isinstance(f, rule.Nonterminal):
            self.nonterminals.add(f.clearindex())
    if r.arity() == 1 and len(r.frhs) == 1:
        log.write("unary rule: %s\n" % r)
        f = r.frhs[0].clearindex()
        self.unary_rules.setdefault(f, RuleBin(self.threshold, self.limit)).add(estcost, r)
        self.unary_less_than.add((f, r.lhs))
    else:
        cur = self.root
        for f in r.frhs:
            if isinstance(f, rule.Nonterminal):
                f = f.clearindex()
            cur[1].setdefault(f, [None, {}])
            cur = cur[1][f]
        if cur[0] is None:
            cur[0] = RuleBin(self.threshold, self.limit)
            self.rulebin_count += 1
        bin = cur[0]
        bin.add(estcost, r)
        bin.prune()
    self.count += 1

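# Hedged sketch of the trie layout add() builds above (illustrative only):
# every node is a two-slot list [rulebin_or_None, children_dict], keyed by
# French-side symbols with nonterminal indices cleared. After adding a rule
# with frhs = [f1, f2], the structure looks like:
#
#   self.root == [None, {f1: [None, {f2: [RuleBin(threshold, limit), {}]}]}]
#
# so lookup during parsing walks cur = cur[1][f] one symbol at a time.
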
def main():
    log.write("spider", "info", "Spider starts...")
    # Below are the entry points for the different websites
    print "\n---------- yirendai ----------\n"
    print "Begin search for yirendai..."
    yirendai.yirendai_start()
    print "Finish search for yirendai..."
    print "\n---------- shixinren ----------\n"
    print "Begin search for shixinren..."
    shixinren.shixinren_start()
    print "Finish search for shixinren..."
    print "\n---------- honglingchuangtou ----------\n"
    print "Begin search for honglingchuangtou..."
    honglingchuangtou.honglingchuangtou_start()
    print "Finish search for honglingchuangtou..."
    print "\n---------- fujiancourt ----------\n"
    print "Begin search for fujiancourt..."
    fujiancourt.fujiancourt_start()
    print "Finish search for fujiancourt..."
    print "\n---------- renrendai ----------\n"
    print "Begin search for renrendai..."
    renrendai.renrendai_start()
    print "Finish search for renrendai..."
    print "\n---------- ppdai ----------\n"
    print "Begin search for ppdai..."
    ppdai.ppdai_start()
    print "Finish search for ppdai..."

if __name__ == "__main__":
    main()

def send_hello(self, seq=0xFF, notification_period=0):
    log.write('** hello from [%d:%d] **' % (self._short_id, self._long_id))
    self._Procedures.hello(self._long_id, self._short_id, 0, notification_period)
    return
    # NOTE: everything below the return is unreachable legacy code, kept for reference
    ser_num = prutils.convert_longid(int(self._long_id))[:3]
    mcode = 5
    mdata_type = 64
    mdata_value = chr(0)
    if seq == 0xFF:
        seq = self._dpr_seq.next()
    mdata = chr(mcode) + prutils.str_l(chr(mdata_type) + prutils.str_l(mdata_value))
    device_packet = '\xE4\x00' + chr(seq) + ser_num + mdata
    # prepare host protocol
    #final_hop_info = '\x00' * 11
    final_hop_info = '\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00'
    relaying_repeater_ID = 0
    repeater_hop_info = '\x00' * 11
    #data = chr(short_id) + '\x01\xF8' + final_hop_info + chr(relaying_repeater_ID) + repeater_hop_info + str_l(device_packet)
    data = (chr(self._short_id) + chr(prutils.generate_seq('hpr')) +
            chr(0x30 + notification_period) + final_hop_info +
            chr(relaying_repeater_ID) + repeater_hop_info +
            prutils.str_l(device_packet))
    cmd = 0x30
    data = chr(cmd) + data
    cmd_l = 2 + len(data)
    data = chr(cmd_l) + data
    host_protocol_message = data + prutils.crc16(data)[0] + '\x0A'
    #TODO: Use "make_host_packet" function to create device packet
    #host_protocol_message = make_host_packet(0x30, data)
    # preparing packet to send
    #print hexdump(host_protocol_message)
    packet = host_protocol_message
    packet = prutils._make_f2_packet(host_protocol_message)
    # self.prepare_transmitter()
    self._Procedures._Communicator.send(packet, 9)

def _register(self):
    """Register using NICK and USER, then wait for the MODE signal and JOIN the channels."""
    self.nick = self.prefered_nick
    nick_event = Irc_event("NICK", self.nick)
    self.send_event(nick_event)
    user_event = Irc_event("USER", self.nick, "localhost", "localhost", "irc bot")
    self.send_event(user_event)
    if self.password:
        identify_event = Irc_event("PRIVMSG", "NickServ", "IDENTIFY %s" % self.password)
        self.send_event(identify_event)
    while True:
        event = self._next_event()
        if event.type == "MODE" and event.args[0] == self.nick:
            log.write("%s: Connected as %s" % (self.host, self.nick))
            # Automatically join channels
            if len(self._channels) > 0:
                join_event = Irc_event("JOIN", ",".join(self._channels))
                self.send_event(join_event)
            break
        # Nick already in use
        elif event.type == "433":
            log.write("%s: %s already taken, switching to %s_." % (self.host, self.nick, self.nick))
            self.nick = "%s_" % self.nick
            nick_event = Irc_event("NICK", self.nick)
            self.send_event(nick_event)

def cruise(STATISTICS, MASSCAN_AC):
    while True:
        now_str = datetime.datetime.now()
        week = int(now_str.weekday())  # Monday == 0
        hour = int(now_str.hour)
        # do not delete outside working hours (Mon-Fri, 9:00-18:00)
        if week <= 4 and hour >= 9 and hour <= 18:
            try:
                data = mongo.NA_INFO.find().sort("time", 1)
                for history_info in data:
                    while True:
                        if MASSCAN_AC[0]:  # if masscan is scanning, hold off on cleanup
                            time.sleep(10)
                        else:
                            break
                    ip = history_info['ip']
                    port = history_info['port']
                    try:
                        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                        sock.connect((ip, int(port)))
                        sock.close()
                    except Exception, e:
                        time_ = datetime.datetime.now()
                        date_ = time_.strftime('%Y-%m-%d')
                        mongo.NA_INFO.remove({"ip": ip, "port": port})
                        log.write('info', None, 0, '%s:%s delete' % (ip, port))
                        STATISTICS[date_]['delete'] += 1
                        del history_info["_id"]
                        history_info['del_time'] = time_
                        history_info['type'] = 'delete'
                        mongo.NA_HISTORY.insert(history_info)
            except:
                pass
        time.sleep(3600)

def send_weights(self):
    #log.write("prev weights: %s\n" % self.prev_weights)
    #log.write("weights: %s\n" % self.weights)
    if self.prev_weights is None:
        weights = self.weights
    else:
        weights = self.weights - self.prev_weights
        weights.compact()
    core_weights = [0.] * self.n_core_features
    sparse_weights = svector.Vector()
    for feature in weights:
        if not feature.startswith('_core'):
            sparse_weights[feature] = -weights[feature]
        else:
            i = int(feature[5:])
            core_weights[i] = -weights[feature]
    request = {'core-weights': ','.join(str(x) for x in core_weights),
               'sparse-weights': str(sparse_weights)}
    if self.prev_weights is None:
        log.write("setWeights(%s)\n" % request)
        self.server.setWeights(request)
    else:
        log.write("addWeights(%s)\n" % request)
        self.server.addWeights(request)
    self.prev_weights = svector.Vector(self.weights)

def read_packet(self, Protocol, timeout=10):
    packet = ''
    # self._bufer = ''
    stop_time = time.time() + timeout
    while time.time() < stop_time:
        data = self.get_row_data()
        if data is None:
            # problem with fake packet end:
            # 19 26 00 03 01 0A 11 A4 00 99 00 00 00 04 09 AA 07 02 80 4D 02 03 A1 0A D8 0A
            time.sleep(0.09)
            continue
        # Reading 1st byte
        packet = packet + data
        # if (len(packet) >= Protocol.MIN_PACKET_LEN) and (packet.rfind(Protocol.EOM) == len(packet) - len(Protocol.EOM)):
        # Found EOM = 0x0A and packet's length is more than packet_type.MIN_PACKET_LEN.
        # Checking if packet is correct:
        for char_index in range(len(packet) - Protocol.MIN_PACKET_LEN):
            offset = self._check_packet(packet[char_index:], Protocol.NAME)
            if type(offset) is not bool:
                self._bufer = packet[offset:]
                packet = packet[char_index:char_index + offset]
                # log.write('bufer=%s' % hexdump(self._bufer))
                log.write('={0}= [RX]: {1}'.format(self.COMPort, hexdump(packet)), "debug")
                return packet
        # Packet is incorrect: no byte found that corresponds to the packet length.
        # Continue collecting data.
    # print '(def read_packet) Output due to a timeout {0} sec.'.format(timeout)
    return None

def write_error(self, status_code, **kwargs):
    if not log.stdout:
        log.write('api error: {} for {}'.format(status_code, self.request.uri))
        exc_info = kwargs.get('exc_info')
        if exc_info is not None:
            log.write(''.join(traceback.format_exception(*exc_info)))
    super(APIBaseHandler, self).write_error(status_code, **kwargs)

def refresh(self, event):
    log.write('checkbox.refresh: %d' % event)
    if self.attr and m.isMayaRunning:
        value = self.getAttr()
        if self.attrValue != value:
            self.control.SetValue(bool(value))
            self.attrValue = value

def build(crawl_subreddits, crawl_urls):
    urls = []
    log.write("Building location list...", 'message')
    for subreddit in crawl_subreddits:
        for sort in subreddit['sort']:
            if sort == "all":
                sort = ""
            urls.append("http://www.reddit.com/r/" + subreddit['subreddit'] + "/" + sort + ".json")
    for url in crawl_urls:
        urls.append(url + ".json")
    for url in urls:
        try:
            cur.execute("select id from crawl_locations where url = %s", (url,))
            if cur.rowcount > 0:
                cur.execute("update crawl_locations set last_seen = now() where url = %s", (url,))
            else:
                cur.execute("""insert into crawl_locations (
                        url,
                        last_seen,
                        last_crawled
                    ) values (%s, now(), 0)""", (url,))  # parameters must be a tuple, not a bare string
            db.commit()
        except Exception, e:
            log.write('Error storing location: ' + url + ': %s' % e, 'error')
            db.rollback()

def printStats():
    cur.execute("select count(*) from crawl_locations")
    locationTotal = cur.fetchone()[0]

    linkLocations = len(linkTimes['counts'])
    linkCount = sum(linkTimes['counts'])
    linkElapsedTime = sum(linkTimes['times'])
    if linkCount == 0:
        linkRate = 0
    else:
        linkRate = linkElapsedTime / linkCount
    cur.execute("select count(*) from t3")
    linkTotal = cur.fetchone()[0]

    commentLocations = len(commentTimes['counts'])
    commentCount = sum(commentTimes['counts'])
    commentElapsedTime = sum(commentTimes['times'])
    if commentCount == 0:
        commentRate = 0
    else:
        commentRate = commentElapsedTime / commentCount
    cur.execute("select count(*) from t1")
    commentTotal = cur.fetchone()[0]

    responseLocations = len(responseTimes['counts'])
    responseCount = sum(responseTimes['counts'])
    responseElapsedTime = sum(responseTimes['times'])
    if responseCount == 0:
        responseRate = 0
    else:
        responseRate = responseElapsedTime / responseCount
    cur.execute("select count(*) from responses")
    responseTotal = cur.fetchone()[0]

    totalElapsed = time.time() - startTime
    log.write("%d link(s) / %f sec. (%f sec. ea.) in %d location(s)"
              % (linkCount, linkElapsedTime, linkRate, linkLocations), 'stat')
    log.write("%d comment(s) / %f sec. (%f sec. ea.) in %d thread(s)"
              % (commentCount, commentElapsedTime, commentRate, commentLocations), 'stat')
    log.write("%d response(s) / %f sec. (%f sec. ea.) in %d thread(s)"
              % (responseCount, responseElapsedTime, responseRate, responseLocations), 'stat')
    log.write("%d location(s) / %d link(s) / %d comment(s) / %d responses currently in database"
              % (locationTotal, linkTotal, commentTotal, responseTotal), 'stat')
    log.write("Execution took %f sec. (%f minutes)" % (totalElapsed, totalElapsed / 60), 'stat')

def add(self, f, i, j, antbins, v={}):
    item = DotItem(f, i, j, antbins, v)
    if log.level >= 3:
        # log.write("Adding dotitem %d: %s,%d,%d,%s\n" % (id(item), f[0], i, j, ",".join(str(antbin) for antbin in antbins)))
        log.write("Adding dotitem %d: %s,%d,%d\n" % (id(item), f[0], i, j))
    self.chart.dot_added += 1
    self.bins[i][j].append(item)

def server_discern(self):
    for mark_info in self.config_ini['Discern_server']:  # quick identification
        try:
            name, default_port, mode, reg = mark_info
            if mode == 'default':
                if int(default_port) == self.port:
                    self.server = name
            elif mode == 'banner':
                matchObj = re.search(reg, self.banner, re.I | re.M)
                if matchObj:
                    self.server = name
            if self.server:
                break
        except:
            continue
    if not self.server and self.port not in [80, 443, 8080]:
        for mark_info in self.config_ini['Discern_server']:  # probe-based identification
            try:
                name, default_port, mode, reg = mark_info
                if mode not in ['default', 'banner']:
                    dis_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    dis_sock.connect((self.ip, self.port))
                    mode = mode.decode('string_escape')
                    reg = reg.decode('string_escape')
                    dis_sock.send(mode)
                    time.sleep(0.3)
                    dis_recv = dis_sock.recv(1024)
                    matchObj = re.search(reg, dis_recv, re.I | re.M)
                    if matchObj:
                        self.server = name
                        break
            except:
                pass
    if self.server:
        log.write("server", self.ip, self.port, self.server)
        mongo.NA_INFO.update({"ip": self.ip, "port": self.port},
                             {"$set": {"server": self.server}})

def read(self, f, models, weights):
    if type(f) is str:
        if os.path.isfile(f):
            if log.level >= 1:
                log.write("Reading grammar from %s...\n" % f)
            f = file(f, "r", 4 * 1024 * 1024)
        elif os.path.isfile("%s.gz" % f):
            f = "%s.gz" % f
            if log.level >= 1:
                log.write("Decompressing grammar from %s...\n" % f)
            f = file(f, "r", 4 * 1024 * 1024)
            f = gzip.GzipFile(fileobj=f)
    else:
        if log.level >= 1:
            log.write("Reading grammar...\n")
    for line in f:
        try:
            r = rule.Rule.from_str(line)
        except Exception:
            log.write("warning: couldn't scan rule %s\n" % line.strip())
            continue
        estcost = estimate_rule(r, models, weights)
        self.add(r, estcost)
        # self.add(rule.rule_from_line(line))  # this once caused a segfault
    log.write("%d rules read\n" % self.count)

def expand_unary(self, i, j):
    """Finish bin (i,j) by building items with unary productions."""
    agenda = [(self.nonterminals.getrank(item.x), totalcost, item)
              for (totalcost, item) in self.bins[i][j]]
    heapq.heapify(agenda)
    while len(agenda) > 0:
        (trank, _, titem) = heapq.heappop(agenda)
        if log.level >= 3:
            log.write("Applying unary rules to %s\n" % titem)
        # it may happen that the item was defeated or pruned before we got to it
        if titem not in self.bins[i][j].index:
            continue
        for (g, dotchart) in self.grammars:
            if g.filterspan(self.flattice, i, j):
                for (estcost, r) in g.unary_rules.get(titem.x, ()):
                    rank = self.nonterminals.getrank(r.lhs)
                    # if the new item isn't of lower priority than the
                    # current trigger item (because of a unary cycle),
                    # adding it could corrupt the forest
                    if rank <= trank:
                        self.unary_pruned += 1
                        continue
                    (totalcost, (cost, dcost, newstates)) = self.compute_item(r, (titem,), i, j)
                    ded = forest.Deduction((titem,), r, dcost, viterbi=cost)
                    item = forest.Item(r.lhs, i, j, deds=[ded], states=newstates, viterbi=cost)
                    if self.bins[i][j].add(totalcost, item):
                        heapq.heappush(agenda, (rank, totalcost, item))

def sound(self, space, name):
    """
    @brief helper function for getting the path of music and effect sounds.
           Mainly used from sound.py and the sound object.
    @param space can be:
           system - music for intro, gui, logo and menu
           theme - ingame music, like tile hit and game music
    @returns the path (OS sensitive) of the sound. If the file does not
           exist, it returns a default sound and writes the error to a log file.
    """
    if space == 'system':
        path = os.path.join("sounds", name)
    elif space == 'theme_tile':
        path = os.path.join("themes", config['theme'], "sounds", "tiles", name)
    elif space == 'theme_music':
        path = os.path.join("themes", config['theme'], "sounds", "music", name)
    elif space == 'theme_ambient':
        path = os.path.join("themes", config['theme'], "sounds", "ambient", name)
    else:
        path = ''
    if os.path.exists(path):
        return path
    else:
        log.write('Cannot load sound: %s ' % name)
        path = os.path.join("sounds", "tile1.ogg")
        if os.path.exists(path):
            return path
        else:
            log.write('OMG, cannot load default sound - this is very strange :-(')

def process_answer(self, q_num, rv):
    question = self.question_list[q_num]
    # EVENTUALLY: log this event
    contains_all = True
    for input_element_id in self.answer_list[q_num].keys():
        if not rv.has_key(input_element_id):
            contains_all = False
            break
    if contains_all:
        for input_element_id in self.answer_list[q_num].keys():
            self.answer_list[q_num][input_element_id] = rv[input_element_id]
        answer_correct = question.check_answer(self.answer_list[q_num])
    else:
        answer_correct = False
    if self.practice_mode:
        if answer_correct:
            self.message_list[q_num] = 'Correct'
        else:
            self.message_list[q_num] = 'Incorrect'
    else:
        log.write(
            self.log_dir,
            self.log_name,
            q_num,
            question.get_question_path(),
            int(answer_correct) * self.mark_list[q_num],
            dict(self.answer_list[q_num]),
            int(time.time()) - self.timer
        )
        self.message_list[q_num] = 'Answer recorded'

def thread(self):
    if self.attr and m.isMayaRunning:
        value = self.getAttr()
        if self.attrValue != value:
            log.write('checkbox.thread: attr %s changed outside %s to %d'
                      % (self.attr, __name__, value))
            self.control.SetValue(bool(value))
            self.attrValue = value

def add_sister_prefixes_helper(a, ephrases, enode, i):
    """If a phrase comprises one or more (but not all) leftmost children of a
    constituent, then add it and give it a fake label."""
    j = i + enode.length
    if log.level >= 3:
        log.write("(i,j) = %s\n" % ((i, j),))
    x = enode.label
    j1 = i
    for ci in xrange(len(enode.children)):
        child = enode.children[ci]
        j1 += child.length
        if log.level >= 3:
            log.write("(i,j1) = %s\n" % ((i, j1),))
        if j1 < j and (i, j1) in ephrases:
            # constprefix3:
            #x1 = sym.fromtag("%s*" % x)
            # subcat-lr2:
            #subcat = [sister.label for sister in enode.children[ci+1:] if sister.required]
            #x1 = sym.fromtag("/".join(["%s*" % x] + subcat))
            # markov1:
            x1 = sym.fromtag("%s/%s" % (x, enode.children[ci+1].label))
            # markov2:
            #x1 = sym.fromtag("%s(%s)" % (x, enode.children[ci].label))
            a.espans.setdefault((i, j1), []).append(x1)
            prefix_labels.add(x1)
    for child in enode.children:
        add_sister_prefixes_helper(a, ephrases, child, i)
        i += child.length

def filterComments(self, test):
    data = self.api.notifications.get(start_time=self.last_viewed_comment + 1)['items']
    for rep in data:
        self.last_viewed_comment = max(self.last_viewed_comment, rep['date'])

        def _check(s):
            if 'photo' in s:
                return str(s['photo']['owner_id']) == self.self_id
            if 'video' in s:
                return str(s['video']['owner_id']) == self.self_id
            if 'post' in s:
                return str(s['post']['to_id']) == self.self_id

        # parenthesized so the ownership check applies to both notification
        # types ("and" binds tighter than "or" otherwise)
        if (rep['type'].startswith('comment_') or rep['type'].startswith('reply_comment')) \
                and _check(rep['parent']):
            txt = rep['feedback']['text']
            if test(txt):
                log.write('comments', txt)
                if rep['type'].endswith('photo'):
                    print('Deleting photo comment')
                    self.api.photos.deleteComment(owner_id=self.self_id,
                                                  comment_id=rep['feedback']['id'])
                elif rep['type'].endswith('video'):
                    print('Deleting video comment')
                    self.api.video.deleteComment(owner_id=self.self_id,
                                                 comment_id=rep['feedback']['id'])
                else:
                    print('Deleting wall comment')
                    self.api.wall.deleteComment(owner_id=self.self_id,
                                                comment_id=rep['feedback']['id'])

def rescore(self, models, weights, memo=None, add=False, check_states=False):
    """Recompute self.viterbi and self.states according to models and
    weights. Returns the Viterbi vector, and (unlike the decoder) only calls
    weights.dot on vectors of whole subderivations, which is handy for
    overriding weights.dot.

    If add == True, append the new scores instead of replacing the old ones.
    """
    if memo is None:
        memo = {}
    if id(self) in memo:
        return memo[id(self)]
    # lhuang: vviterbi means "vector Viterbi"
    vviterbi = None
    self.states = None
    for ded in self.deds:
        ded_vviterbi, states = self.rescore_deduction(ded, models, weights, memo, add=add)
        if self.states is None:
            self.states = states
        elif check_states and states != self.states:
            # don't check state at the root because we don't care
            # lhuang: LM intersection
            if print_states_warning:
                log.write("warning: Item.rescore(): id(ded)=%s: inconsistent states %s and %s\n"
                          % (id(ded), strstates(models, states), strstates(models, self.states)))
        if vviterbi is None or ded.viterbi < self.viterbi:
            vviterbi = ded_vviterbi
            self.viterbi = weights.dot(vviterbi)
    memo[id(self)] = vviterbi
    return vviterbi

def get_hyps(sent, goal, weights):
    """Assumes that oraclemodel.input() has been called"""
    # worst violators
    oracleweights = theoracle.make_weights(additive=True)
    # we use the in-place operations because oracleweights might be
    # a subclass of Vector
    oracleweights *= fear_weight
    oracleweights += weights
    goal.reweight(oracleweights)
    hyps = decoder.get_nbest(goal, 1, 1)
    result = []
    for hypv, hyp in hyps:
        hypscore = get_score(hypv, hyp)
        log.write("added new hyp: %s %s cost=%s score=%s\n" % (
            " ".join(sym.tostring(e) for e in hyp),
            hypv, weights.dot(hypv), hypscore))
        # the learner MUST not see the oracle features
        hypv = theoracle.clean(hypv)
        result.append((hypv, hyp, hypscore))
    return result

def pop(self, key, index):
    """Pop the value at an index off a list responding to a key.

    Keys: "user.email" means root [ "user" ] [ "email" ]

    This method is thread safe.
    """
    with self._lock:
        key_levels = key.split(".")
        position = self._data
        for key_level in key_levels:
            # Check the current type, so the type of the last iteration's
            # position isn't checked. This will allow getting non-dict values.
            if type(position) == dict:
                if key_level in position:
                    # Set position to next dict in "tree"
                    position = position[key_level]
                    continue
                log.write("Error in data_container: Invalid key: %s" % key)
                raise ValueError("Error: Invalid key: %s" % key)
        if not type(position) == list:
            # note: the format arguments must be a single tuple
            log.write("Error in data_container: Cannot pop from %s ( %s ), has to be a list"
                      % (type(position), key))
            raise ValueError("Error: Cannot pop from %s ( %s ), has to be a list"
                             % (type(position), key))
        position.pop(index)

def set(self, key, value):
    """Set a value.

    Keys: "user.email" means root [ "user" ] [ "email" ]

    This method is thread safe.
    """
    with self._lock:
        key_levels = key.split(".")
        position = self._data
        # Omit the last key_level from the iteration so position is the dict
        # we want to save the value in.
        for key_level in key_levels[:-1]:
            if type(position) == dict:
                if key_level in position:
                    # Set position to the next position
                    position = position[key_level]
                # Silently create the dict if it doesn't exist
                else:
                    position[key_level] = {}
                    position = position[key_level]
            else:
                log.write("Error in data_container: Invalid key: %s" % key)
                raise ValueError("Error: Invalid key: %s" % key)
        # Actually set the value
        position[key_levels[-1]] = value

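# Minimal, self-contained sketch of the dotted-key walk that set() above uses
# (pop() does the same walk without creating levels); illustrative only, the
# real methods add locking and error logging around it:
def _set_by_dotted_key(data, key, value):
    parts = key.split(".")
    position = data
    for part in parts[:-1]:
        position = position.setdefault(part, {})
    position[parts[-1]] = value

# d = {}
# _set_by_dotted_key(d, "user.email", ["a@example.com"])
# d == {"user": {"email": ["a@example.com"]}}
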
def attrCallback(self, *args):
    if self.attr and m.isMayaRunning:
        value = self.getAttr()
        if self.attrValue != value:
            log.write("checkbox.attrCallback: attr %s changed outside %s to %d"
                      % (self.attr, __name__, value))
            self.control.SetValue(bool(value))
            self.attrValue = value

def setRelation(self, uid):
    try:
        self.api.account.saveProfileInfo(relation_partner_id=uid)
        self.bf = self.users[uid]
        log.write('relation', uid)
    except Exception:
        log.write('relation', str(uid) + ' failed')

def __init__(self, filename, feat, mapdigits=False, p_unk=None):
    model.Model.__init__(self)
    log.write("Reading language model from %s...\n" % filename)
    if p_unk is not None:
        self.ngram = Ngram(filename, override_unk=-p_unk)
    else:
        self.ngram = Ngram(filename)
    self.order = self.ngram.order
    self.mapdigits = mapdigits
    self.unit = svector.Vector(feat, 1.)
    self.START = self.ngram.lookup_word("<s>")
    self.STOP = self.ngram.lookup_word("</s>")

def handle_ready(self, d):
    log.write('connected as ' + d['user']['username'])
    self.user_id = d['user']['id']
    self.timer_thread = _thread.start_new_thread(self.timer_loop, ())
    if config.bot.zkillboard is not None:
        self.zkill_thread = _thread.start_new_thread(self.zkill_loop, ())
    if config.bot.warframe is not None:
        self.warframe_thread = _thread.start_new_thread(self.warframe_loop, ())
    if config.bot.twitter is not None:
        self.twitter_thread = _thread.start_new_thread(self.twitter_loop, ())
    if config.bot.steam_news is not None:
        self.steam_news_thread = _thread.start_new_thread(self.steam_news_loop, ())

def handle_hello(self, _, d):
    log.write('connected to %s' % d['_trace'])
    self.heartbeat_thread = _thread.start_new_thread(
        self.heartbeat_loop, (d['heartbeat_interval'],))
    self.send(OP.IDENTIFY, {
        'token': config.bot.token,
        'properties': {
            '$browser': 'github.com/raylu/sbot',
            '$device': 'github.com/raylu/sbot',
        },
        'compress': True,
        'large_threshold': 50,
        'shard': [0, 1],
    })

def warframe_loop(self):
    last_alerts = []
    while True:
        time.sleep(5 * 60)
        try:
            alerts = warframe.alert_analysis()
            broadcast_alerts = set(alerts) - set(last_alerts)
            if len(broadcast_alerts) > 0:
                self.send_message(config.bot.warframe['channel'], '\n'.join(broadcast_alerts))
            last_alerts = alerts
        except requests.exceptions.HTTPError as e:
            log.write('warframe: %s\n%s' % (e, e.response.text[:1000]))
        except requests.exceptions.RequestException as e:
            log.write('warframe: %s' % e)

def getRecordList(self, subDomain=''):
    n = 'getRecordList'
    log.write(n, 'start')
    func = 'Record.List'
    params = {'domain': self.__dp_domain}
    if subDomain != '':
        params['keyword'] = subDomain
    result = self.__sendRequest(func, params)
    if result['status']['code'] == '1':
        log.write(n, 'success')
        return result['records']
    else:
        raise Exception(result['status']['message'])

def initialize_log(self, name):
    self.log_name = name
    # initialize log file
    for i in range(len(self.question_list)):
        # log question path relative to question library
        log.write(
            self.log_dir,
            self.log_name,
            i,
            self.question_list[i].get_question_path(),
            0,   # mark
            '',  # answer
            0    # time
        )

def Check(conf, vars, cmake, tmpdir, directory, libs):
    log.write('='*80)
    log.Println('Checking PRIMME library...')

    if petscconf.PRECISION != 'double':
        log.Exit('ERROR: PRIMME is supported only in double precision.')
    if petscconf.IND64:
        log.Exit('ERROR: cannot use external packages with 64-bit indices.')

    functions_base = ['primme_set_method', 'primme_Free', 'primme_initialize']
    if directory:
        dirs = [directory]
    else:
        dirs = check.GenerateGuesses('Primme')

    include = 'PRIMMESRC/COMMONSRC'
    if not libs:
        libs = ['-lprimme']
    if petscconf.SCALAR == 'real':
        functions = functions_base + ['dprimme']
    else:
        functions = functions_base + ['zprimme']

    for d in dirs:
        if d:
            if 'rpath' in petscconf.SLFLAG:
                l = [petscconf.SLFLAG + d] + ['-L' + d] + libs
            else:
                l = ['-L' + d] + libs
            f = ['-I' + d + '/' + include]
        else:
            l = libs
            f = []
        if check.Link(tmpdir, functions, [], l+f):
            conf.write('#ifndef SLEPC_HAVE_PRIMME\n#define SLEPC_HAVE_PRIMME 1\n#endif\n\n')
            vars.write('PRIMME_LIB = ' + str.join(' ', l) + '\n')
            vars.write('PRIMME_FLAGS = ' + str.join(' ', f) + '\n')
            cmake.write('set (SLEPC_HAVE_PRIMME YES)\n')
            cmake.write('find_library (PRIMME_LIB primme HINTS ' + d + ')\n')
            cmake.write('find_path (PRIMME_INCLUDE primme.h ' + d + '/PRIMMESRC/COMMONSRC)\n')
            return l+f

    log.Println('ERROR: Unable to link with PRIMME library')
    log.Println('ERROR: In directories ' + ''.join([s + ' ' for s in dirs]))
    log.Println('ERROR: With flags ' + ''.join([s + ' ' for s in libs]))
    log.Exit('')

def _read_soft_dog_data():
    if USE_FAKE_SOFT_DOG_DATA:
        log.write("USE_FAKE_SOFT_DOG_DATA")
        return FAKE_SOFT_DOG_DATA
    soft_dog_data = ""
    out_buffer = (ctypes.c_char * SOFT_DOG_DATA_SIZE)('\0')
    if __read_soft_dog_data(out_buffer):
        for item in out_buffer:
            if item == '\0':
                break
            soft_dog_data = soft_dog_data + item
    return soft_dog_data

def get_nbest(goal, n_best, ambiguity_limit=None):
    if log.level >= 1:
        log.write("  Extracting derivation(s)...\n")
    result = []
    nbest = forest.NBest(goal, ambiguity_limit=ambiguity_limit)
    for deriv in itertools.islice(nbest, n_best):
        hyp = Hypothesis()
        hyp.words = [sym.tostring(e) for e in deriv.english()]
        hyp.vector = deriv.vector()
        hyp.deriv = str(deriv)
        result.append(hyp)
    return result

def load_mount_history(self):
    self.history_array = []
    # Load the history
    if os.path.exists(globals.history_list):
        try:
            file_history = open(globals.history_list, 'rU')
            self.mount_history = file_history.readlines()
            for line in self.mount_history:
                self.history_storage.append([line.strip()])
                self.history_array.append(line.strip())
        except IOError, (errno, strerror):
            log.write(globals.mount_log,
                      _('Error loading history.\nOS error(%s): %s' % (errno, strerror)))
        except:
            # ignore any other error while loading the history
            pass

def bgw_step_two(network, shares):
    results = {}
    for key, (gate_type, output_gate, order) in GATES.items():
        if output_gate not in results:
            results[output_gate] = {1: None, 2: None}
        if gate_type == INP:
            results[output_gate][order] = shares[key]
        elif gate_type == ADD:
            add_result = add(results[key][1], results[key][2])
            results[output_gate][order] = add_result
            debug(f"Calculating gate {key} (ADD): {results[key][1]} + {results[key][2]} "
                  f"(mod {PRIME}) = {add_result}")
        elif gate_type == MUL:
            results[output_gate][order] = multiply(network, results[key][1], results[key][2], key)
        else:
            write(f"Error. Unable to evaluate {gate_type} in key {key}")
    return results[OUTPUT_GATE][1]

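# Hedged example of the GATES table shape bgw_step_two() above expects; the
# wiring below is purely illustrative, not taken from the module. Because the
# function evaluates in a single pass, it also assumes GATES iterates in
# topological order (dict insertion order in modern Python):
#
#   GATES = {
#       'a':   (INP, 'sum', 1),   # input wire 'a' feeds slot 1 of gate 'sum'
#       'b':   (INP, 'sum', 2),   # input wire 'b' feeds slot 2 of gate 'sum'
#       'sum': (ADD, 'out', 1),   # 'sum' = a + b (mod PRIME), feeds 'out'
#   }
#
# results[gate] holds {1: left_operand, 2: right_operand} until both operands
# arrive, which is why each entry is initialized to {1: None, 2: None}.
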
def make_menace_move(self, position):
    if self.current_player != 1:
        raise Exception("wrong player")
    self.board.check_valid_move(1, position)
    i, color = self.get_matchbox()
    if i is not None:
        log.write(self.gid, {"mb": i, 1: position, "color": color[position]})
    self.make_move(1, position)
    self.current_player = 2

def checkConf(self, cid):
    if cid + CONF_START in self.good_conf:
        return self.good_conf[cid + CONF_START]
    messages = self.api.messages.getHistory(chat_id=cid)['items']
    for i in messages:
        if i.get('action') == 'chat_create':
            self.leaveConf(cid)
            log.write('conf', str(i.get('user_id')) + ' ' + str(cid))
            return False
    title = self.api.messages.getChat(chat_id=cid).get('title', '')
    if self.bad_conf_title(title):
        self.leaveConf(cid)
        log.write('conf', 'conf ' + str(cid) + ' (name: {})'.format(title))
        return False
    self.good_conf[cid + CONF_START] = True
    return True

def calculate_epsilon(chi, s_n):
    deriv_x_n = np.transpose(
        np.matmul(np.array(np.transpose(np.matrix(numerical.jacobian(-2)))), s_n))
    res_linapprox = np.transpose(
        np.matrix(
            np.array(aux.RADIANCE_FTIR[:])
            - (np.array(aux.RADIANCE_LBLDIS[0][-2][:]) + deriv_x_n)))
    linear_approx = np.float_(
        np.dot(np.matmul(np.transpose(res_linapprox), aux.S_Y_INV_MATRIX[:]),
               res_linapprox))
    change_of_costfunction = aux.CHI2[-1] - chi
    change_of_costfunction_for_linear_model = aux.CHI2[-1] - linear_approx
    eps = change_of_costfunction / change_of_costfunction_for_linear_model
    log.write("# epsilon = {}".format(eps))
    return eps

def update_weights(weights, updates, alphas):
    # sequential minimal optimization
    # minimize 1/2 ||sum(updates)||**2 + C*sum(xis)
    # one xi for all candidates for each sentence
    # s.t. each margin >= loss - xi
    # s.t. each xi >= 0

    # these are not sensitive to feature_scales, but maybe they should be

    # this is not right -- gammas should be preserved across calls
    if l1_regularization:
        gammas = svector.Vector()

    iterations = 0
    done = False
    while not done:
        if l1_regularization:
            for f in weights:
                delta = max(
                    -l1_regularization * max_learning_rate * len(updates) - gammas[f],
                    min(weights[f],
                        l1_regularization * max_learning_rate * len(updates) - gammas[f]))
                gammas[f] += delta
                weights[f] += -delta
            if log.level >= 4:
                log.write("  gammas: %s\n" % gammas)

        done = True
        sentids = updates.keys()
        #random.shuffle(sentids)
        for sentid in sentids:
            vscores = updates[sentid]
            if len(vscores) < 2:
                continue
            if log.level >= 4:
                log.write("  sentence %s\n" % sentid)
            try:
                weights, alphas[sentid] = update_sentence_weights(
                    weights, updates[sentid], alphas[sentid])
                done = False
            except StopOptimization:
                pass
            if log.level >= 4:
                log.write("  alphas: %s\n"
                          % (" ".join(str(alpha) for alpha in alphas[sentid])))
        iterations += 1
        if iterations > 1000:
            log.write("  SMO: 1000 passes through data, stopping\n")
            break
    #log.write("  intermediate weights: %s\n" % weights)
    return weights, alphas

def dump(self):
    for i in xrange(self.n + 1):
        for j in xrange(i, self.n + 1):
            log.write("Span (%d,%d):\n" % (i, j))
            for (totalcost, item) in self.bins[i][j]:
                log.write("%s totalcost=%f\n" % (str(item), totalcost))
    log.write("Goals:\n")
    for (totalcost, item) in self.goal:
        log.write("%s totalcost=%f\n" % (str(item), totalcost))

def add_axiom(self, i, j, r, latticev=model.zero):
    bin = self.bins[i][j]
    (totalcost, (cost, dcost, newstates)) = self.compute_item(r, (), i, j, latticev)
    if totalcost < bin.cutoff:
        ded = forest.Deduction((), r, dcost, viterbi=cost)
        item = forest.Item(r.lhs, i, j, deds=[ded], states=newstates, viterbi=cost)
        bin.add(totalcost, item)
    else:
        if log.level >= 4:
            log.write("Prepruning: %s\n" % r)
        self.prepruned += 1

def guess_apr(tt, fi, rl, ri):
    global SEARCH_APR
    tl = tt * (1 - fi)
    ti = tt * fi
    rms = inversion.__only_fwd(tau_liq=tl, tau_ice=ti, reff_liq=rl, reff_ice=ri,
                               filenum=int(tt + fi + rl + 10. * ri))[0]
    # NOTE: a fresh Lock is created on every call, so this provides no mutual
    # exclusion between threads; a module-level lock would be needed for that
    lock = threading.Lock()
    lock.acquire()
    log.write("{} {}".format(rms, [tl, ti, rl, ri]))
    SEARCH_APR_RMS.append(rms)
    SEARCH_APR_MCP.append([tl, ti, rl, ri])
    lock.release()
    return

def load_previously_mounted_images(self):
    if os.path.exists(globals.mount_list):
        try:
            file_previous_images = open(globals.mount_list, 'rU')
            previous_image_history = file_previous_images.readlines()
            for line in previous_image_history:
                previous_image = line.strip().split(',')
                self.image_storage.append([
                    previous_image[0],
                    previous_image[1],
                    previous_image[2]
                ])
            os.remove(globals.mount_list)
        except IOError, (errno, strerror):
            log.write(globals.mount_log,
                      _('Error loading history.\nOS error(%s): %s' % (errno, strerror)))
        except:
            # ignore any other error while loading the mount list
            pass

def get_value_by_key(key):
    global SOFT_DOG_DATA_HAS_PARSED
    if not SOFT_DOG_DATA_HAS_PARSED:
        _parse_soft_dog_data(_read_soft_dog_data())
        SOFT_DOG_DATA_HAS_PARSED = True
    key_list = ["token", "version", "schoolAndClassroomCode", "studentCount",
                "startDate", "endDate", "hash", "schoolCode", "classroomCode"]
    if key not in key_list:
        log.write("Key: " + key + " not correct.")
        return ""
    return SOFT_DOG_DATA[key]

def getPersonID(self, nickName, source):
    self.ping()
    try:
        sql = "select personID from personInfo where nickName = '%s' and infoSource = '%s'" % (
            nickName, source)
        res = self.select(sql)
        if len(res) == 0:
            return -1
        else:
            return int(res[-1][0])
    except Exception as e:
        log.write('db', 'error',
                  'getPersonID() Cannot execute sql(%s), exception: %s' % (sql, e))
        return -1

def run(self):
    self.timeout = int(self.config_ini['Timeout'])
    for _port in self.port_list:
        self.server = ''
        self.banner = ''
        self.port = int(_port)
        self.scan_port()  # port scan
        if not self.banner:
            continue
        self.server_discern()  # service identification
        if self.server == '':
            web_info = self.try_web()  # try accessing it as a web service
            if web_info:
                log.write('web', self.ip, self.port, web_info)
                time_ = datetime.datetime.now()
                mongo.NA_INFO.update(
                    {'ip': self.ip, 'port': self.port},
                    {"$set": {'banner': self.banner, 'server': 'web',
                              'webinfo': web_info, 'time': time_}})

def process_heldout(sent):
    # Need to add an flen attribute that gives the length of the input sentence.
    # In the lattice-decoding case, we have to make a guess.
    distance = sent.compute_distance()
    sent.flen = distance.get((0, sent.n - 1), None)  # could be missing if n == 0
    theoracle.input(sent)
    log.write("done preparing\n")

    global decoder_errors
    try:
        goal = thedecoder.translate(sent)
        thedecoder.process_output(sent, goal)
        decoder_errors = 0
        if goal is None:
            raise Exception("parse failure")
    except Exception:
        import traceback
        log.write("decoder raised exception: %s"
                  % "".join(traceback.format_exception(*sys.exc_info())))
        decoder_errors += 1
        if decoder_errors >= 100:
            log.write("decoder failed too many times, passing exception through!\n")
            raise
        else:
            return

    goal.rescore(theoracle.models, thedecoder.weights, add=True)
    bestv, best = decoder.get_nbest(goal, 1)[0]
    log.write("done decoding\n")

    bestg = theoracle.finish(bestv, best)
    #bestscore = theoracle.make_weights(additive="sentence").dot(bestg)
    bestscore = theoracle.make_weights(additive="edge").dot(bestg)
    log.write("best hyp: %s %s cost=%s score=%s\n"
              % (" ".join(sym.tostring(e) for e in best), bestv,
                 thedecoder.weights.dot(bestv), bestscore))
    sent.score_comps = bestg
    sent.ewords = [sym.tostring(e) for e in best]
    return sent

def __watch_thread(running_token, cancellation_token, clients,
                   on_client_closed, polling_interval, retry_count):
    retry_cache = {}
    while not cancellation_token.is_set():
        if not running_token.is_set():
            running_token.wait()
        if len(clients) > 0:
            def _get_query_url(client):
                return 'http://{}/{}'.format(client.address, query.IS_CLIENT_ALIVE)

            urls = [_get_query_url(client) for client in clients]
            results = http_request.gets_sync(urls)

            # count the number of failed requests per client
            client_dict = {_get_query_url(client): client for client in clients}
            for url, (status_code, _) in results.items():
                client = client_dict[url]
                if status_code != http_request.STATUS_OK:
                    log.write('client may be disconnected: {}'.format(url))
                    if client in retry_cache:
                        retry_cache[client] += 1
                    else:
                        retry_cache[client] = 1
                else:
                    log.write('client is alive: {}'.format(url))
                    if client in retry_cache:  # was checking `url`, which never matched
                        retry_cache[client] = 0

            # treat a client as disconnected once it exceeds retry_count failures;
            # materialize the list so we can pop from retry_cache while iterating
            closed_clients = [client for client in retry_cache
                              if retry_cache[client] > retry_count]
            for client in closed_clients:
                on_client_closed(client)
                retry_cache.pop(client)
        time.sleep(1)

def execute(attempt, params):
    import psycopg2
    global conn, enabled, emergency
    if conn is None and not emergency:
        if not config.get('db_logger.host') or not config.get('db_logger.database'):
            print('Incorrect database configuration!')
            enabled = False
            return
        try:
            conn = psycopg2.connect("dbname='{}' user='{}' host='{}' password='{}'".format(
                config.get('db_logger.database'), config.get('db_logger.username'),
                config.get('db_logger.host'), config.get('db_logger.password')))
        except psycopg2.OperationalError as e:
            print(e, flush=True)
            _log.write('error', 'PostgreSQL connection error: ' + str(e))
            conn = None
    else:
        if os.path.isfile(accounts.getFile('db_log.csv')):
            try:
                restoreRecords()
            except psycopg2.Error:
                if attempt < 5:
                    return execute(attempt + 1, params)
                else:
                    emergency = True
    if emergency or attempt >= 5:
        emergency = True
        with open(accounts.getFile('db_log.csv'), 'a') as f:
            csv.writer(f).writerow(params)
        return True
    else:
        if conn is None:
            return False
        try:
            conn.cursor().execute(PG_QUERY, params)
            conn.commit()
        except psycopg2.Error as e:
            print(e, flush=True)
            _log.write('error', 'PostgreSQL error: ' + str(e))
            conn = None
            return False
        else:
            return True

def expand_goal(self, bin1):
    for (cost1, item1) in bin1:
        if item1.x == self.start_nonterminal:
            if log.level >= 3:
                log.write("Considering: %s\n" % str(item1))
            dcost = sum((m.finaltransition(item1.states[m_i])
                         for (m_i, m) in enumerate(self.models)),
                        svector.Vector())
            cost = item1.viterbi + self.weights.dot(dcost)
            ded = forest.Deduction((item1,), None, dcost, viterbi=cost)
            self.goal.add(cost, forest.Item(None, 0, self.flattice.n - 1,
                                            deds=[ded], states=(), viterbi=cost))

def compute_alpha(weights, update, loss, minalpha, maxalpha):
    """MIRA formula for update size"""
    sumsq = normsquared(update)
    margin = -weights.dot(update)
    if log.level >= 4:
        log.writeln("delta = (%s-%s)/%s" % (loss, margin, sumsq))
    if sumsq > 0.:
        alpha = (loss - margin) / sumsq
        alpha = max(minalpha, min(maxalpha, alpha))
    elif loss - margin > 0.:
        alpha = maxalpha
    elif loss - margin < 0.:
        alpha = minalpha
    else:
        log.write("compute_alpha: 0/0, this shouldn't happen\n")
        alpha = 0.
    return alpha

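# Hedged worked example of the MIRA step above, with hypothetical numbers and
# plain floats standing in for the weight/update vectors: with loss = 2.0,
# margin = -1.0 and ||update||^2 = 4.0, the unclipped step size is
# (2.0 - (-1.0)) / 4.0 = 0.75, then clipped into [minalpha, maxalpha]:
def _alpha_sketch(loss, margin, sumsq, minalpha=0.0, maxalpha=1.0):
    if sumsq > 0.0:
        return max(minalpha, min(maxalpha, (loss - margin) / sumsq))
    return maxalpha if loss - margin > 0.0 else minalpha

assert _alpha_sketch(2.0, -1.0, 4.0) == 0.75
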
def output_gate(gate_inputs, final_gate, network):
    '''Perform Shamir secret sharing for the final value of this party's circuit.'''
    if len(gate_inputs[final_gate]) != 1:
        raise Exception("[FINAL][ERROR] Incorrect number of inputs: %d" %
                        len(gate_inputs[final_gate]))
    final_value = gate_inputs[final_gate][0]
    log.debug("[FINAL] Sending final value %d to all parties" % final_value)
    send_final_value(final_value, final_gate, network)
    final_shares = receive_shares(final_gate, network)
    log.write("[FINAL] Final received shares %s" % final_shares)
    final_result = interpolate(final_shares, final=True)
    return final_result

def mul_gate(gate_inputs, curr_gate, next_gate, input_no, network):
    '''Multiply two inputs, perform degree reduction via secret sharing and
    interpolation, and store the result in gate_inputs.'''
    if len(gate_inputs[curr_gate]) != 2:
        raise Exception("[MUL][ERROR] Incorrect number of inputs: %d" %
                        len(gate_inputs[curr_gate]))
    op1, op2 = gate_inputs[curr_gate]
    needs_reduction_res = modprime.mul(op1, op2)
    log.write("[MUL] Performing (%d * %d) mod prime = %d" %
              (op1, op2, needs_reduction_res))
    res = degree_reduction(needs_reduction_res, curr_gate, network)
    if input_no == 1:
        gate_inputs[next_gate] = [res]
    else:
        gate_inputs[next_gate].append(res)

def degree_reduction(needs_reduction_res, curr_gate, network):
    '''Wrapper function to perform degree reduction. Returns the interpolated value.'''
    # Create a new polynomial, create shares and send them to all parties
    deg_red_coeffs = gen_coeffs(needs_reduction_res)
    log.debug("[DEGRED] coeffs %s" % deg_red_coeffs)
    create_and_send_shares(curr_gate, deg_red_coeffs, network)
    # Get shares sent from all parties, and interpolate to get the new output
    received_shares = receive_shares(curr_gate, network)
    log.write("[DEGRED] received shares %s" % received_shares)  # was logging the function object
    res = interpolate(received_shares, final=False)
    log.write("[DEGRED] interpolated output of gate %d: %d" %
              (curr_gate, res))
    return res

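# Hedged sketch of what interpolate() used above is assumed to do: Lagrange
# interpolation at x = 0 over GF(prime) to recover the shared value from
# (party_id, share) pairs. Names and signature here are illustrative, not the
# module's API:
def _interpolate_at_zero(shares, prime):
    secret = 0
    for x_i, y_i in shares:
        num, den = 1, 1
        for x_j, _ in shares:
            if x_j != x_i:
                num = (num * -x_j) % prime
                den = (den * (x_i - x_j)) % prime
        # multiply by the modular inverse of den (prime field, Fermat's little theorem)
        secret = (secret + y_i * num * pow(den, prime - 2, prime)) % prime
    return secret

# shares of secret 5 under f(x) = 5 + 3x (mod 11): f(1) = 8, f(2) = 0
assert _interpolate_at_zero([(1, 8), (2, 0)], 11) == 5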