def connect(msg, port, host='localhost', counter=0):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setblocking(True)  # setblocking() takes a flag; the original passed 5, which is merely truthy
    try:
        s.connect((host, port))
    except Exception:
        return {'error': 'cannot connect host:' + str(host) + ' port:' + str(port)}
    try:
        msg['version'] = custom.version
    except Exception:
        pass
    r = send_msg(msg, s)
    if r == 'peer died':
        return 'peer died: ' + str(msg)
    data = recvall(s)
    if data == 'broken connection':
        tools.log('broken connection: ' + str(msg))
        return connect_error(msg, port, host, counter)
    if data == 'no length':
        tools.log('no length: ' + str(msg))
        return connect_error(msg, port, host, counter)
    return data

def recvall(client, data=''):
    try:
        data += client.recv(MAX_MESSAGE_SIZE)
    except Exception:
        time.sleep(0.0001)
        tools.log('not ready')
        return recvall(client, data)  # the original dropped this return, losing the result
    if not data:
        return 'broken connection'
    if len(data) < 5:
        return recvall(client, data)
    try:
        length = int(data[0:5])  # messages are framed with a 5-digit length prefix
    except ValueError:
        return 'no length'
    data = data[5:]
    while len(data) < length:
        d = client.recv(MAX_MESSAGE_SIZE - len(data))
        if not d:
            return 'broken connection'
        data += d
    try:
        data = unpackage(data)
    except Exception:
        pass
    return data

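# A minimal sketch of the sending side implied by recvall's 5-digit length
# prefix. send_msg itself is not shown in this section, so the name below and
# the package() call are assumptions mirroring unpackage() above:
def send_msg_sketch(msg, sock):
    payload = package(msg)  # serialize the message
    framed = str(len(payload)).zfill(5) + payload  # 5-digit, zero-padded length header
    try:
        sock.sendall(framed)
        return 'success'
    except Exception:
        return 'peer died'
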
def run(self):
    self.state = None
    self.end = False
    tools.log("Starting", log_type=tools.LOG_MAIN, class_type=self)
    name = "/tmp/tmpTop"  ## WINDOWS: fix the /tmp path
    # note: the original used "/tmp/tmpTop.tex" here, producing tmpTop.tex.tex
    try:
        while True:
            latex = self.queue.get(timeout=15)
            if not self.queue.empty():
                continue  # drain the queue and only compile the most recent document
            with open(name + ".tex", "w") as f:  ## WINDOWS: fix the /tmp path
                f.write(latex)
            cmd = "/usr/bin/pdflatex -halt-on-error -output-directory=%(dir)s %(name)s.tex" % {"name": name, "dir": "/tmp"}
            args = shlex.split(cmd)
            p = subprocess.Popen(args, stdout=subprocess.PIPE)
            while p.poll() is None:
                tools.log("Compiling ..", log_type=tools.LOG_THREAD, class_type=self)
                time.sleep(0.1)
            self.state = p.wait() == 0
            tools.log("Compilation end: " + str(self.state), log_type=tools.LOG_THREAD, class_type=self)
            self._update_state()
    except queue.Empty:
        tools.log("The queue is empty -> thread end", log_type=tools.LOG_THREAD, class_type=self)
        self.end = True
    tools.log("Ending", log_type=tools.LOG_MAIN, class_type=self)

def profile_range_draws(command, r_draws, dataset, usegpu=False):
    log("\n\n=== " + dataset + " dataset. " + command.split()[1] +
        (' (using GPU)' if usegpu else '') + " ===")
    log("Ndraws Time(s) Log-Likeli. RAM(GB) GPU(GB) Converg.")
    for r in range(1, r_draws + 1):
        os.system("{} {} {} {} prof".format(command, r * 100, dataset, int(usegpu)))

def save_db_table(table_name: str, df: pd.DataFrame, fields: list):
    ok = False
    connect_string = 'sqlite:///traffic.sqlite3'
    try:
        sql_engine = sql.create_engine(connect_string, pool_recycle=3600)
        db_connection = sql_engine.connect()
    except Exception as ex:
        print(ex)
        return ok  # without this, the finally block below would hit an undefined db_connection
    try:
        if len(fields) > 0:
            df = df[fields]
        df.to_sql(table_name, db_connection, if_exists='append', chunksize=20000, index=False)
        tools.log(f'dataframe appended to {connect_string}')
        ok = True
    except ValueError as vx:
        print(vx)
    except Exception as ex:
        print(ex)
    finally:
        db_connection.close()
    return ok

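# A short usage sketch for save_db_table; the table name, columns, and rows
# here are hypothetical:
df = pd.DataFrame({'station': ['A', 'B'], 'count': [120, 87], 'note': ['x', 'y']})
if save_db_table('traffic_counts', df, ['station', 'count']):
    tools.log('save succeeded')  # only the listed fields are written
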
def pull(self, *args, log_uniform=False, id=False):
    args = list(args)
    assert isinstance(args[0], str)
    args[0] = make_id(args[0]) if id else args[0]
    # opt = trial.suggest_categorical('optimizer', ['MomentumSGD', 'Adam'])
    if isinstance(args[1], list):
        return self.log_and_return(
            args, self.trial.suggest_categorical(*args))
    # num_layers = trial.suggest_int('num_layers', 1, 3)
    elif isinstance(args[1], int) and isinstance(args[2], int):
        return self.log_and_return(args, self.trial.suggest_int(*args))
    # rate = trial.suggest_discrete_uniform('rate', 0.0, 1.0, 0.1)
    # checked before the two-float case, which would otherwise swallow it;
    # also, `is` must not be used to compare integers (the original had `is 4`)
    elif len(args) == 4:
        return self.log_and_return(
            args, self.trial.suggest_discrete_uniform(*args))
    # learning_rate = trial.suggest_loguniform('learning_rate', 1e-5, 1e-2)
    # dropout_rate = trial.suggest_uniform('dropout_rate', 0.0, 1.0)
    elif isinstance(args[1], float) and isinstance(args[2], float):
        if log_uniform:
            return self.log_and_return(
                args, self.trial.suggest_loguniform(*args))
        return self.log_and_return(args, self.trial.suggest_uniform(*args))
    else:
        log("FAILED TO PULL FOR ARGS", args, color="red")
        raise Exception("AI.Pull failed")

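# A brief usage sketch for pull(); the hyperparameter names and ranges are
# hypothetical:
#   optimizer = self.pull('optimizer', ['MomentumSGD', 'Adam'])    # categorical
#   num_layers = self.pull('num_layers', 1, 3)                     # int range
#   lr = self.pull('learning_rate', 1e-5, 1e-2, log_uniform=True)  # log-uniform float
#   rate = self.pull('rate', 0.0, 1.0, 0.1)                        # discrete step
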
def _init_even_class_index(self):
    self._even_class_index = [[] for i in range(self._num_classes)]
    sample_per_class = max(int(len(self._train_y) / self._num_classes), 1)
    log_info = list()
    for i in range(self._num_classes):
        class_cnt = len(self._each_class_index[i])
        tmp = []
        log_info.append([i, class_cnt])
        # fixme: class_cnt may be 0
        if class_cnt == 0:
            pass
        elif class_cnt < sample_per_class:
            # repeat the class's indices, then top up with a random sample
            tmp = self._each_class_index[i] * int(sample_per_class / class_cnt)
            tmp += random.sample(
                self._each_class_index[i], sample_per_class - len(tmp))
        else:
            tmp += random.sample(
                self._each_class_index[i], sample_per_class)
        random.shuffle(tmp)
        self._even_class_index[i].extend(tmp)
    log("init even class index, [class_id, class_cnt]={}".format(log_info))

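# A worked example of the oversampling arithmetic above (numbers are
# hypothetical): with sample_per_class = 10 and a class holding only
# class_cnt = 3 indices, the index list is repeated int(10 / 3) = 3 times
# (9 entries) and topped up with random.sample(..., 10 - 9), giving an
# even 10 entries for that class.
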
def __init__(self, metadata, train_output_path="./", test_input_path="./"):
    """
    Initialization for model
    :param metadata: a dict formed like:
        {"class_num": 7, "train_num": 428, "test_num": 107, "time_budget": 1800}
    """
    self.done_training = False
    self.metadata = metadata
    self.train_loop_num = 0
    log('Metadata: {}'.format(self.metadata))
    self.data_manager = None
    self.model_manager = None
    self.train_output_path = train_output_path
    self.test_input_path = test_input_path
    self.second_model = None
    self.over_num = 6
    self.last_pred = None
    self.start = True

def ff(queue, g, b, s):
    while not b():
        time.sleep(0.0001)
        try:
            g(queue.get(False), DB)
        except Exception:
            tools.log('suggestions ' + s + ' ' + str(sys.exc_info()))

def _blending_ensemble(self):
    selected_k_best = [
        self._k_best_predicts[i] for i, a in enumerate(self._k_best_auc)
        if a > 0.0
    ]
    if len(selected_k_best) > 5:
        # the original truncated after `selected` was built (and assigned
        # len() instead of the slice), so the cap never took effect
        selected_k_best = selected_k_best[:5]
    each_model_k_aucs, selected_each_model_k_best = self._get_each_model_top_k_predicts()
    if self._round_num >= 2:
        selected = selected_k_best + selected_each_model_k_best
    else:
        selected = selected_k_best
    log("model_num: {} Select k best {} predicts which have auc {}, ".format(
            self._model_num, self._keep_num, self._k_best_auc) +
        "each model {} best which have auc {}, ".format(
            self._each_model_keep_num, each_model_k_aucs) +
        "and each previous model's best predict which have auc " +
        "{} ".format([
            '({}:{})'.format(k, v)
            for k, v in self._each_model_best_auc.items()
        ]))
    return np.mean(selected, axis=0)

async def recheck(ctx, user: discord.Member):
    try:
        userid = str(user.id)
        username = str(user.name)
        print("[INFO] Une nouvelle vérification pour " + username + " a été demandée.")
        tools.log("[INFO] Une nouvelle vérification pour " + username + " a été demandée.")
        # strip the verification role and all three team roles
        for role_name in ("Verified", "instinct", "valor", "mystic"):
            role = get(ctx.guild.roles, name=role_name)
            if role in user.roles:
                await user.remove_roles(role)
        os.remove("servers_lists\\" + userid + "_servers_list.txt")
        await user.send(
            "[Pokémon GO Marseille]\n:flag_us: You have been asked for a new verification, please follow the authorization link and use ./verify to verify yourself.\n:flag_fr: Une nouvelle vérification de votre part est requise, veuillez suivre le lien d'autorisation et utilisez ./verify pour vous vérifier."
        )
    except KeyboardInterrupt:
        return
    except Exception as e:
        print(
            Fore.RED + Style.BRIGHT +
            "[WARN] Une erreur inconnue est survenue. Veuillez vérifier les fichiers Anti-Cheat.log et Anti-Cheat_traceback.log pour en savoir plus."
            + Style.RESET_ALL)
        tools.log("[ERRO] " + str(e))
        tools.log_traceback(traceback.format_exc())

def get_img(url, userid):
    try:
        filename = url.rsplit('/', 1)[1]
        r = requests.get(url, allow_redirects=True)
        with open(filename, 'wb') as f:  # close the file instead of leaking the handle
            f.write(r.content)
        global team
        team = getcolor.main_color(filename)
        global pokeid
        pokeid = ocr.getid(filename, userid)
    except KeyboardInterrupt:
        return
    except IndexError as i:
        print(
            Fore.RED + Style.BRIGHT +
            "[WARN] URL soumise incorrecte. Veuillez vérifier les fichiers Anti-Cheat.log et Anti-Cheat_traceback.log pour en savoir plus."
            + Style.RESET_ALL)
        tools.log("[ERRO] " + str(i))
        tools.log_traceback(traceback.format_exc())
        pokeid = "ERROR"
    except Exception as e:
        print(
            Fore.RED + Style.BRIGHT +
            "[WARN] Une erreur inconnue est survenue. Veuillez vérifier les fichiers Anti-Cheat.log et Anti-Cheat_traceback.log pour en savoir plus."
            + Style.RESET_ALL)
        tools.log("[ERRO] " + str(e))
        tools.log_traceback(traceback.format_exc())

def _get_or_create_model(self):
    # using a new model without a reset: the model still has to be initialized
    if not self._model.is_init:
        log(f'get new model {self._model_name}')
        # init model parameters
        if self._model_name == CNN_MODEL_2D:
            kwargs = {
                'input_shape': self._input_shape[1:],
                'num_classes': self.metadata[CLASS_NUM],
                'max_layer_num': 10
            }
        elif self._model_name in [
                LSTM_MODEL, BILSTM_MODEL, CRNN_MODEL, CRNN2D_MODEL,
                CRNN2D_LARGER_MODEL, CRNN2D_VGG_MODEL, ATTGRU
        ]:
            kwargs = {
                'input_shape': self._input_shape[1:],
                'num_classes': self.metadata[CLASS_NUM],
            }
        elif self._model_name == SVM_MODEL:
            kwargs = {'kernel': 'linear', 'max_iter': 1000}
        elif self._model_name == LR_MODEL:
            kwargs = {'kernel': 'liblinear', 'max_iter': 100}
        else:
            raise Exception("No such model!")
        self._model.init_model(**kwargs)  # the inner is_init re-check was redundant
    log(f'This train loop uses {self._model_name}, last train loop used {self._last_model_name}')

def getAbstract(MyWebdriver, logPath='', warningPath=''):
    abstract = ''
    # fetch the abstract
    try:
        MyWebdriver.WebDriverWait_until(
            30, lambda x: x.find_element_by_xpath(
                '/html/body/font/div/font[2]/div'))
        abstractTag = MyWebdriver.find_element_by_xpath(
            '/html/body/font/div/font[2]/div')
    except Exception as e:
        warningInfo = 'Can not get the abstract from this page\n Failed info: {0}'.format(
            repr(e))
        tools.warning(warningInfo, warningPath)
    else:
        abstracts = abstractTag.text.split('\n')[1:] if len(
            abstractTag.text.split('\n')) > 0 else []
        for part in abstracts:  # the original loop variable `abs` shadowed the builtin
            abstract += part
        successInfo = 'Successfully get the abstract from this page'
        if abstract == '':
            successInfo = '!!!Successfully get the abstract from this page, but the abstract is None'
        tools.log(successInfo, logPath)
    return abstract

def _init_even_class_index_by_each(self, each_class_index_list):
    even_class_index = []
    sample_per_class = max(int(len(self._train_y) / self._num_classes), 1)
    log_info = list()
    for i in range(self._num_classes):
        class_cnt = len(each_class_index_list[i])
        tmp = []
        log_info.append([i, class_cnt])
        # fixme: class_cnt may be 0
        if class_cnt == 0:
            log("Init even class index, class_id={} cnt=0".format(i))
        elif class_cnt < sample_per_class:
            # repeat the class's indices, then top up with a random sample
            tmp = each_class_index_list[i] * int(sample_per_class / class_cnt)
            tmp += random.sample(
                each_class_index_list[i], sample_per_class - len(tmp))
        else:
            tmp += random.sample(
                each_class_index_list[i], sample_per_class)
        random.shuffle(tmp)
        even_class_index.append(tmp)
    log("Init even class index, [class_id, class_cnt]={}".format(log_info))
    return even_class_index

def getPdfURL(MyWebdriver, logPath='', warningPath=''):
    pdfURL = ''
    # fetch the PDF URL
    try:
        MyWebdriver.WebDriverWait_until(
            30, lambda x: x.find_element_by_xpath(
                '/html/body/font/div/font[2]/table/tbody/tr/td[2]/table/tbody/tr/td/font/a'
            ))
        downloadTag = MyWebdriver.find_element_by_xpath(
            '/html/body/font/div/font[2]/table/tbody/tr/td[2]/table/tbody/tr/td/font/a'
        )
    except Exception as e:
        warningInfo = 'Can not get the pdfURL from this page\n Failed info: {0}'.format(
            repr(e))
        tools.warning(warningInfo, warningPath)
    else:
        pdfURL = downloadTag.get_attribute('href')
        successInfo = 'Successfully get the pdfURL from this page'
        if pdfURL is None:
            pdfURL = ''
            successInfo = '!!!Successfully get the pdfURL from this page, but the pdfURL is None'
        tools.log(successInfo, logPath)
    return pdfURL

def _train_val_split_index_by_y(self, cur_y_array, ratio=0.8):
    all_index, train_index, val_index = [], [], []
    for i in range(self._num_classes):
        all_index.append(list(np.where(cur_y_array[:, i] == 1)[0]))
    log_info = list()
    for i in range(self._num_classes):
        val_ratio_num = int(len(all_index[i]) * (1 - ratio))
        sample_num = max(MIN_VALID_PER_CLASS, val_ratio_num)
        log_info.append([sample_num, MIN_VALID_PER_CLASS, val_ratio_num])
        if sample_num <= len(all_index[i]):
            tmp = random.sample(all_index[i], sample_num)
        else:
            tmp = all_index[i]
        if len(tmp) > MAX_VALID_PERCLASS_SAMPLE:
            tmp = tmp[:MAX_VALID_PERCLASS_SAMPLE]
        val_index += tmp
        differ_set = set(all_index[i]).difference(set(tmp))
        # avoid classes that only have one sample
        if len(differ_set) == 0:
            differ_set = set(tmp)
        train_index += list(differ_set)
    log("Split Val all [sample_num, min_valid_per_class, sample_maxone]={}".format(log_info))
    log("note: cur_y_array len={}, train_index_len={}, val_index_len={}".format(
        len(cur_y_array), len(train_index), len(val_index)))
    return train_index, val_index

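# A worked example of the split sizing above (numbers hypothetical, and
# assuming MIN_VALID_PER_CLASS <= 20 <= MAX_VALID_PERCLASS_SAMPLE): with 100
# samples in class i and ratio = 0.8, val_ratio_num = int(100 * 0.2) = 20, so
# 20 validation indices are drawn and the remaining 80 go to train_index; a
# class with a single sample lands in both sets via the differ_set fallback.
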
def test(self, dataset, remaining_time_budget=None):
    """Make predictions on the test set `dataset` (which is different from
    that of the method `train`).

    Args:
      Same as that of `train` method, except that the labels will be empty
      (all zeros) since this time `dataset` is a test set.
    Returns:
      predictions: A `numpy.ndarray` matrix of shape (sample_count, output_dim).
          Here `sample_count` is the number of examples in this dataset as
          test set and `output_dim` is the number of labels to be predicted.
          The values should be binary or in the interval [0,1].
    """
    # Test method of the domain-specific model.
    try:
        if self._domain == "tabular":
            return self._tabular.test(dataset, remaining_time_budget)
        # Convert test dataset to the necessary format and
        # store it as self.domain_dataset_test
        if self._domain in ['image', 'video']:
            pred_y = self.CVModel.test(
                dataset, remaining_time_budget=remaining_time_budget)
            self.done_training = self.CVModel.done_training
            return pred_y
        # As the original metadata doesn't contain the number of test
        # examples, we need to add this information
        if self.train_loop_num == 1:
            if self._domain == 'text' and self._text_cut_len:
                dataset = dataset.map(
                    lambda x, y: (x[:self._text_cut_len], y),
                    num_parallel_calls=os.cpu_count())
            self._test_dataset = self._read_domain_dataset(
                dataset, is_training=False)
        elif self._domain == 'text' and self._model_manager.is_read_rest_2 and self._need_reread:
            self._test_dataset = self._read_domain_dataset(
                dataset, is_training=False, need_reread=True)
            self._model_manager.is_read_rest_2 = False
            log("Reread test dataset")
        if self._domain in ['text', 'speech'] and \
                not self._domain_metadata['test_num'] >= 0:
            self._domain_metadata['test_num'] = len(self._test_dataset)
            print("test_num {}".format(self._domain_metadata['test_num']))
        # Make predictions
        pred_y = self._model_manager.predict(self._test_dataset)
        return pred_y
    except RuntimeError as exception:
        if "out of memory" in str(exception):
            self.done_training = True
            log("we met cuda out of memory")
        else:
            raise exception

def connected_msg(form):
    log("来自客户端的:", form)
    m, check_cookie = guest(form)
    all_user = MessageBoard.all_user()
    # check whether the content is blank
    if m.content == ' ' or len(m.content) == 0:
        emit('flash_message', {'flash': "你说得很空白无力"})
    # reject an empty nickname
    elif m.message_user == '' and m.user is None:
        emit('flash_message', {'flash': "给自己一个好听的昵称吧"})
    # check whether the cookie matches
    # (the original `is not '' or is not None` was always true)
    elif check_cookie not in ('', None) \
            and check_cookie != request.cookies.get('cookie') \
            and m.message_user in all_user \
            and m.user is None:
        emit('flash_message', {'flash': "好名字都被别人取了,再想一个昵称吧"})
    else:
        m.save()
        print('save')
        emit(
            'new_message',
            {
                'user': m.user,
                'message_user': m.message_user,
                'id': m.id,
                'ct': m.ct,
                'content': m.content,
            },
            broadcast=True,
        )

def __init__(self, metadata):
    """
    Args:
      metadata: an AutoDLMetadata object. Its definition can be found in
          AutoDL_ingestion_program/dataset.py
    """
    # set_random_seed_all(666)
    self.done_training = False
    self.train_loop_num = 0
    self._domain = infer_domain(metadata)
    log("The inferred domain of current dataset is: {}.".format(self._domain))
    self._domain_metadata = get_domain_metadata(metadata, self._domain)
    self._origin_metadata = metadata
    log("Metadata: {}".format(self._domain_metadata))
    self._data_manager = None
    self._model_manager = None
    if self._domain in ("image", "video"):
        self.CVModel = AutoCVModel(self._domain_metadata)
        return
    if self._domain == "tabular":
        self._tabular = TabularModel(metadata, sess)
        return
    if self._domain == 'text':
        if self._domain_metadata['language'] == EN:
            self._embedding_dict = load_embedding_dict(
                EN_EMBEDDING_PATH) if IS_LOAD_EMBEDDING else None
            self._stopwords = load_stopwords(NLP_FEATURE_DIR + '/english_stopwords.txt')
        elif self._domain_metadata['language'] == ZH:
            self._embedding_dict = load_embedding_dict(
                ZH_EMBEDDING_PATH) if IS_LOAD_EMBEDDING else None
            self._stopwords = load_stopwords(NLP_FEATURE_DIR + '/chinese_stopwords.txt')
        else:
            raise Exception('Unsupported language {}'.format(
                self._domain_metadata["language"]))
    self.session = None
    self._is_train_data_read = False
    self._is_test_data_read = False
    self._train_iterator = None
    self._next_element = None
    self._train_dataset = None
    self._test_dataset = None
    self._is_multilabel = False
    self._class_num = self._domain_metadata[CLASS_NUM]
    self._class_set = set()
    self._text_cut_len = NLP_READ_CUT_LEN
    self._need_reread = None
    if self._domain == "text":
        self.vocabulary = None
        # previously: (self._domain_metadata['language'] == EN) and self._class_num == 2
        # only used for EN text
        self._is_use_fast_model = True
        self._is_use_simple_model = self._class_num != 2

def update(self):
    """In Zope this would be __set_state__ - we have our state and create the view."""
    self.set_state(LC.updating)
    log(self, 'update, state:', self.state)
    self.dom_update()
    self.set_state(LC.updated)

def SVD_consensus_check(tx, txs, out, DB):
    if not E_check(tx, 'vote_id', [str, unicode]):
        return False
    if not E_check(tx, 'decisions', [list]):
        return False
    if not tools.reveal_time_p(DB, custom.SVD_length):
        out[0] += 'this is not the correct time to do SVD'
        return False
    if is_number(tx['vote_id']):
        out[0] += 'that can not be a number'
        return False
    jury = tools.db_get(tx['vote_id'], DB)
    if len(tx['decisions']) < 5:
        out[0] += 'need at least 5 decisions to compute SVD'
        return False
    if not E_check(jury, 'members', [list]):
        out[0] += 'that jury has not been created yet'
        return False
    if len(jury['members']) < 3:
        out[0] += 'need at least 3 voters in order to compute SVD'
        return False
    try:
        matrix = txs_tools.decision_matrix(jury, tx['decisions'], DB)
    except Exception:
        tools.log(sys.exc_info())
        tools.log('matrix failure')
        return False
    w = txs_tools.weights(tx['vote_id'], DB, jury)
    k = txs_tools.decisions_keepers(tx['vote_id'], jury, DB)
    for decision in tx['decisions']:
        if decision not in k:
            out[0] += ('one of the decisions has insufficient participation*certainty '
                       'or has not matured yet: ' + str(decision) + ' ' +
                       str(tools.db_get(decision, DB)))  # the DB argument was missing here
            return False
    if not txs_tools.fee_check(tx, txs, DB):
        out[0] += 'you do not have enough money'
        return False
    return True

def add_recent_hash(tx):
    length = tools.local_get("length")
    if "recent_hash" not in tx and length > 0:
        b = tools.db_get(max(1, length - 2))["block_hash"]
        tools.log("b: " + str(b))
        tx["recent_hash"] = b
    return tx

def map_sets_tf(tensor, set_size, fn, pool, pool_axis=DEFAULT_POOL_AXIS):
    """Map FN over SET_SIZE sets in TENSOR & POOL results

    Args:
        tensor: tf.Tensor from which we draw sets
        set_size: integer # of elements from tensor to which we apply fn
        fn: callable to apply to each set of set_size elements in tensor
        pool: callable to apply to the results of mapping fn over sets

    Returns:
        pooled result of fn mapped over set_size sets in tensor

    Roughly: pool(map(fn, sets(iterable, set_size)))
    """
    log("map_sets_tf", tensor, set_size, fn, pool, color="red")
    indices = tf.reshape(np.indices(tensor.shape), [-1])
    if set_size == 0:  # `is` must not be used to compare integers
        return pool(tf.map_fn(fn, tensor), axis=pool_axis)
    else:
        return pool(tf.map_fn(
            lambda index_set: fn(tf.gather_nd(tensor, index_set)),
            list(sets(indices, set_size))), axis=pool_axis)

def handleStaticBgSettings(self):
    log('settings() - handleStaticBgSettings')
    if (self.category == "static" and  # only for the 'static' category
            self.other_static_bg and   # only if we want it displayed on static
            # not while the screensaver is active with the on-screensaver option set
            not (self.screensaver and self.other_static_onscreensaver)):
        bob.bob_set_priority(128)  # allow lights to be turned on
        rgb = (c_int * 3)(self.other_static_red, self.other_static_green, self.other_static_blue)
        ret = bob.bob_set_static_color(byref(rgb))
        self.staticBobActive = True
    else:
        bob.bob_set_priority(255)
        self.staticBobActive = False
    if self.category == "3dTAB":
        self.mode3dActiveTAB = True
        self.mode3dActiveSBS = False
    elif self.category == "3dSBS":
        self.mode3dActiveSBS = True
        self.mode3dActiveTAB = False
    else:
        self.mode3dActiveTAB = False
        self.mode3dActiveSBS = False

def setupForMovie(self):
    log('settings() - setupForMovie')
    if self.movie_preset == 1:  # preset smooth
        saturation = 3.0
        value = 10.0
        speed = 20.0
        autospeed = 0.0
        interpolation = 0
        threshold = 0.0
    elif self.movie_preset == 2:  # preset action
        saturation = 3.0
        value = 10.0
        speed = 80.0
        autospeed = 0.0
        interpolation = 0
        threshold = 0.0
    else:  # custom (preset 0); a final else also avoids unbound locals at the return
        saturation = self.movie_saturation
        value = self.movie_value
        speed = self.movie_speed
        autospeed = self.movie_autospeed
        interpolation = self.movie_interpolation
        threshold = self.movie_threshold
    return (saturation, value, speed, autospeed, interpolation, threshold)

def spend_verify(tx, txs, out, DB):
    txaddr = tools.addr(tx)
    h = tx['recent_hash']
    l = tools.local_get('length')
    r = range(l - 10, l)
    r = filter(lambda l: l > 0, r)
    recent_blocks = map(lambda x: tools.db_get(x), r)
    recent_hashes = map(lambda x: x['block_hash'], recent_blocks)
    if h not in recent_hashes:
        tools.log('recent hash error')
        return False
    recent_txs = []
    # the original rebound a default argument inside a helper, so the
    # accumulated list was silently discarded
    for b in recent_blocks:
        recent_txs = recent_txs + b['txs']
    recent_txs = filter(lambda t: t['type'] == 'spend', recent_txs)
    recent_txs = filter(lambda t: t['recent_hash'] == h, recent_txs)
    recent_txs = filter(lambda t: t['to'] == tx['to'], recent_txs)
    recent_txs = filter(lambda t: t['amount'] == tx['amount'], recent_txs)
    recent_txs = filter(lambda t: t['fee'] == tx['fee'], recent_txs)
    recent_txs = filter(lambda t: tools.addr(t) == txaddr, recent_txs)
    if len(recent_txs) > 0:
        out[0] += 'repeated spend'
        return False
    if not signature_check(tx):
        out[0] += 'signature check'
        return False
    if len(tx['to']) <= 30:
        out[0] += 'that address is too short'
        out[0] += 'tx: ' + str(tx)
        return False
    if not tools.fee_check(tx, txs, DB):
        out[0] += 'fee check error'
        return False
    return True

def setupForMusicVideo(self):
    log('settings() - setupForMusicVideo')
    if self.music_preset == 1:  # preset Ballad
        saturation = 3.0
        value = 10.0
        speed = 20.0
        autospeed = 0.0
        interpolation = 1
        threshold = 0.0
    elif self.music_preset == 2:  # preset Rock
        saturation = 3.0
        value = 10.0
        speed = 80.0
        autospeed = 0.0
        interpolation = 0
        threshold = 0.0
    elif self.music_preset == 3:  # preset disabled
        saturation = 0.0
        value = 0.0
        speed = 0.0
        autospeed = 0.0
        interpolation = 0
        threshold = 0.0
    else:  # custom (preset 0); a final else also avoids unbound locals at the return
        saturation = self.music_saturation
        value = self.music_value
        speed = self.music_speed
        autospeed = self.music_autospeed
        interpolation = self.music_interpolation
        threshold = self.music_threshold
    return (saturation, value, speed, autospeed, interpolation, threshold)

def peer_check(i, peers, DB):
    peer = peers[i][0]
    block_count = cmd(peer, {'type': 'blockCount'})
    if not isinstance(block_count, dict):
        return
    if 'error' in block_count.keys():
        return
    peers[i][2] = block_count['diffLength']
    peers[i][3] = block_count['length']
    length = tools.db_get('length')
    diffLength = tools.db_get('diffLength')
    size = max(len(diffLength), len(block_count['diffLength']))
    us = tools.buffer_(diffLength, size)
    them = tools.buffer_(block_count['diffLength'], size)
    if them < us:
        give_block(peer, DB, block_count['length'])
    elif us == them:
        try:
            ask_for_txs(peer, DB)
        except Exception as exc:
            tools.log('ask for tx error')
            tools.log(exc)
    else:
        download_blocks(peer, DB, block_count, length)
    changed = False
    my_peers = tools.db_get('peers_ranked')
    their_peers = cmd(peer, {'type': 'peers'})
    if type(my_peers) == list:
        for p in their_peers:
            if p not in my_peers:
                changed = True
                my_peers.append(p)
    if changed:
        tools.db_put('peers_ranked', my_peers)

def add_peer(peer, current_peers=None):
    if current_peers is None:  # 0 was a poor sentinel for "not supplied"
        current_peers = tools.db_get('peers_ranked')
    # entries look like [peer, 5, '0', 0], so compare against x[0], not x[0][0]
    if peer not in map(lambda x: x[0], current_peers):
        tools.log('add peer')
        current_peers.append([peer, 5, '0', 0])
        tools.db_put('peers_ranked', current_peers)  # key first, as elsewhere

def peer_check(peer, DB):
    peers = tools.db_get('peers')
    if peers[peer]['length'] == 0 or random.random() < 0.1:
        ask_for_count(peer)
        out = trade_peers(peer)
        if type(out) == dict and 'error' in out:
            return 1
        peers = tools.db_get('peers')  # refresh after trading peers
    length = tools.db_get('length')
    diffLength = tools.db_get('diffLength')
    size = max(len(diffLength), len(peers[peer]['diffLength']))
    us = tools.buffer_(diffLength, size)
    them = tools.buffer_(peers[peer]['diffLength'], size)
    if them < us:
        return give_block(peer, DB, peers[peer]['length'])
    elif us == them:
        try:
            ask_for_count(peer)
            trade_peers(peer)
            return ask_for_txs(peer, DB)
        except Exception as exc:
            tools.log('ask for tx error')
            tools.log(exc)
    else:
        return download_blocks(peer, DB, peers[peer]['length'], length)

def initialize_to_zero_votecoin(vote_id, address, DB, add_block):
    initialize_to_zero_helper(['votecoin', vote_id], address, DB)
    jury = tools.db_get(vote_id, DB)
    if 'members' not in jury:
        tools.log('initialized to zero error')
        return  # jury['members'] below would raise a KeyError otherwise
    if address not in jury['members']:
        adjust_list(['members'], vote_id, False, address, DB, add_block)

def __execute_delete_procedure(name, args):
    conn = None
    cursor = None
    try:
        conn = __create_connection_for_insert_delete()
        cursor = conn.cursor()
        output = cursor.callproc(name, args)
        conn.commit()
        tools.log("PROCEDURE CALLED [DELETE]: %s args-output: %s" % (name, __str_args(args)),
                  insert_db=False)
    except mysql.connector.Error as err:
        tools.log(
            type="ERROR",
            code="db",
            file_name="database.py",
            function_name="__execute_delete_procedure",
            message="ARGS: %s" % args,
            exception=err)
    finally:
        # guard against a failed connection, which would leave these undefined
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()

def slasher_verify(tx, txs, out, DB):
    address = tools.addr(tx)
    acc = tools.db_get(address)
    if acc['secrets'][str(tx['on_block'])]['slashed']:
        tools.log('Someone already slashed them, or they already took the reward.')
        return False
    if not sign_verify(tx['tx1'], [], [''], {}):
        tools.log('one was not a valid tx')
        return False
    if not sign_verify(tx['tx2'], [], [''], {}):
        tools.log('two was not a valid tx')
        return False
    tx1 = copy.deepcopy(tx['tx1'])
    tx2 = copy.deepcopy(tx['tx2'])
    tx1.pop('signatures')
    tx2.pop('signatures')
    # round-trip both txs so the hashes compare canonical forms
    tx1 = unpackage(package(tx1))
    tx2 = unpackage(package(tx2))
    msg1 = tools.det_hash(tx1)
    msg2 = tools.det_hash(tx2)
    if msg1 == msg2:
        tools.log('this is the same tx twice...')
        return False
    if tx1['on_block'] != tx2['on_block']:
        tools.log('these are on different lengths')
        return False
    return True

def f(blocks_queue, txs_queue):
    def bb():
        return blocks_queue.empty()

    def tb():
        return txs_queue.empty()

    def ff(queue, g, b, s):
        while not b():
            time.sleep(0.0001)
            try:
                g(queue.get(False))
            except Exception as exc:
                tools.log('suggestions ' + s)
                tools.log(exc)

    while True:
        try:
            time.sleep(0.1)
            l = tools.local_get('length') + 1
            v = range(l - 10, l)
            v = filter(lambda x: x > 0, v)
            v = map(lambda x: tools.db_get(x), v)
            v = map(lambda x: x['block_hash'], v)
            if tools.local_get('stop'):
                tools.dump_out(blocks_queue)
                tools.dump_out(txs_queue)
                return
            while not bb() or not tb():
                ff(blocks_queue, lambda x: add_block(x, v), bb, 'block')
                ff(txs_queue, add_tx, tb, 'tx')
        except Exception as exc:
            tools.log(exc)

def setupForFiles(self):
    log('settings() - setupForFiles')
    if self.files_preset == 1:  # preset smooth
        saturation = 3.0
        value = 10.0
        speed = 20.0
        autospeed = 0.0
        interpolation = 0
        threshold = 0.0
    elif self.files_preset == 2:  # preset action
        saturation = 3.0
        value = 10.0
        speed = 80.0
        autospeed = 0.0
        interpolation = 0
        threshold = 0.0
    elif self.files_preset == 3:  # preset disabled
        saturation = 0.0
        value = 0.0
        speed = 0.0
        autospeed = 0.0
        interpolation = 0
        threshold = 0.0
    else:  # custom (preset 0); a final else also avoids unbound locals at the return
        saturation = self.files_saturation
        value = self.files_value
        speed = self.files_speed
        autospeed = self.files_autospeed
        interpolation = self.files_interpolation
        threshold = self.files_threshold
    return (saturation, value, speed, autospeed, interpolation, threshold)

def handleStereoscopic(self, isStereoscopic):
    log('settings() - handleStereoscopic(%s) - disableon3d (%s)' % (isStereoscopic, self.bobdisableon3d))
    if self.bobdisableon3d and isStereoscopic:
        log('settings() - disable due to 3d')
        self.bobdisable = True
    else:
        self.resetBobDisable()

def decisions_keepers(vote_id, jury, DB):
    # this is returning something of length voters.
    w = weights(vote_id, DB, jury)
    if w == 'error':  # check before mapping; mapping over the string hid the error
        return 'error'
    wt = map(lambda x: x[0], w)
    total_weight = sum(wt)
    matrix = decision_matrix(jury, jury['decisions'], DB)
    # exclude decisions with insufficient participation*certainty
    if len(matrix) < 3:
        return []
    if len(matrix[0]) < 5:
        return []
    attendance = []
    certainty = []
    for decision in range(len(matrix[0])):
        a = 0
        c = 0
        for juror in range(len(matrix)):
            if not numpy.isnan(matrix[juror][decision]):
                a += wt[juror]
                if matrix[juror][decision] == 1:
                    c += wt[juror]
                else:
                    c += wt[juror] / 2.0
        attendance.append(a * 1.0 / total_weight)
        certainty.append(abs(c - 0.5) * 2.0 / total_weight)
    out = []
    for i in range(len(certainty)):
        if certainty[i] * attendance[i] > 0.55:
            out.append(jury['decisions'][i])
        else:
            tools.log('participation times certainty was too low to include this decision: ' +
                      str(jury['decisions'][i]))
    return out

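# A worked pass through the inclusion rule above (weights hypothetical,
# total_weight = 1.0): if jurors holding 90% of the weight vote on a decision
# and all of them vote 1, then a = c = 0.9, attendance = 0.9, certainty =
# abs(0.9 - 0.5) * 2 = 0.8, and 0.9 * 0.8 = 0.72 > 0.55, so the decision is
# kept; low attendance or an even split drives the product below the 0.55
# cutoff and the decision is logged and dropped.
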
def test(self, dataset, remain_time_budget=None):
    result = self._context.stage.test(dataset, remain_time_budget)
    if self._context.is_ensemble:
        log("Start ensemble")
        self._y_hats.append(result)
        return np.asarray(self._y_hats).mean(axis=0)
    else:
        return result

def halt(self, sig=None, frame=None):
    if sig or frame:
        log(self, 'Terminated by SIGINT.')
        exit(0)
    log(self, 'Shutting down.')
    for mod in self._modules:
        mod.halt()

def call_one_for_all(self, input):  # input unknown = ragged sensor
    """Each input element innervates all output elements"""
    # output = tf.foldl(lambda a, item: a + self.kernel(item), input)
    output = tf.map_fn(lambda item: self.kernel(item), input)
    log("call_one_for_all", output.shape, color="blue")
    output = tf.math.reduce_sum(output, axis=1)
    log("call_one_for_all", output.shape, color="blue")
    return output

def on_entryAction_key_press_event(self, widget, event):
    key = Gdk.keyval_name(event.keyval)
    tools.log("Key pressed: " + str(key), log_type=tools.LOG_GUI, class_type=self)
    if key == "Return":
        self.buttonRight.grab_focus()
        return False
    else:
        return self.syntax_completion(widget, event)

def fee_check(tx, txs, DB):
    address = addr(tx)
    truthcoin_cost = cost_0(txs, DB)
    acc = tools.db_get(address, DB)
    if int(acc['amount']) < truthcoin_cost:
        tools.log('insufficient truthcoin')
        return False
    return True

def forth(f, stack):
    if len(f) == 0:
        return stack
    try:
        # a token that parses as a number is pushed onto the stack
        f[0] = float(f[0])
        return forth(f[1:], [f[0]] + stack)
    except Exception as exc:
        tools.log(exc)
        # otherwise look the word up in the `do` dispatch table and apply it
        return forth(f[1:], do[f[0]](stack))

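# A quick usage sketch of the forth() interpreter above; the '+' word is a
# hypothetical entry in the `do` dispatch table:
#   do['+'] = lambda stack: [stack[0] + stack[1]] + stack[2:]
#   forth(['2', '3', '+'], [])  # pushes 2.0, pushes 3.0, applies '+' -> [5.0]
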
def log_result(self, html_source, selection, amount):
    pattern = 'Not enough money for that operation.'
    msg = 'Attempted to Train %s %s' % (amount, selection)
    m = re.search(pattern, html_source)
    if m:
        msg = 'Not enough money for %s %s' % (amount, selection)
    tools.log(msg)
    print ''

def swLed(ev=None):
    global system_status
    if system_status is SystemStatus.READY:
        system_status = SystemStatus.RECORDING
    else:
        system_status = SystemStatus.READY
    setColor(system_status)
    log(system_status)

def name(bot, update):
    db = SQLite()
    global idlot
    update.message.reply_text(RU.description)
    name = update.message.text
    # idlot is interpolated directly; the name itself is passed as a bound parameter
    db.magic('update lot set head = (?) where id = {}'.format(idlot), (name, ))
    tools.log('Created lot {} with name {}'.format(idlot, name))
    return DESCR

def add_from_queue(queue, add_function, is_queue_empty, queue_name):
    # while the queue is not empty (i.e. there are suggested blocks or
    # transactions), call the corresponding add function
    while not is_queue_empty():
        time.sleep(0.0001)
        try:
            add_function(queue.get(False))
        except Exception as exc:
            tools.log('suggestions ' + queue_name)
            tools.log(exc)

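# A minimal usage sketch for add_from_queue; the queue payload and the handler
# are hypothetical (the Queue module matches the Python 2 style of the
# networking code in this corpus):
import Queue
blocks_queue = Queue.Queue()
blocks_queue.put({'block_hash': 'abc', 'txs': []})
add_from_queue(blocks_queue,
               lambda b: tools.log('adding ' + str(b)),
               blocks_queue.empty,
               'block')
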
def info(DB, args):
    if len(args) < 1:
        return 'not enough inputs'
    address = args[0]
    try:
        return tools.local_get(address)
    except Exception as exc:
        tools.log(exc)
        return (address + ' is not in the local database. maybe you meant to do the command: "patty_info ' + address + '"?')

def setupForStatic(self):
    log('settings() - setupForStatic')
    saturation = 4.0
    value = 1.0
    speed = 50.0
    autospeed = 0.0
    interpolation = 1
    threshold = 0.0
    return (saturation, value, speed, autospeed, interpolation, threshold)

def main(peers, DB):
    map(tools.add_peer, peers)
    try:
        while True:
            # changing this from 0.01 to 0.5 made blocks load way faster;
            # the add_block queue was getting overfilled.
            time.sleep(0.5)
            if tools.local_get('stop'):
                return
            main_once(DB)
    except Exception as exc:
        tools.log(exc)