def make_query(q):
    data = {}
    # bail out early on a blank query
    if limpar(q) == ' ':
        return
    base = 'https://dblp.org/search/publ/api?'
    search = 'q=' + '+'.join(limpar(q).split(' '))
    form = '&format=json'
    hit = '&h=1'
    data['url'] = base + search + form + hit
    print(data['url'])
    r = requests.get(data['url'])
    print(r.status_code)
    d = r.json()['result']
    print(d)
    if int(d['hits']['@total']) == 0:
        return ''
    hits = d['hits']['hit']
    for h in hits:
        data['type'] = h['info']['type']
        pprint(data)
        return data['type']

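# A minimal usage sketch for make_query above, not from the original project:
# it assumes `requests` is importable and that `limpar(q)` is the project's
# query normaliser (stubbed here as strip/lower for the sketch).
def limpar(q):
    return q.strip().lower()  # hypothetical stand-in

if __name__ == '__main__':
    print(make_query('neural turing machine'))  # prints URL, JSON, then the hit type
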
def push_status_data(host, port):
    try:
        # connect to the server so it can push the status data into redis
        psh_s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        psh_s.connect((host, port))
        # note: the token spelling below must match what the server expects
        RSA_signal, random_num = 'RSA_KEY_Virification', str(
            random_pass.randomPassword(10))
        encrypted_data = key_gen.RSA_key.encrypt_RSA(key_gen.public_file,
                                                     random_num)
        psh_s.sendall(json.dumps((RSA_signal, encrypted_data, random_num)))
        print '\033[34;1m sending status to Monitor server .... \033[0m'
        # check the RSA authentication result
        RSA_status = psh_s.recv(1024)
        if RSA_status == 'RSA_OK':
            psh_s.send('StatusDataIntoRedis')
            status = psh_s.recv(1024)
            if status == "StatusDataIntoRedis_OK":
                return pull_status_data()
            else:
                pass
    except socket.error:
        pprint(socket.error, 'err', exit=1)
        #sys.exit("Socket connect error")
    finally:
        psh_s.close()

def message(self, pubnub, message):
    output_message = ''
    new_message = message.message
    print("\nNew message received: ")
    utils.pprint(new_message)
    device = new_message['device']
    if device == DEVICE_HOSTNAME or device == "all":
        command_type = new_message['command_type']
        incident = new_message['incident']

        # execute the configuration type of commands
        if command_type == 'config':
            try:
                # parse the config commands or command
                command = new_message['commands']
                command_list = command.split('!')
                comment = 'Configuration commands received: ' + command

                # print to Python console, log to host device, and update ServiceNow
                print(comment)
                execute('send log WhatsOp: ' + comment)
                service_now_apis.update_incident(incident, comment, SNOW_DEV)

                # submit the command using Python CLI, update incident with result
                output = configure(command_list)
                output_message = (str(output).replace('),', '),\n')).replace('[', ' ').replace(']', ' ')
                print(output_message)
                service_now_apis.update_incident(incident, output_message, SNOW_DEV)
                status_message = 'Configuration command Successful'
            except:
                status_message = "Configuration Command Executed"
                print(output_message)

        # execute the exec type of commands
        if command_type == 'exec':
            try:
                # parse the exec command
                command = new_message['commands']
                comment = str('Exec command received: ' + command)

                # print to Python console, log to host device, and update ServiceNow
                print(comment)
                execute('send log WhatsOp: ' + comment)
                service_now_apis.update_incident(incident, comment, SNOW_DEV)

                # send the command to device using Python CLI
                output_message = execute(str(command))
                service_now_apis.update_incident(incident, output_message, SNOW_DEV)

                # pretty print the command output to console
                out_list = output_message.split('\n')
                for items in out_list:
                    if items != "":
                        print(items)
                status_message = 'Successful'
            except:
                status_message = 'Unsuccessful'

        print(str('\nCommand result: ' + status_message))

def indexOfPid(pid):
    for _pid, idx in tuple_PlatformID2Index:
        if _pid == int(pid):
            return idx
    else:
        pprint("[ indexOfPid ]: Not found pid index. pid:", pid)
        return -1

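# Illustration only: indexOfPid above expects `tuple_PlatformID2Index` to be
# an iterable of (platform_id, index) pairs; the ids below are invented.
tuple_PlatformID2Index = (
    (1001, 0),
    (1002, 1),
)
# indexOfPid(1002) -> 1, while an unknown pid logs a message and returns -1.
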
def refresh_move_objs(lvm_id, src_pv=None, dest_pv=None):
    lv = cfg.om.get_by_lvm_id(lvm_id)
    if lv:
        # Best guess is that the lv and the source & dest.
        # PV state needs to be updated, need to verify.
        utils.pprint('gen_signals: move LV %s' % (str(lvm_id)),
                     "fg_yellow", "bg_black")
        lv.refresh()

        vg = cfg.om.get_by_path(lv.Vg)
        if vg:
            vg.refresh()

        if not src_pv and not dest_pv:
            for pv_object_path in vg.Pvs:
                pv = cfg.om.get_by_path(pv_object_path)
                if pv:
                    pv.refresh()
        else:
            pv = cfg.om.get_by_lvm_id(src_pv)
            if pv:
                pv.refresh()
            pv = cfg.om.get_by_lvm_id(dest_pv)
            if pv:
                pv.refresh()

def __mining(self):
    start = time.time()
    self.blockchain.add_transaction(
        sender_blockchain_address=MINING_SENDER,
        recipient_blockchain_address=self.blockchain.blockchain_address,
        value=MINING_REWARD)
    nonce = self.blockchain.proof_of_work()
    if nonce == -1:
        return False
    previous_hash = self.blockchain.hash(self.blockchain.chain[-1])
    is_created, block = self.blockchain.create_block(nonce, previous_hash)
    if not is_created:
        return False
    self.delete_transaction_for_all_peer()
    print({'action': 'mining', 'status': 'success'})
    utils.pprint(self.blockchain.chain)
    self.send_all_chain_for_consensus()
    elapse = round(time.time() - start, 4)
    self.blockchain.difficulty_adjustment(elapse)
    # print('mining speed : ', str(round(self.mining_speed, 3)))
    # print('difficult : ', str(self.difficulty))
    return True

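# A hedged sketch of how a round like __mining above is usually scheduled:
# re-arm a timer after every round. `node.mining` stands in for whatever
# public wrapper the project exposes around __mining; the name and the
# 20-second interval are assumptions, not the original API.
import threading

def start_mining(node, interval_sec=20):
    node.mining()  # hypothetical public wrapper around __mining
    threading.Timer(interval_sec, start_mining,
                    args=(node, interval_sec)).start()
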
def register():
    user = request.forms.username.strip()
    #pwd = md5(request.forms.password.strip()).hexdigest()
    pwd = request.forms.password.strip()
    url = SERVERs[int(request.forms.server.strip()) - 1][1]
    t = int(time.time())
    sign = _generate_sign(user, t)
    try:
        result = http_json_get_sync(
            url,
            dict(game=GAME_ID, user=user, pwd=pwd, timestamp=t, sign=sign))
        if result['return_code'] == 1:
            return {'result': 0, 'data': ''}
        else:
            pprint("[ register ]", result)
            err = REGISTER_ERROR.get(result['return_code'], 'unknown error')
            return {
                'result': result['return_code'],
                'data': '{0}, detail:{1}'.format(err, result['return_msg'])
            }
    except Exception, e:
        print_e()
        return {'result': 1, 'data': str(e)}

def main(argv: str) -> None:
    if len(argv) > 1:
        raise RuntimeError("Too many arguments")

    # validate --releases args.
    if FLAGS.releases and FLAGS.releases not in DOCKERFILE_BUILD_HIERARCHY:
        log.critical(
            f"Invalid --releases arguments. Allowed: {DOCKERFILE_BUILD_HIERARCHY}"
        )

    try:
        # Parse specs from manifest.yml and validate it.
        pprint(f"Validating {FLAGS.manifest_file}")
        release_spec = load_manifest_yaml(FLAGS.manifest_file)
        if FLAGS.validate:
            return

        # Setup manager client.
        manager = ManagerClient(release_spec, FLAGS.cuda_version)

        # generate templates to the correct directory.
        if "dockerfiles" in FLAGS.generate:
            manager.generate(FLAGS.dry_run)

        # Build images.
        if "images" in FLAGS.generate:
            manager.build(FLAGS.dry_run)

        # Push images when finished building.
        if FLAGS.push:
            manager.push(dry_run=FLAGS.dry_run)
    except KeyboardInterrupt:
        log.info("Stopping now...")
        exit(1)

def track_progress(self, printing, message_optimization, risky):
    portfolio_statistics = self.statistics
    weights = self.weights
    weights_df = pd.DataFrame(weights, columns=['Allocation Weights'])
    weights_df.index = self.ticker_list

    annual_return = portfolio_statistics['portfolio_annual_return']
    annual_std = portfolio_statistics['portfolio_annual_std']
    annual_sr = portfolio_statistics['portfolio_annual_sr']

    if not risky:
        annual_return = annual_return * 0.9 + self.risk_free * 0.1
        annual_std = 0.9 ** 2 * annual_std
        annual_sr = (annual_return - self.risk_free) / annual_std

    utils.portfolios['ret'].append(annual_return)
    utils.portfolios['std'].append(annual_std)
    utils.portfolios['sr'].append(annual_sr)

    messages = []
    messages.append(message_optimization)
    messages.append(" Portfolio Annual Return (252 days) = {} % ".format(
        round(annual_return * 100, 3)))
    messages.append(" Portfolio Annual Standard Deviation (252 days) = {} ".format(
        round(annual_std, 3)))
    messages.append(" Portfolio Annual Sharpe Ratio (252 days) = {} ".format(
        round(annual_sr, 3)))

    if printing:
        utils.display(weights_df.T)
    if printing:
        utils.pprint(messages)
    return

def model_A(input_shape=None, output_shape=None):
    """
    Returns a compiled model. Defaults to None for all parameters so that
    errors are thrown if an unexpected parameter is given.
    :return: BaseModel instance with compiled keras model
    """
    pprint(input_shape)
    pprint(output_shape)

    input = Input(shape=input_shape, name='input')
    # x = Conv1D(filters=128,
    #            kernel_size=39,
    #            activation='relu')(input)
    # x = Conv1D(filters=128,
    #            kernel_size=39,
    #            activation='relu')(x)
    # x = Conv1D(filters=128,
    #            kernel_size=39,
    #            activation='relu')(x)
    # x = Conv1D(filters=128,
    #            kernel_size=39,
    #            activation='relu')(x)
    x = Flatten()(input)
    x = Dense(output_shape[1], input_dim=input_shape[1], activation='relu')(x)
    preds = Dense(output_shape[1], activation='softmax')(x)

    model = Model(input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adamax',
                  metrics=['categorical_accuracy', 'accuracy'])
    return model

def auth_registries(self) -> None:
    """Set up authentication for the different Docker registries."""

    # https://docs.docker.com/registry/spec/auth/token/
    pprint("Configure Docker")
    try:
        for registry, metadata in self.repository.items():
            if not os.getenv(metadata["pwd"]):
                log.warning(
                    f"If you are intending to use {registry}, make sure to set both "
                    f"{metadata['pwd']} and {metadata['user']} under {os.getcwd()}/.env. Skipping..."
                )
                continue
            else:
                _user = os.getenv(metadata["user"])
                _pwd = os.getenv(metadata["pwd"])

                log.info(
                    f"Logging into Docker registry {registry} with credentials..."
                )
                docker_client.api.login(username=_user,
                                        password=_pwd,
                                        registry=registry)
    except APIError:
        log.info(
            "TLS handshake timeout. You can try resetting the docker daemon. "
            "If you are using systemd you can do `systemctl restart docker`."
        )
        docker_client.from_env(timeout=FLAGS.timeout)

def process_request(request, outfile=None):
    '''Generic request handler for /service

    request: avmap structure (from avmap.load)
    outfile: file-like object to write the response (if None, the response
             is returned as a string)
    '''
    fakefile = outfile is None
    if fakefile:
        outfile = StringIO.StringIO()

    print "Request:"
    utils.pprint(request)

    response = AVDict("air.connect.Response")
    try:
        service = services[request['serviceName']]
        method = getattr(service, request['methodName'])
    except AttributeError:
        raise Exception()

    response['errorMessage'] = None
    response['errorReport'] = None
    response['state'] = 0
    try:
        response['result'] = method(*request['parameters'])
    except Exception as exp:
        # Error reporting is not working
        #response['errorMessage'] = "%s: %s" % (type(exp), exp)
        #response['errorReport'] = True
        raise

    print "Response:"
    utils.pprint(avmap.loads(avmap.dumps(response)))
    avmap.dump(response, outfile)
    if fakefile:
        return outfile.getvalue()

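# For reference, a request shaped the way process_request above expects.
# The field names come straight from the function body; the values are
# invented for illustration.
example_request = {
    'serviceName': 'echoService',  # looked up in the `services` registry
    'methodName': 'echo',          # resolved with getattr on the service
    'parameters': ['hello'],       # splatted into the method call
}
# process_request(example_request) returns the avmap-encoded response as a
# string, since no outfile is given.
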
def model_C(input_shape=None, output_shape=None):
    pprint("Expected input shape: {}".format(input_shape))
    pprint("Expected output shape: {}".format(output_shape))

    model = Sequential()
    model.add(Dense(output_shape[1],
                    input_dim=input_shape[1],
                    kernel_initializer='glorot_uniform',
                    kernel_regularizer=regularizers.l2(0),
                    kernel_constraint=maxnorm(9e999)))
    model.add(Activation("relu"))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(Dense(output_shape[1] * 2,
                    kernel_initializer='glorot_uniform',
                    kernel_regularizer=regularizers.l2(0),
                    kernel_constraint=maxnorm(9e999)))
    model.add(Activation("relu"))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(Dense(output_shape[1] * 2,
                    kernel_initializer='glorot_uniform',
                    kernel_regularizer=regularizers.l2(0),
                    kernel_constraint=maxnorm(9e999)))
    model.add(Activation("relu"))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    # model.add(Dense(output_shape[1] * 2,
    #                 kernel_initializer='glorot_uniform',
    #                 kernel_regularizer=regularizers.l2(0),
    #                 kernel_constraint=maxnorm(9e999)))
    # model.add(Activation("relu"))
    # model.add(BatchNormalization())
    # model.add(Dropout(0.5))

    model.add(Dense(output_shape[1],
                    kernel_initializer='glorot_uniform',
                    kernel_regularizer=regularizers.l2(0),
                    kernel_constraint=maxnorm(9e999)))
    model.add(Activation('softmax'))

    optimus = Adam(lr=0.0001, decay=1e-5)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimus,
                  metrics=['accuracy'])
    return model

def get_ticker_risk_analysis(**kwargs):
    '''
    This function
    1. Uses `get_ticker_statistics` to calculate both RETURN & RISK
       descriptive statistics
    2. Applies the 'Capital Asset Pricing Model' to calculate α (alpha),
       β (beta) and the correlation ρ

    For all this to succeed we define:
    - risk-free rate : the 3-month T-bill
    - Market Index   : S&P 500
    '''
    # 1 - check arguments
    if kwargs == {}:
        if not utils.check_argv(
                5,
                "WARNING! Correct Usage: ./agora.py ticker-risk-analysis <ticker> <from> <to>"
        ):
            return
        ticker = sys.argv[2]
        start = sys.argv[3]
        end = sys.argv[4]
        printing = True
    else:
        ticker = kwargs['ticker']
        start = kwargs['start']
        end = kwargs['end']
        printing = False

    # 2.1 - Get the data & descriptive statistics
    instrument = get_ticker_statistics(ticker=ticker, start=start, end=end)

    # 2.2 - Calculate [*] alpha α
    #                 [*] beta β
    #                 [*] correlation of instrument & market ρ
    instrument.risk_analysis()
    risk_analysis_statistics = instrument.risk_analysis_statistics

    # 3 - print the result
    messages = []
    messages.append(
        " Let's apply CAPM modelling for Risk Analysis [ Market : S&P500 , Instrument : {}] "
        .format(ticker))
    messages.append(" Correlation [ρ] = {} ".format(
        round(risk_analysis_statistics['correlation'], 3)))
    messages.append(" Alpha [α] = {} ".format(
        round(risk_analysis_statistics['alpha'], 3)))
    messages.append(" Beta [β] = {} ".format(
        round(risk_analysis_statistics['beta'], 3)))
    messages.append(" Sharpe Ratio [SR] = {} ".format(
        round(risk_analysis_statistics['sharpe_ratio'], 3)))
    messages.append(" R Squared [R^2] = {} % ".format(
        round(risk_analysis_statistics['r_squared'] * 100, 3)))
    if printing:
        utils.pprint(messages)
    return instrument

def do_login():
    _user = request.params.get('username', '')
    _pass = request.params.get('password', '')
    if _user and _pass and check_password(_user, _pass):
        set_session(_user)
        return '<script language="javascript">top.location="/";</script>'
    else:
        pprint('[ do_login ]failed.', _user, _pass)
        redirect('/login')

def main():
    """
    The script will create an APIC-EM path trace using the Cisco DevNet
    Sandbox: https://sandboxapicem.cisco.com/. We will retrieve the path
    trace details and ask the user whether to delete the created path trace
    at the end.
    The APIC-EM url, username and password are configured in the modules_init
    module. Change these variables if needed to test with a different
    controller. The code will use functions included with the apic_em_apis
    module.
    Source IP address: 10.1.15.117
    Destination IP address: 10.2.1.22
    """

    # declarations for source and destination IP addresses,
    # change these variables if needed to test with different IP addresses
    src_ip = '10.1.15.117'
    dest_ip = '10.2.1.22'

    # testing APIC-EM Path Trace APIs
    print('\n\n\nPath Trace APIC-EM APIs\n')

    # create an APIC-EM ticket
    apic_em_ticket = apic_em_apis.get_service_ticket(EM_USER, EM_PASSW)
    print('\nAPIC-EM ticket: ', apic_em_ticket)

    # create a path trace between the source and destination IP addresses
    path_trace_id = apic_em_apis.create_path_trace(src_ip, dest_ip, apic_em_ticket)
    print('\nAPIC-EM Path Trace id created: ', path_trace_id)

    # wait 10 seconds for the path trace to complete
    print('\nWait 10 seconds for the Path Trace to complete')
    time.sleep(10)

    # retrieve the path trace details
    path_trace = apic_em_apis.get_path_trace_info(path_trace_id, apic_em_ticket)
    print('\nPath Trace details: \n')
    utils.pprint(path_trace)

    # optional step to delete the path trace
    user_input = input('\nDo you want to delete the created Path Trace (y/n) ')
    if user_input == 'y':
        delete_path_trace = apic_em_apis.delete_path_trace(path_trace_id, apic_em_ticket)
        if delete_path_trace == 202:
            print('\nPath Trace deleted')

    print('\n\nEnd of Application Run\n')

def check_password(user, passwd):
    res = False
    user = user.strip()
    passwd = passwd.strip()
    _pass_in_redis = redis.hget(REDIS_PASSWORD_KEY, user)
    pprint('user: ', user, 'passwd: ', passwd, '_pass_in_redis: ', _pass_in_redis)
    res = True  # ( _pass_in_redis == passwd )
    return res

def test(parser, code=None):
    if code is None:
        code = parser.random_code()
        pprint(code)
    elif code.strip() == "":
        return
    parser.new_game(world_size=(8, 8))
    parser.draw("Input: ")
    parser.run(code, debug=False)
    parser.draw("Output: ")

def draw_rows():
    global screen_rows, screen_cols, file_loaded, file_rows, row_offset, column_offset, dirty
    welcome_message = "peditor -- welcome"
    for row in range(screen_rows):
        file_row = row + row_offset
        if file_row < len(file_rows):
            render_row(file_rows[file_row][column_offset:column_offset + screen_cols])
        if row == screen_rows // 3 and not file_loaded and not dirty:
            pad_string = " " * ((screen_cols - len(welcome_message)) // 2)
            pprint(pad_string, welcome_message)
        pprint("\n")

def __validate_and_load_config(self):
    try:
        self.updater = Updater(self.config['TELEGRAM_API_TOKEN'],
                               user_sig_handler=self.__emergency_stop)
    except ValueError:
        utils.pprint('Missing API token! Write it in the config.json file')
        return -1
    try:
        self.plugin_list = self.config['plugin_list']
    except Exception:
        utils.pprint('Missing plugin list. Set it up in the config.json file')
        return -1
    return 0

def compute_propagated_relevance(self, layer, relevance):
    """
    This method computes the backward pass for the incoming relevance
    for the specified layer.

    Args:
        layer: Layer to be reverted.
        relevance: Incoming relevance from higher up in the network.

    Returns:
        The relevance propagated back through the given layer.
    """
    if isinstance(layer,
                  (torch.nn.MaxPool1d, torch.nn.MaxPool2d, torch.nn.MaxPool3d)):
        return self.max_pool_nd_inverse(layer, relevance).detach()

    elif isinstance(layer, (torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d)):
        return self.conv_nd_inverse(layer, relevance).detach()

    elif isinstance(layer, torch.nn.LogSoftmax):
        # Only layer that does not conserve relevance. Mainly used
        # to make probability out of the log values. Should probably
        # be changed to pure passing and the user should make sure
        # the layer outputs are sensible (0 would be 100% class probability,
        # but no relevance could be passed on).
        if relevance.sum() < 0:
            relevance[relevance == 0] = -1e6
            relevance = relevance.exp()
            if not self.warned_log_softmax:
                pprint("WARNING: LogSoftmax layer was "
                       "turned into probabilities.")
                self.warned_log_softmax = True
        return relevance

    elif isinstance(layer, self.allowed_pass_layers):
        # The above layers are one-to-one mappings of input to
        # output nodes. All the relevance in the output will come
        # entirely from the input node. Given the conservation
        # of relevance, the input is as relevant as the output.
        return relevance

    elif isinstance(layer, torch.nn.Linear):
        return self.linear_inverse(layer, relevance).detach()

    else:
        raise NotImplementedError(
            "The network contains layers that"
            " are currently not supported {0:s}".format(str(layer)))

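# A tiny standalone illustration of the relevance-conservation property the
# method above relies on: pass-through layers return relevance unchanged, so
# the total relevance before and after a layer must match. Only plain torch
# is used here.
import torch

relevance = torch.tensor([0.2, 0.5, 0.3])
passed = relevance  # what the `allowed_pass_layers` branch does
assert torch.isclose(passed.sum(), relevance.sum())  # conservation holds
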
def copy(ntm, seq_length, sess, print_=True):
    """Run the copy task given a trained NTM"""
    start_symbol = np.zeros([ntm.cell.input_dim], dtype=np.float32)
    start_symbol[0] = 1
    end_symbol = np.zeros([ntm.cell.input_dim], dtype=np.float32)
    end_symbol[1] = 1

    seq = generate_copy_sequence(seq_length, ntm.cell.input_dim - 2)

    feed_dict = {input_: vec for vec, input_ in zip(seq, ntm.inputs)}
    feed_dict.update(
        {true_output: vec for vec, true_output in zip(seq, ntm.true_outputs)})
    feed_dict.update({
        ntm.start_symbol: start_symbol,
        ntm.end_symbol: end_symbol
    })

    input_states = [state['write_w'][0] for state in ntm.input_states[seq_length]]
    output_states = [state['read_w'][0] for state in ntm.get_output_states(seq_length)]

    result = sess.run(ntm.get_outputs(seq_length) +
                      input_states + output_states +
                      [ntm.get_loss(seq_length)],
                      feed_dict=feed_dict)

    is_sz = len(input_states)
    os_sz = len(output_states)

    outputs = result[:seq_length]
    read_ws = result[seq_length:seq_length + is_sz]
    write_ws = result[seq_length + is_sz:seq_length + is_sz + os_sz]
    loss = result[-1]

    if print_:
        np.set_printoptions(suppress=True)
        print(" true output : ")
        pprint(seq)
        print(" predicted output :")
        pprint(np.round(outputs))
        print(" Loss : %f" % loss)
        np.set_printoptions(suppress=False)
    else:
        return seq, outputs, read_ws, write_ws, loss

def is_admin():
    res = False
    user = ''  # initialised so the log line below cannot hit a NameError
    cookie = request.get_cookie(COOKIE_KEY, secret=COOKIE_SECRET)
    if cookie:
        try:
            user, _ = cookie.split('.')
            user = user.strip()
            res = redis.sismember(REDIS_ADMIN_USER, user)
        except:
            print_e()
    pprint('[ is_admin ]', user, res, cookie)
    return res

def model_B(input_shape=None, output_shape=None):
    pprint(input_shape)
    pprint(output_shape)

    model = Sequential()
    model.add(Dense(output_shape[1], input_dim=input_shape[1]))
    model.add(Activation("relu"))
    model.add(Dense(output_shape[1]))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adamax',
                  metrics=['accuracy'])
    return model

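# Hypothetical call site for the model builders above: both read index [1]
# of (n_samples, n_features)-style shape tuples. The shapes are invented.
model = model_B(input_shape=(None, 128), output_shape=(None, 10))
model.summary()
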
def load_cases():
    dut = simple_dut()
    suite_func_list = {}
    suite_perf_list = {}
    for suite in suites:
        test_module = __import__('TestSuite_' + suite)
        for classname, test_class in get_subclasses(test_module, TestCase):
            test_suite = test_class(dut, None, None, suite)
            func_cases = get_functional_test_cases(test_suite)
            perf_cases = get_performance_test_cases(test_suite)
            suite_func_list[suite] = func_cases
            suite_perf_list[suite] = perf_cases
    pprint(suite_func_list)
    pprint(suite_perf_list)

def Refresh(self):
    """
    Take all the objects we know about and go out and grab the latest.
    More of a test method at the moment, to make sure we are handling
    object paths correctly.

    Returns the number of changes (object add/remove/properties changed).
    """
    #cfg.om.refresh_all()
    utils.pprint('Manager.Refresh - entry', 'bg_black', 'fg_light_red')
    rc = load(refresh=True)
    utils.pprint('Manager.Refresh - exit %d' % (rc), 'bg_black', 'fg_light_red')
    return rc

def predict(ntm, seq_length, sess, print_=True):
    """Run the predict task, wherein the network must predict the next input
    given a periodic input, given a trained NTM. It's basically a variant of
    the copy task where the start_symbol and end_symbol depend on the signal."""
    assert seq_length % 2 == 0

    start_symbol = np.zeros([ntm.cell.input_dim], dtype=np.float32)
    start_symbol[0] = 1
    end_symbol = np.zeros([ntm.cell.input_dim], dtype=np.float32)
    end_symbol[1] = 1

    seq = generate_predict_sequence(seq_length, ntm.cell.input_dim - 2)

    feed_dict = {input_: vec for vec, input_ in zip(seq, ntm.inputs)}
    feed_dict.update(
        {true_output: vec for vec, true_output in zip(seq, ntm.true_outputs)})
    feed_dict.update({
        ntm.start_symbol: start_symbol,
        ntm.end_symbol: end_symbol
    })

    input_states = [state['write_w'][0] for state in ntm.input_states[seq_length]]
    output_states = [state['read_w'][0] for state in ntm.get_output_states(seq_length)]

    result = sess.run(ntm.get_outputs(seq_length) +
                      input_states + output_states +
                      [ntm.get_loss(seq_length)],
                      feed_dict=feed_dict)

    is_sz = len(input_states)
    os_sz = len(output_states)

    outputs = result[:seq_length]
    read_ws = result[seq_length:seq_length + is_sz]
    write_ws = result[seq_length + is_sz:seq_length + is_sz + os_sz]
    loss = result[-1]

    if print_:
        np.set_printoptions(suppress=True)
        print(" true output : ")
        pprint(seq)
        print(" predicted output :")
        pprint(np.round(outputs))
        print(" Loss : %f" % loss)
        np.set_printoptions(suppress=False)
    else:
        return seq, outputs, read_ws, write_ws, loss

def aggregate_data():
    # aggregate data into train/test arrays with appropriate labels
    parsed_data_files = os.listdir('parsed_data')
    parsed_data_files.sort()
    total_examples = 0
    train_arrays = np.empty((0, num_dimensions))
    train_labels = np.empty(0)
    test_arrays = np.empty((0, num_dimensions))
    test_labels = np.empty(0)
    for file in parsed_data_files:
        with open('parsed_data/' + file, 'r') as f:
            pprint(file)
            if file[:4] == 'test':  # test file
                for idx, line in enumerate(f):
                    test_arrays = np.append(
                        test_arrays,
                        np.reshape(
                            model[file.replace('-', '_').upper()[:-4] + '_' + str(idx)],
                            (1, num_dimensions)),
                        axis=0)
                    test_labels = np.append(
                        test_labels,
                        file.replace('-', '_').upper()[5:-4]
                    )  # ignore preceding 'TEST_' and '.txt' extension
            else:  # train file
                for idx, line in enumerate(f):
                    train_arrays = np.append(
                        train_arrays,
                        np.reshape(
                            model[file.replace('-', '_').upper()[:-4] + '_' + str(idx)],
                            (1, num_dimensions)),
                        axis=0)
                    train_labels = np.append(
                        train_labels,
                        file.replace('-', '_').upper()[6:-4]
                    )  # ignore preceding 'TRAIN_' and '.txt' extension
            total_examples += idx + 1
    np.save(train_arrays_outfile, train_arrays)
    np.save(test_arrays_outfile, test_arrays)
    np.save(train_labels_outfile, train_labels)
    np.save(test_labels_outfile, test_labels)
    return (train_arrays, test_arrays, train_labels, test_labels)

def handle_external_event(event, lvm_id, lvm_uuid, seq_no):
    utils.pprint("External event: '%s', '%s', '%s', '%s'" %
                 (event, lvm_id, lvm_uuid, str(seq_no)))
    event_complete()

    # Let's see if we have the VG and if the sequence numbers match, if
    # they do we have nothing to process (in theory)

    # We can try to be selective about what we re-fresh, but in reality
    # it takes just as long to selectively re-fresh as it does to grab
    # everything and let stuff sort itself out.
    if lvm_uuid and lvm_id:
        # If we are supplied with these, lets see if we need to update
        vg = cfg.om.get_by_uuid_lvm_id(lvm_uuid, lvm_id)
        if not (event == 'vg_update' and vg and vg.Seqno == seq_no):
            load(refresh=True)
    else:
        load(refresh=True)

def keyword_delete(db):
    db.execute('SELECT lang_id FROM tb_lang')
    data = db.fetchall()
    all_table = ['tb_keyword_%s' % lang[0] for lang in data]
    all_table.append('tb_keyword')
    key = request.forms.key.strip()
    for table in all_table:
        sql = 'DELETE FROM %s WHERE k="%s"' % (table, key)
        try:
            db.execute(sql)
        except Exception, e:
            pprint('[ keyword_delete ]:sql', sql)
            return {'result': 1, 'data': str(e)}

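# A safer variant of the DELETE in keyword_delete above: bind the value via
# the DB-API instead of interpolating it (table names cannot be bound, so
# they still come from the tb_lang-derived whitelist). Sketch only, assuming
# a MySQLdb-style cursor where %s is the placeholder.
for table in all_table:
    db.execute('DELETE FROM %s WHERE k=%%s' % table, (key,))
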
def printf(s, color=None):
    code = 0
    if color == 'red':
        code = 1
    elif color == 'green':
        code = 2
    elif color == 'yellow':
        code = 3
    elif color == 'blue':
        code = 4
    pre = '\x1b[3%dm' % code
    suf = '\x1b[39m'
    if color:
        pprint("%s%s%s" % (pre, s, suf))
    else:
        pprint(s)

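# Usage sketch for printf above: '\x1b[3Xm' selects the foreground colour
# (31 red, 32 green, 33 yellow, 34 blue) and '\x1b[39m' restores the default.
printf('build ok', color='green')
printf('low disk space', color='yellow')
printf('plain message')  # printed without any colour codes
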
def runHardyCrossIteration(K, N, NODES, Q, QNODE, F, loops, lengths,
                           diameters, iteration):
    # correction factors
    cfs = {}
    correction_factors = []
    for loop_i, current_loop in enumerate(loops):
        pprint('loop = {}, current_loop = {}'.format(loop_i + 1, current_loop),
               file=FILE)
        s_hl, s = 0, 0
        nodes = []
        for nn in current_loop:
            node1, node2 = nn
            nodes.append(nn)
            if nn not in cfs:
                cfs[nn] = []

            # the assumed flow rate and k
            q_a = NODES[node1][node2][Q]
            k = NODES[node1][node2][K]
            f = NODES[node1][node2][F]

            # calc for this iteration
            hl = k * (abs(q_a) ** N)
            hl = -hl if q_a < 0 else hl
            result = N * abs(hl / q_a)

            # sigma
            s_hl += hl
            s += result
            # qo, hlo, itero = q_a, hl, iter

            # display iteration result
            pprint('node: {} to {}, q = {}, k = {}, F = {}, H_L = {}, iter = {}'.format(
                node1, node2, q_a, k, f, hl, result), file=FILE)

        # correction factor
        delta = -(s_hl / s)

        # relate correction factor to all nodes of this iteration
        for nn in nodes:
            cfs[nn].append(delta)

        pprint(f'delta = {delta}\n', file=FILE)
        correction_factors.append(delta)

    correction_factors = np.array(correction_factors)
    pprint('Correction\n============\n{}\n'.format(correction_factors), file=FILE)

    # update the nodes
    updateNodes(cfs, Q, QNODE, NODES, lengths, diameters, iteration)
    return correction_factors

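# Sketch of the nested-dict layout runHardyCrossIteration above reads,
# inferred from its NODES[node1][node2][Q] accesses; every key and number
# below is invented for illustration.
Q, K, F = 'q', 'k', 'f'  # hypothetical per-pipe record keys
NODES = {
    'A': {'B': {'q': 0.6, 'k': 1.2, 'f': 0.02}},   # pipe A -> B
    'B': {'C': {'q': -0.4, 'k': 2.0, 'f': 0.02}},  # negative q = reverse flow assumed
}
loops = [[('A', 'B'), ('B', 'C')]]  # one loop, traversed pipe by pipe
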
def compare():
    res = False
    cookie = request.get_cookie(COOKIE_KEY, secret=COOKIE_SECRET)
    _session_redis = ''
    if cookie:
        try:
            cid, session = cookie.split('.')
            _session_redis = redis.hget(REDIS_SESSION_KEY, cid)
            res = (_session_redis == session)
        except:
            print_e()
    pprint('[ compare ]cookie:', cookie, _session_redis, res)
    return res

def refresh_screen():
    scroll_editor()
    pprint("\x1b[?25l")  # hide cursor
    pprint("\x1b[2J")    # clear entire screen
    pprint("\x1b[H")     # reposition cursor
    draw_rows()
    draw_status_bar()
    draw_message_bar()
    update_cursor()
    pprint("\x1b[?25h")  # show cursor

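# The escape sequences in refresh_screen are standard VT100 controls; the
# same trick works without the editor machinery:
import sys

sys.stdout.write("\x1b[2J\x1b[H")  # clear the terminal, then home the cursor
sys.stdout.flush()
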
def payment_local():
    svc_idx = int(request.forms.svc.strip())
    account = request.forms.account.strip()
    charge_id = int(request.forms.charge_id.strip())
    server_id = int(request.forms.server_id.strip())
    host, port, server_id = svcs[indexOfPid(svc_idx)][1:]
    pprint('[ payment_local ]svc_idx:', svc_idx, 'svc host:', host,
           'svc port:', port, 'account:', account, 'charge_id:', charge_id,
           'server_id:', server_id)
    args = account, charge_id, server_id, 1, 1, 1, 0, int(time.time())
    sign = gm_sign(*args)
    require_args = args + (sign, )
    args = require_args  # dumps(require_args)
    try:
        res = gm_post(host, port, 'payment', dict(args=dumps(args)))
    except Exception, e:
        res = 'Exception', str(e)
        print_e()
    pprint("Res of gm post:", res)
    return {'data': str(res)}

def exe_gm_cmd():
    cmd_cmd = request.forms.cmd_cmd.strip()
    svc_idx = int(request.forms.svc.strip())
    cmd_args = (request.forms.cmd_args.strip()).split(' ')
    host, port, server_id = svcs[svc_idx][1:]
    cmd_args.append(server_id)
    pprint('cmd:', cmd_cmd, 'svc host:', host, 'svc port:', port,
           'args:', cmd_args)
    ts = int(time.time())
    cmd_args = dumps(cmd_args)
    sign = gm_sign(cmd_cmd, ts, cmd_args)
    args = {'cmd': cmd_cmd, 'ts': ts, 'args': cmd_args, 'sign': sign}
    #require_args = [cmd_cmd, 1, cmd_args, sign]
    #args = require_args # dumps(require_args)
    try:
        res = gm_post(host, port, 'gm', args)
    except Exception, e:
        res = 'Exception', str(e)

if len(sys.argv) != 2:
    print "Usage:"
    print "python untire.py bioguideID"
    sys.exit()

print "Loading current YAML..."
y = load_data("legislators-current.yaml")
print "Loading historical YAML..."
y1 = load_data("legislators-historical.yaml")

for moc in y1:
    if moc["id"].get("bioguide", None) != sys.argv[1]:
        continue

    print "Updating:"
    pprint(moc["id"])
    print
    pprint(moc["name"])

    moc["terms"].append(OrderedDict([
        ("type", moc["terms"][-1]["type"]),
        ("start", None),
        ("end", None),
        ("state", moc["terms"][-1]["state"]),
        ("party", moc["terms"][-1]["party"]),
    ]))

def trigger_handle(**kargs):
    '''kargs:
        obj          = service configuration
        service_data = real monitored service data from the client
    '''
    severity = ['Disaster', 'Urgent', 'Problem', 'Warning', 'Information']
    #print '\033[43;1m---trigger---\033[0m' #, kargs
    if not kargs.has_key('obj'):
        pprint('Lack of service configuration data', 'err')
    if not kargs.has_key('service_data'):
        pprint('Lack of service data', 'err')

    expression = json.loads(kargs['obj'].trigger.expression)
    #for k, v in expression.items():
    #    print k, v

    for s in severity:
        #print '\033[41;1m ---%s ----\033[0m' % s
        # e.g. 'Warning': [{'item_key': 'iowait', 'operator': '>', 'value': '80',
        #                   'logic': "else", 'handler': 'sum', 'mintues': 10},
        #                  {'item_key': 'idle', 'operator': '<', 'value': '60',
        #                   'logic': None, 'handler': 'sum', 'mintues': 10}]
        # (the 'mintues' key is spelled that way in the stored expression data)
        for key, conditions in expression.items():  # loop over each trigger list
            #print '--$-->', key, conditions
            for exp_line in conditions:  # loop over the condition list in each severity
                # get the item value handler first
                this_loop_satisfied = True
                last_loop_satisfied = True
                relation_with_last = 'OR'
                for exp in exp_line:
                    #print '----->', exp
                    value_handler = exp['handler']
                    operator = exp['operator']
                    relation_with_next_one = exp['logic']
                    mintues = exp.get('mintues')
                    value = exp.get('value')
                    print '\033[34;1m EXP: \033[0m', value_handler, mintues, operator, value, relation_with_next_one
                    if relation_with_last is not None:
                        if relation_with_last == 'OR':
                            # so last_loop_satisfied won't necessarily be True
                            if last_loop_satisfied:
                                # no need to carry on
                                print 'Last loop has satisfied already, will not go on, continue'
                                continue
                            else:
                                print 'OR: last one did not work, will test this loop again'
                        elif relation_with_last == 'AND':
                            if last_loop_satisfied:
                                # last_loop_satisfied must be True
                                print 'AND: last one worked, will test this one'
                            else:
                                print 'AND: last one did not satisfy, break'
                                break
                        else:
                            print 'no match'

def gm_sign(*args):
    h = BASE_SESSION_HASH.copy()
    h.update(''.join(map(str, args)))
    return h.hexdigest()


def gm_post(host, port, uri, args):
    import urllib, urllib2
    from json import dumps
    #params = urllib.urlencode(dict(args=dumps(args)))
    params = urllib.urlencode(args)
    URL = 'http://{0}:{1}/{2}'.format(host, port, uri)
    req = urllib2.Request(URL, params)
    return urllib2.urlopen(req).read()

if isinstance(monitor_dic, dict):
    # loop over each host from the DB
    for h, value in monitor_dic.items():
        print '\033[42;1m %s \033[0m' % h
        #print value
        if latest_monitor_data.has_key(h):  # make sure the host is in the latest monitor data
            # loop over each service on this host
            for service_key, service_obj in value['service'].items():
                print service_key, service_obj.check_interval, service_obj.trigger
                client_service_data = latest_monitor_data[h]['result_values'][service_key]
                # check if this service links to any trigger
                if service_obj.trigger:
                    # go through the trigger expression first
                    trigger_handle(obj=service_obj, service_data=client_service_data)
                    #print service_obj.trigger.name
                else:
                    # if no trigger is linked, only store the data in redis
                    print 'will save this data to redis later', service_key
                # loop over each item in this service
                """for item in service_obj.item_list.values():
                    #print item
                    # only go through it when it's enabled
                    if item['enabled']:
                        # compare the item key with the data sent by the client
                        client_item_key = latest_monitor_data[h]['result_values'][service_key]
                        #print client_item_key.get(item['key']), item['key']
                        trigger_handle(obj=service_obj, item_key=item['key'],
                                       clien_item_data=client_item_key)
                """
        else:
            # no monitor data for this host, definitely something went wrong
            pprint("No monitor data for this host!", 'err')

print "python retire.py bioguideID termEndDate" sys.exit() try: parse_date(sys.argv[2]) except: print "Invalid date: ", sys.argv[2] sys.exit() y = load_data("legislators-current.yaml") y1 = load_data("legislators-historical.yaml") for moc in y: if moc["id"].get("bioguide", None) != sys.argv[1]: continue print "Updating:" pprint(moc["id"]) print pprint(moc["name"]) print pprint(moc["terms"][-1]) moc["terms"][-1]["end"] = sys.argv[2] y.remove(moc) y1.append(moc) break save_data(y, "legislators-current.yaml") save_data(y1, "legislators-historical.yaml")
def gradient_descent_experiment(true_rdm=None, num_reads=100000):
    #genome = get_ecoli_genome(at_lab=False)
    G = len(genome)
    w = 10
    mfl = 250
    lamb = 1.0 / mfl
    simulating_data = False
    # scoring function; defined up front since it is used for both the true
    # state (when simulating) and the initial state
    logf = lambda state: timestamp(complete_log_likelihood(state, true_rdm, lamb,
                                                           num_reads=num_reads))
    if true_rdm is None:
        simulating_data = True
        true_matrix = [[-2, 0, 0, 0] for i in range(w)]
        true_mu = -20
        true_eps = score_genome_np(true_matrix, genome)
        true_ps = fd_solve_np(true_eps, true_mu)
        true_reads = reads_from_ps(true_ps, mfl, min_seq_len=75, num_reads=num_reads)
        true_rdm = density_from_reads(true_reads, G)
        true_state = ((true_matrix, true_mu), true_eps)
    true_ll = logf(true_state) if simulating_data else None

    matrix = random_energy_matrix(w)
    mu = -20
    eps = score_genome_np(matrix, genome)
    init_state = ((matrix, mu), eps)
    dw = 0.1
    dmu = 0.1
    old_ll = 0
    print "true_ll:", true_ll
    cur_ll = logf(init_state)
    eta = 10**-7  # learning rate
    iterations = 0
    while cur_ll > old_ll or iterations == 0:
        old_ll = cur_ll
        dmat = [[0] * 4 for i in range(w)]
        for i in range(w):
            for j in range(4):
                print "i, j:", i, j
                new_mat = [row[:] for row in matrix]
                new_mat[i][j] += dw
                fwd_eps, rev_eps = eps
                new_eps = update_scores_np(fwd_eps, rev_eps, i, j, dw, w, genome)
                new_state = ((new_mat, mu), new_eps)
                new_ll = logf(new_state)
                print "cur ll, new_ll:", cur_ll, new_ll, "(improvement)" if new_ll > cur_ll else "(worsening)"
                delta_w = (new_ll - cur_ll) / dw * eta
                print "delta_w:", delta_w
                dmat[i][j] = delta_w
        new_mu = mu + dmu
        new_state = ((matrix, new_mu), eps)
        new_ll = logf(new_state)
        print "mu:"
        print "cur ll, new_ll:", cur_ll, new_ll, "(improvement)" if new_ll > cur_ll else "(worsening)"
        delta_mu = (new_ll - cur_ll) / dmu * eta
        print "delta_mu:", delta_mu
        old_matrix = [row[:] for row in matrix]
        for i in range(w):
            for j in range(4):
                matrix[i][j] += dmat[i][j]
        old_eps = np.array(eps)
        eps = score_genome_np(matrix, genome)
        old_mu = mu
        mu += delta_mu
        cur_state = ((matrix, mu), eps)
        cur_ll = logf(cur_state)
        print "\nresults of iteration %s:" % iterations
        pprint(matrix)
        print mu
        print "likelihood:", old_ll, "->", cur_ll
        iterations += 1
    return ((old_matrix, old_mu), old_eps)

def arena_robot_page(db):
    _q = request.query
    _lang = _q.lang
    last_col = ('''<button id="delete" style="width:20px;height:20px;" '''
                '''onclick="return arena_robot_delete('{0}');">Delete</button>''')
    _table = 'tb_arena_robot_%s' % _lang if _lang and _lang != '0' else TABLE_NAME
    _sql = sql_arena_robot.format(_table) + ' %s %s %s'
    _filter_sql = 'SELECT SQL_CALC_FOUND_ROWS %s FROM {0} %s %s %s'.format(_table)
    _len_sql = 'SELECT COUNT(*) FROM {0}'.format(_table)
    limit = "LIMIT {0},{1}".format(_q.iDisplayStart, _q.iDisplayLength)

    order = 'ORDER BY '
    for i in xrange(int(_q.iSortingCols)):
        _attr = 'iSortCol_%d' % i
        if getattr(_q, 'bSortable_%s' % getattr(_q, _attr)) == 'true':
            asc = getattr(_q, 'sSortDir_%d' % i)
            order += '%s %s, ' % (field_arena_robot[int(getattr(_q, _attr))], asc)
    order = order[:-2]
    if len(order) <= 8:
        order = ''

    where = ''
    if _q.sSearch:
        where = 'WHERE ('
        for f in field_arena_robot:
            where += '{0} LIKE "%{1}%" OR '.format(f, _q.sSearch)
        where = where[:-3] + ')'
    for i in xrange(len(field_arena_robot)):
        _attr = getattr(_q, 'bSearchable_%d' % i, None)
        if _attr == 'true':
            _attr = getattr(_q, 'sSearch_%d' % i, '')
            if _attr:
                where += ' AND ' if where else 'WHERE '
                where += '`{0}` LIKE "%{1}%"'.format(field_arena_robot[i], _attr)

    _sql = _sql % (where, order, limit)
    _s_fields = ','.join(('%s' % f for f in field_arena_robot))
    _filter_sql = _filter_sql % (_s_fields, where, order, limit)
    pprint('[ arena_robot_page ]: _filter_sql:', _filter_sql)
    db.execute(_filter_sql)
    db.execute('SELECT FOUND_ROWS()')
    _filtered = db.fetchone()[0]
    db.execute(_len_sql)
    _total = db.fetchone()[0]
    _echo = _q.sEcho
    pprint('[ arena_robot_page ]: sql:', _sql)
    db.execute(_sql)
    _aadata = [r + (last_col.format(r[0]),) for r in db.fetchall()]
    data = {
        'sEcho': int(_echo),
        'iTotalRecords': _total,
        'iTotalDisplayRecords': _filtered,
        'aaData': _aadata
    }
    return data

    # If all else fails, try to convert this into an AVDict
    _dump(avdict(obj), fp)


if __name__ == "__main__":
    test_avmap = lambda obj: loads(dumps(obj)) == obj
    assert test_avmap(None)
    assert test_avmap(1)
    assert test_avmap("A string")
    assert test_avmap([1, 2, 3])
    assert test_avmap(AVDict("Test", {"1": 2, "Test": 2, "Test2": [1, 2, 3]}))

    root = "data"
    if os.path.exists(root):
        for n in os.listdir(root):
            print "Reading file: %s" % n
            with open(os.path.join(root, n)) as f:
                pprint(loads(f.read()))

    #stream = dumps(['clientVersions'])
    #data = []
    #for i in stream:
    #    if (i > 'a' and i < 'z') or (i > 'A' and i < 'Z'):
    #        data += [i]
    #    else:
    #        data += "\\%x" % struct.unpack("b", i)
    #print ''.join(data)

    #obj = AVDict("Teste", {"v1":1,"v3":[1, 2, 3, "Teste", [4, 5], AVDict("Teste2", {"1":3})]})