def web_login(nickname, password):
    global g_loginFailedCountLock
    if not g_loginFailedCountLock:
        g_loginFailedCountLock = threading.RLock()
        utility.get_timer().setInterval(auto_del_timer, 30 * 60 * 1000)
    key = nickname
    if key:
        g_loginFailedCountLock.acquire()
        count = 0
        if key in g_loginFailedCount:
            count = g_loginFailedCount[key]
        g_loginFailedCountLock.release()
        if count >= 20:
            utility.write_log(-1, nickname + " has too many login attempts: " + str(count), 0)
            return {"result": "false", "msg": "Too many login attempts; please try again in 30 minutes"}
    user = session.login(nickname, password)
    if user:
        g_loginFailedCountLock.acquire()
        if key in g_loginFailedCount:
            del g_loginFailedCount[key]
        g_loginFailedCountLock.release()
        return {"result": "true", "msg": user.nickname + ", welcome back", "session": user.session_id}
    else:
        g_loginFailedCountLock.acquire()
        if key in g_loginFailedCount:
            g_loginFailedCount[key] += 1
        else:
            g_loginFailedCount[key] = 1
        g_loginFailedCountLock.release()
        return {"result": "false", "msg": "Incorrect username or password"}
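The core of web_login above is a lock-guarded, per-user failure counter. A self-contained sketch of just that pattern, stripped of the session and timer plumbing (all names here are illustrative, not the project's):

import threading

_failed = {}
_failed_lock = threading.RLock()
MAX_ATTEMPTS = 20  # same threshold web_login checks against

def too_many_failures(key):
    """True once a key has accumulated MAX_ATTEMPTS failed logins."""
    with _failed_lock:
        return _failed.get(key, 0) >= MAX_ATTEMPTS

def record_failure(key):
    with _failed_lock:
        _failed[key] = _failed.get(key, 0) + 1

def clear_failures(key):
    """Reset the counter after a successful login."""
    with _failed_lock:
        _failed.pop(key, None)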
def on_tutorial(self):
    try:
        user_id = self.info['id']
        log_type = jsd["lg_log"]
        log_data = "User: " + self.info['id'] + " loading tutorial"
        action = "Load Tutorial"
        risk = jsd["rsk_none"]
        utility.write_log(user_id, log_type, log_data, action, risk)
        # Launch the tutorial script
        os.system('tutorial.py')
    except Exception:
        QtWidgets.QMessageBox.warning(
            self, 'Warning',
            "Unable To Load Tutorial At The Moment. Please Try Again Later!!!")
        user_id = jsd["system"]
        log_type = jsd["lg_warning"]
        log_data = "Unable to load tutorial for user: " + self.info['id']
        action = "Load Tutorial"
        risk = jsd["rsk_low"]
        utility.write_log(user_id, log_type, log_data, action, risk)
def login(loginName, password):
    user = _is_repeat_login(loginName, password)
    if user:
        return user
    c = utility.get_cursor()
    c.execute("SELECT ID,NickName,Sex,birthdayYear,certfState,Password "
              "FROM u_user WHERE password=? AND nickname=?", (password, loginName))
    rows = c.fetchall()
    if len(rows) == 0:
        print("SELECT ID,NickName,Sex,birthdayYear,certfState,Password "
              "FROM u_user WHERE password='%s' AND nickname='%s'" % (password, loginName))
        utility.write_log(-1, loginName + " login failed", 0)
        return None
    r = rows[0]
    user = session_data()
    user.user_id = r[0]
    user.nickname = r[1]
    user.sex = r[2]
    user.session_id = make_session_id()
    user.age = utility.now().year - r[3]
    user.certf_state = r[4]
    user.pwd = r[5]
    user.ip = utility.get_ip()
    global g_session_data
    g_session_data[user.session_id] = user
    utility.write_log(user.user_id, "login succeeded", 1)
    return user
def on_predictor(self):
    try:
        self.prediction = prediction()
        self.prediction.show()
        self.hide()
        user_id = self.info['id']
        log_type = jsd["lg_log"]
        log_data = "User: " + self.info['id'] + " loading predictor"
        action = "Load Predictor"
        risk = jsd["rsk_none"]
        utility.write_log(user_id, log_type, log_data, action, risk)
    except Exception:
        QtWidgets.QMessageBox.critical(
            self, 'Error', "Error Loading Predictor. Please Try Again Later!!!")
        user_id = jsd["system"]
        log_type = jsd["lg_error"]
        log_data = "Error loading predictor for user: " + self.info['id']
        action = "Load Predictor"
        risk = jsd["rsk_high"]
        utility.write_log(user_id, log_type, log_data, action, risk)
def compose_command(flags, host):
    """
    Compose rsync command for action
    :param flags: Namespace that contains info for command
    :param host: Hostname of machine
    :return: list
    """
    print_verbose(args.verbose, 'Build a rsync command')
    # Set rsync binary
    command = ['rsync']
    if flags.action == 'backup':
        # Set mode option
        if flags.mode == 'Full':
            command.append('-ah')
            command.append('--no-links')
            # Write catalog file
            write_catalog(catalog_path, backup_id, 'type', 'Full')
        elif flags.mode == 'Incremental':
            last_full = get_last_full(catalog_path)
            if last_full:
                command.append('-ahu')
                command.append('--no-links')
                command.append('--link-dest={0}'.format(last_full))
                # Write catalog file
                write_catalog(catalog_path, backup_id, 'type', 'Incremental')
            else:
                command.append('-ah')
                command.append('--no-links')
                # Write catalog file
                write_catalog(catalog_path, backup_id, 'type', 'Full')
        elif flags.mode == 'Mirror':
            command.append('-ah')
            command.append('--delete')
            # Write catalog file
            write_catalog(catalog_path, backup_id, 'type', 'Mirror')
        # Set verbosity
        if flags.verbose:
            command.append('-vP')
        # Set compress mode
        if flags.compress:
            command.append('-z')
        if flags.log:
            log_path = os.path.join(
                compose_destination(host, flags.destination), 'backup.log')
            command.append('--log-file={0}'.format(log_path))
            utility.write_log(log_args['status'], log_args['destination'], 'INFO',
                              'rsync log path: {0}'.format(log_path))
    elif flags.action == 'restore':
        command.append('-ahu')
        if flags.verbose:
            command.append('-vP')
        if flags.log:
            log_path = os.path.join(rpath, 'restore.log')
            command.append('--log-file={0}'.format(log_path))
            utility.write_log(log_args['status'], log_args['destination'], 'INFO',
                              'rsync log path: {0}'.format(log_path))
    print_verbose(args.verbose, 'Command flags are: {0}'.format(' '.join(command)))
    return command
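As a sanity check, hand-tracing the branches above for a verbose, compressed Full backup with logging disabled yields the flag list below (a worked illustration, not output captured from the tool):

# Hypothetical flags: action='backup', mode='Full',
# verbose=True, compress=True, log=False
# compose_command(flags, host) then returns:
['rsync', '-ah', '--no-links', '-vP', '-z']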
def url_register():
    try:
        ids = bottle.request.params.ids.split("@@")
        vals = bottle.request.params.vals.split("@@")
        r = {}
        for i, v in zip(ids, vals):
            r[int(i)] = v
        ctrl_record.save(session.get().user_id, r)
        return json.dumps({"result": "true"})
    except Exception as error:
        utility.write_log(-1, "failed to update preference info: " + str(error), 0)
        return json.dumps({"result": "false"})
def back(self):
    self.mn = main()
    self.mn.show()
    self.close()
    user_id = self.info['id']
    log_type = jsd["lg_log"]
    log_data = "User: " + self.info['id'] + " left predictor"
    action = "Predictor"
    risk = jsd["rsk_none"]
    utility.write_log(user_id, log_type, log_data, action, risk)
def on_logout(self):
    user_id = self.info['id']
    log_type = jsd["lg_log"]
    log_data = "User " + self.info['id'] + " logged out from ASLapp successfully"
    action = "Logout"
    risk = jsd["rsk_none"]
    utility.write_log(user_id, log_type, log_data, action, risk)
    self.lg = login()
    self.lg.show()
    self.close()
    utility.logout()
def visit(sex):
    user = session_data()
    user.user_id = make_visit_id()
    user.nickname = ""
    user.sex = sex
    user.session_id = str(user.user_id)
    user.age = 0
    user.certf_state = 0
    user.ip = utility.get_ip()
    if sex == 0:
        utility.write_log(user.user_id, "male guest visit", 1)
    else:
        utility.write_log(user.user_id, "female guest visit", 1)
    global g_session_data
    g_session_data[user.session_id] = user
    return user
def save(user_id, record_map):
    # key = record_id, value = record_value
    db = utility.get_db()
    c = db.cursor()
    data = ctrl_record.read(user_id)
    changed = False
    for id in record_map:
        if not ctrl_record._is_right_id(id):
            continue
        changed = True
        if id in data:
            c.execute("UPDATE u_record SET recordValue=? WHERE userID=? AND recordID=?",
                      (record_map[id], user_id, id))
        else:
            c.execute("INSERT INTO u_record(userID,recordID,recordValue) VALUES(?,?,?)",
                      (user_id, id, record_map[id]))
    if not changed:
        return
    utility.write_log(user_id, "updated preference info", 1, False)
    db.commit()
def compose_source(action, os_name, sources):
    """
    Compose source
    :param action: command action (backup, restore, archive)
    :param os_name: Name of operating system
    :param sources: Dictionary or string that contains the paths of source
    :return: list
    """
    if action == 'backup':
        src_list = []
        # Add include to the list
        folders = map_dict_folder(os_name)
        # Write catalog file
        write_catalog(catalog_path, backup_id, 'os', os_name)
        custom = True
        if 'System' in sources:
            src_list.append(':{0}'.format(folders['System']))
            return src_list
        if 'User' in sources:
            src_list.append(':{0}'.format(folders['User']))
            custom = False
        if 'Config' in sources:
            src_list.append(':{0}'.format(folders['Config']))
            custom = False
        if 'Application' in sources:
            src_list.append(':{0}'.format(folders['Application']))
            custom = False
        if 'Log' in sources:
            src_list.append(':{0}'.format(folders['Log']))
            custom = False
        if custom:
            # This is custom data
            for custom_data in sources:
                src_list.append(
                    ':{0}'.format("'" + custom_data.replace("'", "'\\''") + "'"))
        utility.write_log(log_args['status'], log_args['destination'], 'INFO',
                          'OS {0}; backup folder {1}'.format(os_name, ' '.join(src_list)))
        print_verbose(args.verbose,
                      'Include this criteria: {0}'.format(' '.join(src_list)))
        return src_list
def register(nick, pwd, sex, age):
    assert int(sex) <= 1
    age = int(age)
    try:
        now = utility.now()
        db = utility.get_db()
        c = db.cursor()
        c.execute("INSERT INTO u_user (NickName,Sex,Password,CreateDate,BirthdayYear,CertfState) VALUES(?,?,?,?,?,?)",
                  (nick, int(sex), pwd, now.strftime("%Y-%m-%d %H:%M:%S"), now.year - age, 0))
        db.commit()
        # Commit first, otherwise the new user_id cannot be retrieved
        user = session.login(nick, pwd)
        c.execute("INSERT INTO u_profile(ID,EditDate) VALUES(?,?)", (user.user_id, now))
        utility.write_log(user.user_id, "registration succeeded", 1, False)
        db.commit()
        return user
    except Exception as err:
        utility.write_log(-1, nick + " registration failed", 0)
        traceback.print_exc()
        return None
def compose_destination(computer_name, folder):
    """
    Compose folder destination of backup
    :param computer_name: name of source computer
    :param folder: path of backup
    :return: string
    """
    # Create root folder of backup
    first_layer = os.path.join(folder, computer_name)
    second_layer = os.path.join(first_layer, utility.time_for_folder())
    if not os.path.exists(first_layer):
        os.mkdir(first_layer)
        utility.write_log(log_args['status'], log_args['destination'], 'INFO',
                          'Create folder {0}'.format(first_layer))
    if not os.path.exists(second_layer):
        os.mkdir(second_layer)
        utility.write_log(log_args['status'], log_args['destination'], 'INFO',
                          'Create folder {0}'.format(second_layer))
    # Write catalog file
    write_catalog(catalog_path, backup_id, 'path', second_layer)
    print_verbose(args.verbose, 'Destination is {0}'.format(second_layer))
    return second_layer
def archive_policy(catalog, destination):
    """
    Archive policy
    :param catalog: catalog file
    :param destination: destination path of archive file
    """
    config = read_catalog(catalog)
    for bid in config.sections():
        full_count = count_full(config, config.get(bid, 'name'))
        if (config.get(bid, 'archived', fallback='unset') == 'unset') and not \
                (config.get(bid, 'cleaned', fallback=False)):
            type_backup = config.get(bid, 'type')
            path = config.get(bid, 'path')
            date = config.get(bid, 'timestamp')
            logpath = os.path.join(os.path.dirname(path), 'general.log')
            utility.print_verbose(
                args.verbose,
                "Check archive this backup {0}. Folder {1}".format(bid, path))
            if (type_backup == 'Full') and (full_count <= 1):
                continue
            archive = utility.archive(path, date, args.days, destination)
            if archive == 0:
                write_catalog(catalog, bid, 'archived', 'True')
                print(utility.PrintColor.GREEN +
                      'SUCCESS: Archive {0} successfully.'.format(path) +
                      utility.PrintColor.END)
                utility.write_log(log_args['status'], logpath, 'INFO',
                                  'Archive {0} successfully.'.format(path))
            elif archive == 1:
                print(utility.PrintColor.RED +
                      'ERROR: Archive {0} failed.'.format(path) +
                      utility.PrintColor.END)
                utility.write_log(log_args['status'], logpath, 'ERROR',
                                  'Archive {0} failed.'.format(path))
            else:
                utility.print_verbose(
                    args.verbose,
                    "No archive backup {0}. Folder {1}".format(bid, path))
def retention_policy(host, catalog, logpath):
    """
    Retention policy
    :param host: hostname of machine
    :param catalog: catalog file
    :param logpath: path of log file
    """
    config = read_catalog(catalog)
    full_count = count_full(config, host)
    for bid in config.sections():
        if (config.get(bid, 'cleaned', fallback='unset') == 'unset') and \
                (config.get(bid, 'name') == host):
            type_backup = config.get(bid, 'type')
            path = config.get(bid, 'path')
            date = config.get(bid, 'timestamp')
            utility.print_verbose(
                args.verbose,
                "Check cleanup this backup {0}. Folder {1}".format(bid, path))
            if (type_backup == 'Full') and (full_count <= 1):
                continue
            cleanup = utility.cleanup(path, date, args.retention)
            if cleanup == 0:
                write_catalog(catalog, bid, 'cleaned', 'True')
                print(utility.PrintColor.GREEN +
                      'SUCCESS: Cleanup {0} successfully.'.format(path) +
                      utility.PrintColor.END)
                utility.write_log(log_args['status'], logpath, 'INFO',
                                  'Cleanup {0} successfully.'.format(path))
            elif cleanup == 1:
                print(utility.PrintColor.RED +
                      'ERROR: Cleanup {0} failed.'.format(path) +
                      utility.PrintColor.END)
                utility.write_log(log_args['status'], logpath, 'ERROR',
                                  'Cleanup {0} failed.'.format(path))
            else:
                utility.print_verbose(
                    args.verbose,
                    "No cleanup backup {0}. Folder {1}".format(bid, path))
def __init__(self):
    utility.write_log("Starting execution...")
    self.ses = requests.session()
def snap(self):
    try:
        self.x1, self.y1, self.x2, self.y2 = 20, 20, 220, 270
        img_cropped = self.image[self.y1:self.y2, self.x1:self.x2]
        cv2.imwrite(jsd["path1"], img_cropped)
        self.lbl_img_snap.setPixmap(QtGui.QPixmap(jsd["path1"]))
        self.lbl_img_snap.show()
        edges = cv2.Canny(img_cropped, 75, 150)
        cv2.imwrite(jsd["path2"], edges)
        user_id = self.info['id']
        log_type = jsd["lg_log"]
        log_data = "User: " + self.info['id'] + " snapped successfully"
        action = "Snap"
        risk = jsd["rsk_none"]
        utility.write_log(user_id, log_type, log_data, action, risk)
    except Exception:
        QtWidgets.QMessageBox.critical(
            self, 'Error', "Error Taking Snap. Please Try Again Later!!!")
        user_id = jsd["system"]
        log_type = jsd["lg_error"]
        log_data = "Snapping failed for user: " + self.info['id']
        action = "Snap"
        risk = jsd["rsk_high"]
        utility.write_log(user_id, log_type, log_data, action, risk)
    try:
        # CNN Predictor Code Block
        from keras.models import Sequential
        from keras.layers import Conv2D, MaxPooling2D
        from keras.layers import Activation, Dropout, Flatten, Dense
        from keras import backend as K
        from keras.preprocessing import image
        from keras.optimizers import Adam
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        # dimensions of our images
        img_width, img_height = 200, 250
        if K.image_data_format() == 'channels_first':
            input_shape = (3, img_width, img_height)
        else:
            input_shape = (img_width, img_height, 3)
        names = {
            0: '1', 1: '2', 2: '3', 3: '4', 4: '5', 5: '6', 6: '7',
            7: '8', 8: '9', 9: 'A', 10: 'B', 11: 'C', 12: 'D', 13: 'E',
            14: 'F', 15: 'G', 16: 'H', 17: 'I', 18: 'K', 19: 'L', 20: 'M',
            21: 'N', 22: 'O', 23: 'P', 24: 'Q', 25: 'R', 26: 'S', 27: 'T',
            28: 'U', 29: 'V', 30: 'W', 31: 'X', 32: 'Y',
        }
        _dir = jsd["path2"]  # input
        model = Sequential()
        model.add(Conv2D(32, (3, 3), input_shape=input_shape, padding='same'))
        model.add(Dropout(0.2))
        # model.add(MaxPooling2D(pool_size=(2, 2)))
        # first convo
        model.add(Conv2D(32, (3, 3), padding='valid'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.2))
        # second convo
        model.add(Conv2D(64, (3, 3), padding='valid'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.2))
        # third convo
        model.add(Conv2D(64, (3, 3), padding='valid'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.2))
        # fourth convo
        model.add(Conv2D(128, (3, 3), padding='valid'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.2))
        # fifth convo
        model.add(Conv2D(128, (3, 3), padding='valid'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.2))
        # fully connected
        model.add(Flatten())
        model.add(Dense(256))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(33))
        model.add(Activation('softmax'))
        # load model
        model.load_weights('./models/trained_model.h5')
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=1e-3),
                      metrics=['categorical_accuracy'])
        img = image.load_img(_dir, target_size=(img_width, img_height))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        images = np.vstack([x])
        p_classes = model.predict_classes(images)
        letter = names[p_classes[0]]
        print(letter)
        self.lbl_ans.setText(letter)
        # End Of CNN Predictor Code Block
        user_id = self.info['id']
        log_type = jsd["lg_log"]
        log_data = ("User: " + self.info['id'] +
                    " predicted the letter or number: " + letter + " successfully")
        action = "Prediction"
        risk = jsd["rsk_none"]
        utility.write_log(user_id, log_type, log_data, action, risk)
    except Exception:
        QtWidgets.QMessageBox.critical(
            self, 'Error', "Error In Prediction. Please Try Again Later!!!")
        user_id = jsd["system"]
        log_type = jsd["lg_error"]
        log_data = "Prediction failed for user: " + self.info['id']
        action = "Prediction"
        risk = jsd["rsk_high"]
        utility.write_log(user_id, log_type, log_data, action, risk)
write_catalog(catalog_path, backup_id, 'name', hostname)
# Compose source
if args.data:
    srcs = args.data
    source_list = compose_source(args.action, args.type, srcs)
elif args.customdata:
    srcs = args.customdata
    source_list = compose_source(args.action, args.type, srcs)
else:
    source_list = []
# Compose source <user>@<hostname> format
cmd.append('{0}@{1}'.format(args.user, hostname) + " ".join(source_list))
# Compose destination
bck_dst = compose_destination(hostname, args.destination)
utility.write_log(log_args['status'], log_args['destination'], 'INFO',
                  'Backup on folder {0}'.format(bck_dst))
cmd.append(bck_dst)
# Compose pull commands
cmds.append(' '.join(cmd))
# Write catalog file
write_catalog(catalog_path, backup_id, 'timestamp', utility.time_for_log())
# Create a symlink for last backup
utility.make_symlink(bck_dst, os.path.join(args.destination, hostname, 'last_backup'))
# Start backup
run_in_parallel(start_process, cmds, args.parallel)

# Check restore session
if args.action == 'restore':
def test_write_log(self):
    self.assertEqual(
        utility.write_log(0, 'test', 'test data', 'testing', 'None'), True)
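The test pins down the five-argument call shape used by the GUI examples above (user id, log type, message, action, risk) and expects True on success. A minimal sketch of a write_log that would pass it, assuming a plain append-to-file logger; the file path, timestamp format, and field order are illustrative assumptions, not the project's actual utility module:

import datetime

LOG_FILE = "app.log"  # assumed location, for illustration only

def write_log(user_id, log_type, log_data, action, risk):
    """Append one pipe-delimited record; return True on success."""
    try:
        stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        with open(LOG_FILE, "a", encoding="utf-8") as f:
            f.write("{0} | {1} | {2} | {3} | {4} | {5}\n".format(
                stamp, user_id, log_type, action, risk, log_data))
        return True
    except OSError:
        return False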
import preferences
import sys

sys.path.append("../Utility/")
import utility

sys.path.append("../Processor/")
import processor

if __name__ == '__main__':
    pref = preferences.Preferences()
    try:
        pref.check_preferences()
        utility.wait(pref.hours_of_sleep)
        processor.stories_and_posts_all(pref.accounts, pref.number_of_processes,
                                        pref.number_of_accounts, pref.username,
                                        pref.password, pref.rest_time,
                                        pref.driver_function, pref.driver_string,
                                        pref.number_of_posts, utility.write_log)
    except Exception as e:
        utility.write_log("Error! " + str(e))
seeds = [0x0123, 0x4567, 0x3210, 0x7654,
         0x89AB, 0xCDEF, 0xBA98, 0xFEDC]

if "LR" in algo:
    batch_size = 10000
    buf_size = 1000000
    model = LR([X_dim, X_field], batch_size,
               data_path + camp + "/urp-model/lr.pickle",  # None
               [('uniform', -0.001, 0.001, seeds[4])],
               ['sgd', 1e-3, 'sum'], 0)  # 1e-3
    print("batch size={0}, buf size={1}".format(batch_size, buf_size))
    print(model.log)

if mode == "train":
    if save_model:
        utility.write_log(log_path, model.log)
    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
    # sess_config = tf.ConfigProto(gpu_options=gpu_options)
    # with tf.Session(graph=model.graph, config=sess_config) as sess:
    with tf.Session(graph=model.graph) as sess:
        tf.initialize_all_variables().run()
        print("model initialized")
        _iter = 0
        while True:
            _iter += 1
            print("iteration {0} start".format(_iter))
            train_data_set = open(train_path, 'rb')
            start_time = time.time()
def __del__(self):
    utility.write_log("Execution finished...")
    self.ses.close()
X_test = prepare_data(testDF)[dtc_features]

# predict SGD
# predictOnTest = sgdClassifier(X_train, y_trainDF, X_test)
# predict Naive Bayes
# predictOnTest = gauss(X_train, y_trainDF, X_test)
# predict Logistic Regression
# predictOnTest = logReg(X_train, y_trainDF, X_test)
# predict Decision Tree Classifier - creates a png file of tree
predictOnTest = tree(X_train, y_trainDF, X_test, "png", "tree5.png")
# predict Random Forest Classifier
# predictOnTest = random_forest(X_train, y_trainDF, X_test)
# predict Voting Classifier
predictOnTest = voting_classifier(X_train, y_trainDF, X_test)
# predict Bagging Classifier
# predictOnTest = bagging(X_train, y_trainDF, X_test)

comment = "All features, AVG for fare=NA. To Kaggle"
write_log(list(X_train.columns), accuracySGD, accuracyGauss, accuracyLogReg,
          accuracyTree, accuracyRandomForest, accuracyVoting, accuracyBagging,
          comment)

# writeCsv("csv/sgd.csv", passengerIdDF, predictOnTest)
# writeCsv("csv/gauss.csv", passengerIdDF, predictOnTest)
# writeCsv("csv/logistic.csv", passengerIdDF, predictOnTest)
# writeCsv("csv/tree.csv", passengerIdDF, predictOnTest)
# writeCsv("csv/random_forest.csv", passengerIdDF, predictOnTest)
writeCsv("csv/voting.csv", passengerIdDF, predictOnTest)
# writeCsv("csv/bagging.csv", passengerIdDF, predictOnTest)
def run_in_parallel(fn, commands, limit):
    """
    Run in parallel with limit
    :param fn: function to run in parallel
    :param commands: command arguments for the function
    :param limit: number of parallel processes
    """
    # Start a Pool with "limit" processes
    pool = Pool(processes=limit)
    jobs = []
    for command, plog in zip(commands, logs):
        # Run the function
        proc = pool.apply_async(func=fn, args=(command,))
        jobs.append(proc)
        print('Start {0} on {1}'.format(args.action, plog['hostname']))
        print_verbose(args.verbose, "rsync command: {0}".format(command))
        utility.write_log(log_args['status'], plog['destination'], 'INFO',
                          'Start process {0} on {1}'.format(args.action, plog['hostname']))
        if args.action == 'backup':
            # Write catalog file
            write_catalog(catalog_path, plog['id'], 'start', utility.time_for_log())
    # Wait for jobs to complete before exiting
    while not all([p.ready() for p in jobs]):
        time.sleep(5)
    # Check exit code of command
    for p, command, plog in zip(jobs, commands, logs):
        if p.get() != 0:
            print(utility.PrintColor.RED +
                  'ERROR: Command {0} exit with code: {1}'.format(command, p.get()) +
                  utility.PrintColor.END)
            utility.write_log(log_args['status'], plog['destination'], 'ERROR',
                              'Finish process {0} on {1} with error: {2}'.format(
                                  args.action, plog['hostname'], p.get()))
            if args.action == 'backup':
                # Write catalog file
                write_catalog(catalog_path, plog['id'], 'end', utility.time_for_log())
                write_catalog(catalog_path, plog['id'], 'status', "{0}".format(p.get()))
        else:
            print(utility.PrintColor.GREEN +
                  'SUCCESS: Command {0}'.format(command) +
                  utility.PrintColor.END)
            utility.write_log(log_args['status'], plog['destination'], 'INFO',
                              'Finish process {0} on {1}'.format(args.action, plog['hostname']))
            if args.action == 'backup':
                # Write catalog file
                write_catalog(catalog_path, plog['id'], 'end', utility.time_for_log())
                write_catalog(catalog_path, plog['id'], 'status', "{0}".format(p.get()))
            if args.retention:
                # Retention policy
                retention_policy(plog['hostname'], catalog_path, plog['destination'])
    # Safely terminate the pool
    pool.close()
    pool.join()
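The shape of run_in_parallel (apply_async each command, poll until every job is ready, then read exit codes with get) can be exercised in isolation. A self-contained sketch with a stand-in worker; the worker body and command strings are illustrative only:

import time
from multiprocessing import Pool

def run_cmd(command):
    """Stand-in worker: pretend to run a command, return its exit code."""
    time.sleep(0.1)
    return 0

if __name__ == '__main__':
    commands = ['rsync -ah src/ dst1/', 'rsync -ah src/ dst2/']
    pool = Pool(processes=2)
    jobs = [pool.apply_async(func=run_cmd, args=(c,)) for c in commands]
    # Poll until every job reports ready, as run_in_parallel does
    while not all(p.ready() for p in jobs):
        time.sleep(0.5)
    for p, c in zip(jobs, commands):
        print('{0} -> exit code {1}'.format(c, p.get()))
    pool.close()
    pool.join()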