def init():
    util.lock()
    util.set_img_format()
    util.override_keras_directory_iterator_next()
    util.set_classes_from_train_dir()
    util.set_samples_info()
    if not os.path.exists(config.trained_dir):
        os.mkdir(config.trained_dir)
def init():
    util.lock()
    util.set_img_format()
    util.override_keras_directory_iterator_next()
    util.set_classes_from_train_dir()
    util.set_samples_info()
    if util.get_keras_backend_name() != 'theano':
        util.tf_allow_growth()
    if not os.path.exists(config.trained_dir):
        os.mkdir(config.trained_dir)
def init():
    util.lock()  # check whether the previous process has completed
    util.set_img_format()  # set image format: channels first or channels last
    util.override_keras_directory_iterator_next()
    util.set_classes_from_train_dir()  # data_dir: data/sorted/train/
    util.set_samples_info()
    if util.get_keras_backend_name() != 'theano':
        util.tf_allow_growth()
    if not os.path.exists(config.trained_dir):
        os.mkdir(config.trained_dir)
def init():
    util.lock()
    util.set_img_format()
    # Pay close attention to the RGB->BGR conversion; the next method is overridden.
    util.override_keras_directory_iterator_next()
    util.set_classes_from_train_dir()
    util.set_samples_info()
    if util.get_keras_backend_name() != 'theano':
        util.tf_allow_growth()
    if not os.path.exists(config.trained_dir):
        os.mkdir(config.trained_dir)
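The util.lock() helper itself is not shown in these init() examples; it only needs to stop a second training run from starting while one is in progress. A minimal sketch of a file-based variant, assuming a hypothetical LOCK_PATH (the real helper may work differently):

import os
import sys

LOCK_PATH = '/tmp/train.lock'  # hypothetical location for the lock file

def lock():
    # O_CREAT | O_EXCL fails atomically if the lock file already exists,
    # so at most one process can create it and proceed.
    try:
        fd = os.open(LOCK_PATH, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
        os.write(fd, str(os.getpid()).encode())
        os.close(fd)
    except OSError:
        sys.exit('Another instance appears to be running, aborting.')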
def doCron(watch): if config.Config.cronenabled == "0": return if not os.path.exists(os.path.join(config.Config.scriptsdir, "broctl-config.sh")): util.output("error: broctl-config.sh not found (try 'broctl install')") return config.Config.config["cron"] = "1" # Flag to indicate that we're running from cron. if not util.lock(): return util.bufferOutput() if watch: # Check whether nodes are still running an restart if neccessary. for (node, isrunning) in control.isRunning(config.Config.nodes()): if not isrunning and node.hasCrashed(): control.start([node]) # Check for dead hosts. _checkHosts() # Generate statistics. _logStats(5) # Check available disk space. _checkDiskSpace() # Expire old log files. _expireLogs() # Update the HTTP stats directory. _updateHTTPStats() # Run external command if we have one. if config.Config.croncmd: (success, output) = execute.runLocalCmd(config.Config.croncmd) if not success: util.output("error running croncmd: %s" % config.Config.croncmd) # Mail potential output. output = util.getBufferedOutput() if output: util.sendMail("cron: " + output.split("\n")[0], output) util.unlock() config.Config.config["cron"] = "0" util.debug(1, "cron done")
def doCron(watch): if config.Config.cronenabled == "0": return config.Config.config[ "cron"] = "1" # Flag to indicate that we're running from cron. if not util.lock(): return util.bufferOutput() if watch: # Check whether nodes are still running an restart if neccessary. for (node, isrunning) in control.isRunning(config.Config.nodes()): if not isrunning and node.hasCrashed(): control.start([node]) # Check for dead hosts. _checkHosts() # Generate statistics. _logStats(5) # Check available disk space. _checkDiskSpace() # Expire old log files. _expireLogs() # Update the HTTP stats directory. _updateHTTPStats() # Run external command if we have one. if config.Config.croncmd: execute.runLocalCmd(config.Config.croncmd) # Mail potential output. output = util.getBufferedOutput() if output: util.sendMail("cron: " + output.split("\n")[0], output) util.unlock() config.Config.config["cron"] = "0" util.debug(1, "cron done")
def doCron(): if config.Config.cronenabled == "0": return if not util.lock(): return util.bufferOutput() config.Config.config["cron"] = "1" # Flag to indicate that we're running from cron. # Check whether nodes are still running an restart if neccessary. for (node, isrunning) in control.isRunning(config.Config.nodes()): if not isrunning and node.hasCrashed(): control.start([node]) # Check for dead hosts. _checkHosts() # Generate statistics. _logStats(5) # Check available disk space. _checkDiskSpace() # Expire old log files. _expireLogs() # Update the HTTP stats directory. _updateHTTPStats() # Run external command if we have one. if config.Config.croncmd: execute.runLocalCmd(config.Config.croncmd) # Mail potential output. output = util.getBufferedOutput() if output: util.sendMail("cron: " + output.split("\n")[0], output) config.Config.config["cron"] = "0" util.unlock()
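In the doCron() variants above, util.lock() returns a boolean instead of aborting, so the cron job can quietly bail out when another broctl instance holds the lock. A minimal sketch of that calling convention with a PID file and best-effort stale-lock recovery; the lock-file path is an assumption, not broctl's actual layout:

import os

LOCK_FILE = 'spool/lock'  # hypothetical path for the lock file

def lock():
    try:
        fd = os.open(LOCK_FILE, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
        os.write(fd, ("%d\n" % os.getpid()).encode())
        os.close(fd)
        return True
    except OSError:
        # Lock exists; if its owner is gone, remove it and retry once.
        try:
            pid = int(open(LOCK_FILE).read())
            os.kill(pid, 0)  # signal 0 only checks that the process exists
        except (ValueError, OSError):
            os.unlink(LOCK_FILE)
            return lock()
        return False

def unlock():
    os.unlink(LOCK_FILE)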
def create_user_github(username, email):
    r = g.redis
    username = username.lower()
    if r.exists("username.to.id:" + username):
        return None, "Username exists, please try a different one."
    if not util.lock('create_user.' + username):
        return None, "Please wait some time before creating a new user."
    user_id = r.incr("users.count")
    auth_token = util.get_rand()
    now = int(time.time())
    pl = r.pipeline()
    pl.hmset("user:%s" % user_id, {
        "id": user_id,
        "username": username,
        "ctime": now,
        "karma": config.UserInitialKarma,
        "about": "",
        "email": email,
        "auth": auth_token,
        "apisecret": util.get_rand(),
        "flags": "g",  # github user
        "karma_incr_time": now,
        "replies": 0,
    })
    pl.set("username.to.id:" + username, user_id)
    pl.set("auth:" + auth_token, user_id)
    pl.execute()
    util.unlock('create_user.' + username)
    return auth_token, None
def create_user(username, password, userip):
    r = g.redis
    username = username.lower()
    if r.exists("username.to.id:" + username):
        return None, "Username exists, please try a different one."
    if not util.lock('create_user.' + username):
        return None, "Please wait some time before creating a new user."
    user_id = r.incr("users.count")
    auth_token = util.get_rand()
    salt = util.get_rand()
    now = int(time.time())
    pl = r.pipeline()
    pl.hmset("user:%s" % user_id, {
        "id": user_id,
        "username": username,
        "salt": salt,
        "password": util.hash_password(password, salt),
        "ctime": now,
        "karma": config.UserInitialKarma,
        "about": "",
        "email": "",
        "auth": auth_token,
        "apisecret": util.get_rand(),
        "flags": "",
        "karma_incr_time": now,
        "replies": 0,
    })
    pl.set("username.to.id:" + username, user_id)
    pl.set("auth:" + auth_token, user_id)
    pl.execute()
    util.unlock('create_user.' + username)
    return auth_token, None
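Both registration paths take a short-lived named lock so two concurrent requests cannot race on the same username. Since the app already talks to Redis through g.redis, a plausible sketch of util.lock/util.unlock on top of SET with NX and EX; the key prefix and TTL here are assumptions:

LOCK_TTL = 15  # seconds; assumed long enough to cover user creation

def lock(name):
    # SET key value NX EX ttl is atomic: it succeeds only if the key is
    # absent, and the TTL makes the lock expire if the process crashes.
    return g.redis.set("lock:" + name, "1", nx=True, ex=LOCK_TTL)

def unlock(name):
    g.redis.delete("lock:" + name)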
        type=int,
        help='will freeze the first N layers and unfreeze the rest')
    return parser.parse_args()


if __name__ == '__main__':
    try:
        args = parse_args()
        if args.data_dir:
            config.data_dir = args.data_dir
            config.set_paths()
        if args.model:
            config.model = args.model

        util.lock()
        util.override_keras_directory_iterator_next()
        util.set_classes_from_train_dir()
        util.set_samples_info()
        if not os.path.exists(config.trained_dir):
            os.mkdir(config.trained_dir)

        class_weight = util.get_class_weight(config.train_dir)
        # TODO: create class instance without dynamic module import
        model = util.get_model_class_instance(
            class_weight=class_weight,
            nb_epoch=args.nb_epoch,
            freeze_layers_number=args.freeze_layers_number)
        model.train()
        print('Training is finished!')
    except (KeyboardInterrupt, SystemExit):
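For context, this training entry point would presumably be invoked along these lines; the script name, dataset path, and model name are made up, while the flag names follow the args the snippet reads (data_dir, model, nb_epoch, freeze_layers_number):

# python train.py --data_dir data/sorted --model resnet50 \
#     --nb_epoch 30 --freeze_layers_number 10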
parser.add_option("-s", "--source", dest="source") parser.add_option("-m", "--maxfiles", default=100, dest="maxfiles") parser.add_option("-a", "--maxage", default=5, dest="maxage") parser.add_option("-d", "--debug", default=False, action="store_true", dest="debug") (options, args) = parser.parse_args() if options.debug: util.set_debug() else: util.set_log_file(options.source, True) lock_f = util.lock(options.source) #Create a lock config = config.load_source_config( options.source) #Load config file for source time_file = "%s/%s/%s.time" % (os.environ["DATA_DIR"], config["local_dir"], options.source) util.info("Acquiring data from %s" % options.source) try: # Read last check time try: last_time = cPickle.load(open(time_file, 'rb')) except IOError: last_time = "" t = random.random() * 15
    exit(1)

if args.db == "pri":
    newdb.init_db()
    database = newdb.get_db()
elif args.db == "sec":
    newdb.init_db(os.environ["SEC_DB_CONFIG_FILE"])
    database = newdb.get_db()
else:
    util.error("Valid database choices are [pri|sec]")
    sys.exit(1)

try:
    # Create a lock
    lockf = None
    lockf = util.lock(args.db)
    tmpDir = None

    database.setAttributeAutoCreate(True)

    date = fromDate
    while date < toDate:
        datestr = date.strftime("%Y%m%d")
        buffer = []
        try:
            database.start_transaction()
            # Get the most recent ts from the db
            max_ts_in_db = database.execute("SELECT max(date) as max_ts_in_db FROM mus").fetchall()
            if len(max_ts_in_db) == 0:
                max_ts_in_db = 0
            else:
                max_ts_in_db = max_ts_in_db[0]["max_ts_in_db"]
        lines[lineIndex] = util.queuePrefix + ",".join(builds) + "\n"
        util.writeStatus(lines)
        return True
    return False


if __name__ == "__main__":
    os.chdir(sys.path[0])
    util.dlog("Start to run test")
    lockRunName = "lock-run"
    if util.hasLock(lockRunName):
        util.dlog("It's already running")
        quit()
    util.lock(lockRunName)

    hasUpdate = False
    while util.atomOp(getAvailable):
        hasUpdate = True
        if testMethod == "webmark":
            downloadBinary()
        runTest()
        util.atomOp(updateStatus)

    if not hasUpdate:
        util.dlog("Has no test to run")

    util.unlock(lockRunName)
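This runner guards itself with a hasLock/lock/unlock trio of named locks. One minimal way to implement that trio with plain lock files, assuming a hypothetical locks/ directory; the real util module may use a different mechanism:

import os

LOCK_DIR = 'locks'  # hypothetical directory holding one file per lock name

def hasLock(name):
    return os.path.exists(os.path.join(LOCK_DIR, name))

def lock(name):
    if not os.path.isdir(LOCK_DIR):
        os.makedirs(LOCK_DIR)
    # An empty file marks the lock; hasLock() is checked before calling this.
    open(os.path.join(LOCK_DIR, name), 'w').close()

def unlock(name):
    os.unlink(os.path.join(LOCK_DIR, name))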